diff --git a/Makefile b/Makefile index 59a2f23a..5386c846 100644 --- a/Makefile +++ b/Makefile @@ -96,13 +96,16 @@ generate-swagger: # generate mock code generate-mocks: + @./mock_gen.sh + +go-generate: go generate ./... -run: +run: go-generate go run ./cmd/gmqttd start -c ./cmd/gmqttd/default_config.yml # generate all grpc files and mocks and build the go code -build: +build: go-generate go build -o $(BUILD_DIR)/gmqttd ./cmd/gmqttd # generate mocks and run short tests @@ -127,7 +130,7 @@ test-cover: test-all: test test-bench test-cover # Build Golang application binary with settings to enable it to run in a Docker scratch container. -binary: generate-grpc +binary: go-generate CGO_ENABLED=0 GOOS=linux go build -ldflags '-s' -o $(BUILD_DIR)/gmqttd ./cmd/gmqttd build-docker: diff --git a/README.md b/README.md index e77541aa..f84610cd 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,7 @@ [中文文档](https://github.com/DrmagicE/gmqtt/blob/master/README_ZH.md) # Gmqtt [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) [![Build Status](https://travis-ci.org/DrmagicE/gmqtt.svg?branch=master)](https://travis-ci.org/DrmagicE/gmqtt) [![codecov](https://codecov.io/gh/DrmagicE/gmqtt/branch/master/graph/badge.svg)](https://codecov.io/gh/DrmagicE/gmqtt) [![Go Report Card](https://goreportcard.com/badge/github.com/DrmagicE/gmqtt)](https://goreportcard.com/report/github.com/DrmagicE/gmqtt) -News: MQTT V5 is now supported. But due to those new features in v5, there area lots of breaking changes. -If you have any migration problems, feel free to raise an issue. -Or you can use the latest v3 [broker](https://github.com/DrmagicE/gmqtt/tree/v0.1.4). +News: Cluster mode is now supported, see [federation plugin](./plugin/federation/README.md) for examples and details. # Installation ```$ go get -u github.com/DrmagicE/gmqtt``` @@ -18,11 +16,7 @@ See `Server` interface in `server/server.go` and [admin](https://github.com/Drma * Provide GRPC and REST APIs to interact with server. (plugin:[admin](https://github.com/DrmagicE/gmqtt/blob/master/plugin/admin/README.md)) * Provide session persistence which means the broker can retrieve the session data after restart. Currently, only redis backend is supported. - - - -# Limitations -* Cluster is not supported. +* Provide clustering, see [federation plugin](./plugin/federation/README.md) for examples and details. # Get Started @@ -113,7 +107,8 @@ Gmqtt implements the following hooks: | OnDelivered | When a message is delivered to the client | | | OnClosed | When the client is closed | | | OnMsgDropped | When a message is dropped for some reasons| | - +| OnWillPublish | When the client is going to deliver a will message | Modify or drop the will message | +| OnWillPublished| When a will message has been delivered| | See `/examples/hook` for details. @@ -132,8 +127,3 @@ $ go test -race ./... ## Integration Test [paho.mqtt.testing](https://github.com/eclipse/paho.mqtt.testing). - -# TODO -* Support bridge mode and cluster. 
- -*Breaking changes may occur when adding this new features.* diff --git a/README_ZH.md b/README_ZH.md index 78ff5b6f..212ad058 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -1,6 +1,6 @@ # Gmqtt [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) [![Build Status](https://travis-ci.org/DrmagicE/Gmqtt.svg?branch=master)](https://travis-ci.org/DrmagicE/Gmqtt) [![codecov](https://codecov.io/gh/DrmagicE/Gmqtt/branch/master/graph/badge.svg)](https://codecov.io/gh/DrmagicE/Gmqtt) [![Go Report Card](https://goreportcard.com/badge/github.com/DrmagicE/Gmqtt)](https://goreportcard.com/report/github.com/DrmagicE/Gmqtt) -News: 现已支持V5版本,但由于V5的功能特性,Gmqtt做了很多不兼容的改动,对此有疑问欢迎提issue交流,或者依然使用最新的[V3版本](https://github.com/DrmagicE/gmqtt/tree/v0.1.4). +News: 集群模式已支持,示例和详情请参考[federation plugin](./plugin/federation/README.md)。 Gmqtt是用Go语言实现的一个具备灵活灵活扩展能力,高性能的MQTT broker,其完整实现了MQTT V3.1.1和V5协议。 @@ -15,9 +15,7 @@ Gmqtt是用Go语言实现的一个具备灵活灵活扩展能力,高性能的M * 提供监控指标,支持prometheus。 (plugin: [prometheus](https://github.com/DrmagicE/Gmqtt/blob/master/plugin/prometheus/READEME.md)) * GRPC和REST API 支持. (plugin:[admin](https://github.com/DrmagicE/Gmqtt/blob/master/plugin/admin/READEME.md)) * 支持session持久化,broker重启消息不丢失,目前支持redis持久化。 - -# 缺陷 -* 不支持集群。 +* 支持集群, 示例和详情请参考[federation plugin](./plugin/federation/README.md)。 # 开始 @@ -103,6 +101,8 @@ Gmqtt实现了下列钩子方法。 | OnDelivered | 消息从broker投递到客户端后调用 | | | OnClosed | 客户端断开连接后调用 | 统计在线客户端数量 | | OnMsgDropped | 消息被丢弃时调用 | | +| OnWillPublish | 发布遗嘱消息前 | 修改或丢弃遗嘱消息| +| OnWillPublished| 发布遗嘱消息后| | 在 `/examples/hook` 中有常用钩子的使用方法介绍。 @@ -118,8 +118,3 @@ $ go test -race ./... ## 集成测试 [paho.mqtt.testing](https://github.com/eclipse/paho.mqtt.testing). - -# TODO -* 桥接模式,集群模式 - -*暂时不保证向后兼容,在添加上述新功能时可能会有breaking changes。* diff --git a/cmd/gmqttd/command/start.go b/cmd/gmqttd/command/start.go index 5cef65f4..8c58789c 100644 --- a/cmd/gmqttd/command/start.go +++ b/cmd/gmqttd/command/start.go @@ -107,24 +107,27 @@ func NewStartCmd() *cobra.Command { } else { must(err) } - err = c.Validate() - must(err) - pid, err := pidfile.New(c.PidFile) - if err != nil { - must(fmt.Errorf("open pid file failed: %s", err)) + if c.PidFile != "" { + pid, err := pidfile.New(c.PidFile) + if err != nil { + must(fmt.Errorf("open pid file failed: %s", err)) + } + defer pid.Remove() } - defer pid.Remove() + tcpListeners, websockets, err := GetListeners(c) must(err) l, err := c.GetLogger(c.Log) must(err) logger = l + s := server.New( server.WithConfig(c), server.WithTCPListener(tcpListeners...), server.WithWebsocketServer(websockets...), server.WithLogger(l), ) + err = s.Init() if err != nil { fmt.Println(err) diff --git a/cmd/gmqttd/default_config.yml b/cmd/gmqttd/default_config.yml index 7f097be6..4f2433d7 100644 --- a/cmd/gmqttd/default_config.yml +++ b/cmd/gmqttd/default_config.yml @@ -1,5 +1,6 @@ -# Path to pid file, default to /var/run/gmqttd.pid -# pid_file: +# Path to pid file. +# If not set, there will be no pid file. +# pid_file: /var/run/gmqttd.pid listeners: # bind address @@ -16,7 +17,9 @@ listeners: api: grpc: - - address: "unix:///var/run/gmqttd.sock" # The gRPC server listen address. + # The gRPC server listen address. Supports unix socket and tcp socket. + - address: "tcp://127.0.0.1:8084" + #- address: "unix:///var/run/gmqttd.sock" # tls: # cacert: "path_to_ca_cert_file" # cert: "path_to_cert_file" @@ -24,7 +27,7 @@ api: http: # The HTTP server listen address. This is a reverse-proxy server in front of gRPC server. 
- address: "tcp://127.0.0.1:8083" - map: "unix:///var/run/gmqttd.sock" # The backend gRPC server endpoint, + map: "tcp://127.0.0.1:8084" # The backend gRPC server endpoint, # tls: # cacert: "path_to_ca_cert_file" # cert: "path_to_cert_file" @@ -82,8 +85,38 @@ plugins: hash: md5 # The file to store password. If it is a relative path, it locates in the same directory as the config file. # (e.g: ./gmqtt_password => /etc/gmqtt/gmqtt_password.yml) - # Default to ./gmqtt_password.yml + # Defaults to ./gmqtt_password.yml # password_file: + federation: + # node_name is the unique identifier for the node in the federation. Defaults to hostname. + # node_name: + # fed_addr is the gRPC server listening address for the federation internal communication. Defaults to :8901 + fed_addr: :8901 + # advertise_fed_addr is used to change the federation gRPC server address that we advertise to other nodes in the cluster. + # Defaults to "fed_addr".However, in some cases, there may be a routable address that cannot be bound. + # If the port is missing, the default federation port (8901) will be used. + advertise_fed_addr: :8901 + # gossip_addr is the address that the gossip will listen on, It is used for both UDP and TCP gossip. Defaults to :8902 + gossip_addr: :8902 + # advertise_gossip_addr is used to change the gossip server address that we advertise to other nodes in the cluster. + # Defaults to "GossipAddr" or the private IP address of the node if the IP in "GossipAddr" is 0.0.0.0. + # If the port is missing, the default gossip port (8902) will be used. + advertise_gossip_addr: :8902 + + # retry_join is the address of other nodes to join upon starting up. + # If port is missing, the default gossip port (8902) will be used. + #retry_join: + # - 127.0.0.1:8902 + + # rejoin_after_leave will be pass to "RejoinAfterLeave" in serf configuration. + # It controls our interaction with the snapshot file. + # When set to false (default), a leave causes a Serf to not rejoin the cluster until an explicit join is received. + # If this is set to true, we ignore the leave, and rejoin the cluster on start. + rejoin_after_leave: false + # snapshot_path will be pass to "SnapshotPath" in serf configuration. + # When Serf is started with a snapshot,it will attempt to join all the previously known nodes until one + # succeeds and will also avoid replaying old user events. 
+ snapshot_path: # plugin loading orders plugin_order: @@ -91,6 +124,7 @@ plugin_order: # - auth - prometheus - admin + - federation log: level: info # debug | info | warn | error format: text # json | text diff --git a/cmd/gmqttd/main.go b/cmd/gmqttd/main.go index 6c6dd387..f9c0a514 100644 --- a/cmd/gmqttd/main.go +++ b/cmd/gmqttd/main.go @@ -3,9 +3,10 @@ package main import ( "fmt" "net/http" - _ "net/http/pprof" "os" "path" + //"runtime/pprof" + _ "runtime/pprof" "github.com/spf13/cobra" @@ -40,12 +41,12 @@ func init() { } func main() { - // f, err := os.Create("cpu.profile") - // if err != nil { - // panic(err) - // } - // pprof.StartCPUProfile(f) - // defer pprof.StopCPUProfile() + //f, err := os.Create("cpu.profile") + //if err != nil { + // panic(err) + //} + //pprof.StartCPUProfile(f) + //defer pprof.StopCPUProfile() go func() { http.ListenAndServe(":6060", nil) }() diff --git a/cmd/gmqttd/plugins.go b/cmd/gmqttd/plugins.go index 6ba33d5d..bdf3dd2e 100644 --- a/cmd/gmqttd/plugins.go +++ b/cmd/gmqttd/plugins.go @@ -1,7 +1,11 @@ +//go:generate sh -c "cd ../../ && go run plugin_generate.go" +// generated by plugin_generate.go; DO NOT EDIT + package main import ( _ "github.com/DrmagicE/gmqtt/plugin/admin" _ "github.com/DrmagicE/gmqtt/plugin/auth" + _ "github.com/DrmagicE/gmqtt/plugin/federation" _ "github.com/DrmagicE/gmqtt/plugin/prometheus" ) diff --git a/config/config.go b/config/config.go index c0999bad..59b43714 100644 --- a/config/config.go +++ b/config/config.go @@ -1,7 +1,6 @@ package config import ( - "errors" "fmt" "io/ioutil" "os" @@ -38,10 +37,6 @@ func RegisterDefaultPluginConfig(name string, config Configuration) { // DefaultConfig return the default configuration. // If config file is not provided, gmqttd will start with DefaultConfig. func DefaultConfig() Config { - pidFile, err := getDefaultPidFile() - if err != nil { - panic(err) - } c := Config{ Listeners: DefaultListeners, MQTT: DefaultMQTTConfig, @@ -50,7 +45,6 @@ func DefaultConfig() Config { Level: "info", Format: "text", }, - PidFile: pidFile, Plugins: make(pluginConfig), Persistence: DefaultPersistenceConfig, TopicAliasManager: DefaultTopicAliasManager, @@ -115,6 +109,7 @@ type Config struct { Listeners []*ListenerConfig `yaml:"listeners"` API API `yaml:"api"` MQTT MQTT `yaml:"mqtt,omitempty"` + GRPC GRPC `yaml:"gRPC"` Log LogConfig `yaml:"log"` PidFile string `yaml:"pid_file"` ConfigDir string `yaml:"config_dir"` @@ -127,6 +122,10 @@ type Config struct { TopicAliasManager TopicAliasManager `yaml:"topic_alias_manager"` } +type GRPC struct { + Endpoint string `yaml:"endpoint"` +} + type TLSOptions struct { // CACert is the trust CA certificate file. 
CACert string `yaml:"cacert"` @@ -175,9 +174,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } func (c Config) Validate() (err error) { - if c.PidFile == "" { - return errors.New("empty pid_file") - } err = c.Log.Validate() if err != nil { return err diff --git a/config/config_unix.go b/config/config_unix.go deleted file mode 100644 index 6d292c6f..00000000 --- a/config/config_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package config - -func getDefaultPidFile() (string, error) { - return "/var/run/gmqttd.pid", nil -} diff --git a/config/config_windows.go b/config/config_windows.go deleted file mode 100644 index dea52fdd..00000000 --- a/config/config_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows - -package config - -import ( - "os" - "path/filepath" -) - -func getDefaultPidFile() (string, error) { - return filepath.Join(os.Getenv("programdata"), "gmqtt", "gmqttd.pid"), nil -} diff --git a/go.mod b/go.mod index 36e5cce1..086e6462 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,18 @@ module github.com/DrmagicE/gmqtt go 1.14 require ( - github.com/golang/mock v1.2.0 + github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.4.2 github.com/gomodule/redigo v1.8.2 + github.com/google/uuid v1.1.2 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/hashicorp/go-sockaddr v1.0.0 + github.com/hashicorp/logutils v1.0.0 + github.com/hashicorp/serf v0.9.5 github.com/iancoleman/strcase v0.1.2 - github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v1.4.0 github.com/spf13/cobra v1.0.0 @@ -19,7 +22,6 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd - golang.org/x/text v0.3.2 // indirect golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/grpc v1.34.0 diff --git a/go.sum b/go.sum index c4b78fd1..6444d430 100644 --- a/go.sum +++ b/go.sum @@ -8,11 +8,17 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -37,6 +43,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -53,8 +61,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -71,6 +79,8 @@ github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -81,6 +91,7 @@ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2 
h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= @@ -92,8 +103,32 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/iancoleman/strcase v0.1.2 h1:gnomlvw9tnV3ITTAxzKSgTF+8kFWcU/f+TgttpXGz1U= github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -114,10 +149,22 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -128,6 +175,8 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -135,6 +184,8 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -160,6 +211,9 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -206,8 +260,10 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -219,6 +275,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -229,6 +286,7 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -239,15 +297,23 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -259,8 +325,10 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/mock_gen.sh b/mock_gen.sh new file mode 100755 index 00000000..21b756ba --- /dev/null +++ b/mock_gen.sh @@ -0,0 +1,23 @@ +mockgen -source=config/config.go -destination=./config/config_mock.go -package=config -self_package=github.com/DrmagicE/gmqtt/config +mockgen -source=persistence/queue/elem.go -destination=./persistence/queue/elem_mock.go -package=queue -self_package=github.com/DrmagicE/gmqtt/queue +mockgen -source=persistence/queue/queue.go -destination=./persistence/queue/queue_mock.go -package=queue -self_package=github.com/DrmagicE/gmqtt/queue +mockgen -source=persistence/session/session.go -destination=./persistence/session/session_mock.go -package=session -self_package=github.com/DrmagicE/gmqtt/session +mockgen -source=persistence/subscription/subscription.go -destination=./persistence/subscription/subscription_mock.go -package=subscription -self_package=github.com/DrmagicE/gmqtt/subscription +mockgen -source=persistence/unack/unack.go -destination=./persistence/unack/unack_mock.go -package=unack -self_package=github.com/DrmagicE/gmqtt/unack +mockgen -source=pkg/packets/packets.go -destination=./pkg/packets/packets_mock.go -package=packets -self_package=github.com/DrmagicE/gmqtt/packets +mockgen -source=plugin/auth/account_grpc.pb.go -destination=./plugin/auth/account_grpc.pb_mock.go -package=auth -self_package=github.com/DrmagicE/gmqtt/auth +mockgen -source=plugin/federation/federation.pb.go -destination=./plugin/federation/federation.pb_mock.go -package=federation -self_package=github.com/DrmagicE/gmqtt/federation +mockgen -source=plugin/federation/peer.go -destination=./plugin/federation/peer_mock.go -package=federation -self_package=github.com/DrmagicE/gmqtt/federation +mockgen -source=plugin/federation/membership.go -destination=./plugin/federation/membership_mock.go -package=federation -self_package=github.com/DrmagicE/gmqtt/federation +mockgen -source=retained/interface.go -destination=./retained/interface_mock.go -package=retained -self_package=github.com/DrmagicE/gmqtt/retained +mockgen -source=server/client.go -destination=./server/client_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/persistence.go -destination=./server/persistence_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/plugin.go -destination=./server/plugin_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/server.go -destination=./server/server_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/service.go -destination=./server/service_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/stats.go -destination=./server/stats_mock.go -package=server -self_package=github.com/DrmagicE/gmqtt/server +mockgen -source=server/topic_alias.go -destination=./server/topic_alias_mock.go -package=server 
-self_package=github.com/DrmagicE/gmqtt/server + +# reflection mode. +# gRPC streaming mock issue: https://github.com/golang/mock/pull/163 +mockgen -package=federation -destination=/usr/local/gopath/src/github.com/DrmagicE/gmqtt/plugin/federation/federation_grpc.pb_mock.go github.com/DrmagicE/gmqtt/plugin/federation FederationClient,Federation_EventStreamClient diff --git a/persistence/memory_test.go b/persistence/memory_test.go index 9940a30e..ee12abe1 100644 --- a/persistence/memory_test.go +++ b/persistence/memory_test.go @@ -9,6 +9,7 @@ import ( "github.com/DrmagicE/gmqtt/config" queue_test "github.com/DrmagicE/gmqtt/persistence/queue/test" sess_test "github.com/DrmagicE/gmqtt/persistence/session/test" + "github.com/DrmagicE/gmqtt/persistence/subscription" sub_test "github.com/DrmagicE/gmqtt/persistence/subscription/test" unack_test "github.com/DrmagicE/gmqtt/persistence/unack/test" "github.com/DrmagicE/gmqtt/server" @@ -27,10 +28,14 @@ func (s *MemorySuite) TestQueue() { queue_test.TestQueue(s.T(), qs) } func (s *MemorySuite) TestSubscription() { - a := assert.New(s.T()) - st, err := s.p.NewSubscriptionStore(queue_test.TestServerConfig) - a.Nil(err) - sub_test.TestSuite(s.T(), st) + newFn := func() subscription.Store { + st, err := s.p.NewSubscriptionStore(queue_test.TestServerConfig) + if err != nil { + panic(err) + } + return st + } + sub_test.TestSuite(s.T(), newFn) } func (s *MemorySuite) TestSession() { diff --git a/persistence/redis_test.go b/persistence/redis_test.go index 82d8a3d2..cae04594 100644 --- a/persistence/redis_test.go +++ b/persistence/redis_test.go @@ -11,6 +11,7 @@ import ( "github.com/DrmagicE/gmqtt/config" queue_test "github.com/DrmagicE/gmqtt/persistence/queue/test" sess_test "github.com/DrmagicE/gmqtt/persistence/session/test" + "github.com/DrmagicE/gmqtt/persistence/subscription" sub_test "github.com/DrmagicE/gmqtt/persistence/subscription/test" unack_test "github.com/DrmagicE/gmqtt/persistence/unack/test" "github.com/DrmagicE/gmqtt/server" @@ -73,10 +74,14 @@ func (s *RedisSuite) TestQueue() { } func (s *RedisSuite) TestSubscription() { - a := assert.New(s.T()) - st, err := s.p.NewSubscriptionStore(config.Config{}) - a.Nil(err) - sub_test.TestSuite(s.T(), st) + newFn := func() subscription.Store { + st, err := s.p.NewSubscriptionStore(config.Config{}) + if err != nil { + panic(err) + } + return st + } + sub_test.TestSuite(s.T(), newFn) } func (s *RedisSuite) TestSession() { diff --git a/persistence/subscription/mem/topic_trie.go b/persistence/subscription/mem/topic_trie.go index c61f7070..7915276c 100644 --- a/persistence/subscription/mem/topic_trie.go +++ b/persistence/subscription/mem/topic_trie.go @@ -123,17 +123,11 @@ func (t *topicTrie) unsubscribe(clientID string, topicName string, shareName str // setRs set the node subscription info into rs func setRs(node *topicNode, rs subscription.ClientSubscriptions) { for cid, subOpts := range node.clients { - if _, ok := rs[cid]; !ok { - rs[cid] = make([]*gmqtt.Subscription, 0) - } rs[cid] = append(rs[cid], subOpts) } for _, c := range node.shared { for cid, subOpts := range c { - if _, ok := rs[cid]; !ok { - rs[cid] = make([]*gmqtt.Subscription, 0) - } rs[cid] = append(rs[cid], subOpts) } } diff --git a/persistence/subscription/mem/trie_db.go b/persistence/subscription/mem/trie_db.go index f223943e..6caee60f 100644 --- a/persistence/subscription/mem/trie_db.go +++ b/persistence/subscription/mem/trie_db.go @@ -284,7 +284,9 @@ func (db *TrieDB) SubscribeLocked(clientID string, subscriptions ...*gmqtt.Subsc } if 
index[clientID] == nil { index[clientID] = make(map[string]*topicNode) - db.clientStats[clientID] = &subscription.Stats{} + if db.clientStats[clientID] == nil { + db.clientStats[clientID] = &subscription.Stats{} + } } if _, ok := index[clientID][topicName]; !ok { db.stats.SubscriptionsTotal++ @@ -315,6 +317,7 @@ func (db *TrieDB) UnsubscribeLocked(clientID string, topics ...string) { shareName, topic := subscription.SplitTopic(topic) if shareName != "" { topicTrie = db.sharedTrie + index = db.sharedIndex } else if isSystemTopic(topic) { index = db.systemIndex topicTrie = db.systemTrie diff --git a/persistence/subscription/mem/trie_db_test.go b/persistence/subscription/mem/trie_db_test.go deleted file mode 100644 index df7267e0..00000000 --- a/persistence/subscription/mem/trie_db_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package mem - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/DrmagicE/gmqtt/persistence/subscription" - "github.com/DrmagicE/gmqtt/persistence/subscription/test" - "github.com/DrmagicE/gmqtt/pkg/packets" -) - -// TODO move to TestSuite -func TestTrieDB_GetStats(t *testing.T) { - a := assert.New(t) - db := NewStore() - tt := []struct { - clientID string - topic packets.Topic - }{ - {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, - - {clientID: "id1", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, - - {clientID: "id2", topic: packets.Topic{Name: "name2", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - - {clientID: "id3", topic: packets.Topic{Name: "name3", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - - {clientID: "id4", topic: packets.Topic{Name: "name3", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - {clientID: "id4", topic: packets.Topic{Name: "name4", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - } - for _, v := range tt { - db.Subscribe(v.clientID, subscription.FromTopic(v.topic, 0)) - } - stats := db.GetStats() - a.EqualValues(len(tt), stats.SubscriptionsTotal) - a.EqualValues(len(tt), stats.SubscriptionsCurrent) - - // If subscribe duplicated topic, total and current statistics should not increase - db.Subscribe("id0", subscription.FromTopic(packets.Topic{SubOptions: packets.SubOptions{Qos: packets.Qos0}, Name: "name0"}, 0)) - stats = db.GetStats() - a.EqualValues(len(tt), stats.SubscriptionsTotal) - a.EqualValues(len(tt), stats.SubscriptionsCurrent) - - utt := []struct { - clientID string - topic packets.Topic - }{ - {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, - {clientID: "id1", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, - } - for _, v := range utt { - db.Unsubscribe(v.clientID, v.topic.Name) - } - stats = db.GetStats() - a.EqualValues(len(tt), stats.SubscriptionsTotal) - a.EqualValues(len(tt)-len(utt), stats.SubscriptionsCurrent) - - //if unsubscribe not exists topic, current statistics should not decrease - db.Unsubscribe("id0", "name555") - stats = db.GetStats() - a.EqualValues(len(tt), stats.SubscriptionsTotal) - a.EqualValues(len(tt)-len(utt), stats.SubscriptionsCurrent) - - db.UnsubscribeAll("id4") - stats = db.GetStats() - a.EqualValues(len(tt), stats.SubscriptionsTotal) - a.EqualValues(len(tt)-len(utt)-2, stats.SubscriptionsCurrent) -} - -// TODO move to TestSuite -func TestTrieDB_GetClientStats(t *testing.T) { - a := assert.New(t) - db := NewStore() - tt := []struct { - clientID string - 
topic packets.Topic - }{ - {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, - {clientID: "id0", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, - {clientID: "id1", topic: packets.Topic{Name: "name2", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - {clientID: "id1", topic: packets.Topic{Name: "name3", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - {clientID: "id2", topic: packets.Topic{Name: "name4", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - {clientID: "id2", topic: packets.Topic{Name: "name5", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, - } - for _, v := range tt { - db.Subscribe(v.clientID, subscription.FromTopic(v.topic, 0)) - } - stats, _ := db.GetClientStats("id0") - a.EqualValues(2, stats.SubscriptionsTotal) - a.EqualValues(2, stats.SubscriptionsCurrent) - - db.UnsubscribeAll("id0") - stats, _ = db.GetClientStats("id0") - a.EqualValues(2, stats.SubscriptionsTotal) - a.EqualValues(0, stats.SubscriptionsCurrent) -} - -func TestSuite(t *testing.T) { - store := NewStore() - test.TestSuite(t, store) -} diff --git a/persistence/subscription/interface.go b/persistence/subscription/subscription.go similarity index 100% rename from persistence/subscription/interface.go rename to persistence/subscription/subscription.go diff --git a/persistence/subscription/interface_mock.go b/persistence/subscription/subscription_mock.go similarity index 99% rename from persistence/subscription/interface_mock.go rename to persistence/subscription/subscription_mock.go index 85aa0a58..14d3b2fc 100644 --- a/persistence/subscription/interface_mock.go +++ b/persistence/subscription/subscription_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: persistence/subscription/interface.go +// Source: persistence/subscription/subscription.go // Package subscription is a generated GoMock package. 
package subscription diff --git a/persistence/subscription/test/test_suite.go b/persistence/subscription/test/test_suite.go index 4b30a7a0..fac43a60 100644 --- a/persistence/subscription/test/test_suite.go +++ b/persistence/subscription/test/test_suite.go @@ -8,6 +8,7 @@ import ( "github.com/DrmagicE/gmqtt" "github.com/DrmagicE/gmqtt/persistence/subscription" + "github.com/DrmagicE/gmqtt/pkg/packets" ) var ( @@ -148,13 +149,118 @@ func testAddSubscribe(t *testing.T, store subscription.Store) { } } -func TestSuite(t *testing.T, store subscription.Store) { +func testGetStatus(t *testing.T, store subscription.Store) { a := assert.New(t) + var err error + tt := []struct { + clientID string + topic packets.Topic + }{ + {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, + {clientID: "id1", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, + {clientID: "id2", topic: packets.Topic{Name: "name2", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id3", topic: packets.Topic{Name: "name3", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id4", topic: packets.Topic{Name: "name3", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id4", topic: packets.Topic{Name: "name4", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + // test $share and system topic + {clientID: "id4", topic: packets.Topic{Name: "$share/abc/name4", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id4", topic: packets.Topic{Name: "$SYS/abc/def", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + } + for _, v := range tt { + _, err = store.Subscribe(v.clientID, subscription.FromTopic(v.topic, 0)) + a.NoError(err) + } + stats := store.GetStats() + expectedTotal, expectedCurrent := len(tt), len(tt) + + a.EqualValues(expectedTotal, stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, stats.SubscriptionsCurrent) + + // If subscribe duplicated topic, total and current statistics should not increase + _, err = store.Subscribe("id0", subscription.FromTopic(packets.Topic{SubOptions: packets.SubOptions{Qos: packets.Qos0}, Name: "name0"}, 0)) + a.NoError(err) + _, err = store.Subscribe("id4", subscription.FromTopic(packets.Topic{SubOptions: packets.SubOptions{Qos: packets.Qos2}, Name: "$share/abc/name4"}, 0)) + a.NoError(err) + + stats = store.GetStats() + a.EqualValues(expectedTotal, stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, stats.SubscriptionsCurrent) + + utt := []struct { + clientID string + topic packets.Topic + }{ + {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, + {clientID: "id1", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, + } + expectedCurrent -= 2 + for _, v := range utt { + a.NoError(store.Unsubscribe(v.clientID, v.topic.Name)) + } + stats = store.GetStats() + a.EqualValues(expectedTotal, stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, stats.SubscriptionsCurrent) + + //if unsubscribe not exists topic, current statistics should not decrease + a.NoError(store.Unsubscribe("id0", "name555")) + stats = store.GetStats() + a.EqualValues(len(tt), stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, stats.SubscriptionsCurrent) + + a.NoError(store.Unsubscribe("id4", "$share/abc/name4")) + + expectedCurrent -= 1 + stats = store.GetStats() + a.EqualValues(expectedTotal, stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, 
stats.SubscriptionsCurrent) + + a.NoError(store.UnsubscribeAll("id4")) + expectedCurrent -= 3 + stats = store.GetStats() + a.EqualValues(len(tt), stats.SubscriptionsTotal) + a.EqualValues(expectedCurrent, stats.SubscriptionsCurrent) +} + +func testGetClientStats(t *testing.T, store subscription.Store) { + a := assert.New(t) + var err error + tt := []struct { + clientID string + topic packets.Topic + }{ + {clientID: "id0", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos0}}}, + {clientID: "id0", topic: packets.Topic{Name: "name1", SubOptions: packets.SubOptions{Qos: packets.Qos1}}}, + // test $share and system topic + {clientID: "id0", topic: packets.Topic{Name: "$share/abc/name5", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id0", topic: packets.Topic{Name: "$SYS/a/b/c", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + + {clientID: "id1", topic: packets.Topic{Name: "name0", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id1", topic: packets.Topic{Name: "$share/abc/name5", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id2", topic: packets.Topic{Name: "$SYS/a/b/c", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + {clientID: "id2", topic: packets.Topic{Name: "name5", SubOptions: packets.SubOptions{Qos: packets.Qos2}}}, + } + for _, v := range tt { + _, err = store.Subscribe(v.clientID, subscription.FromTopic(v.topic, 0)) + a.NoError(err) + } + stats, _ := store.GetClientStats("id0") + a.EqualValues(4, stats.SubscriptionsTotal) + a.EqualValues(4, stats.SubscriptionsCurrent) + + a.NoError(store.UnsubscribeAll("id0")) + stats, _ = store.GetClientStats("id0") + a.EqualValues(4, stats.SubscriptionsTotal) + a.EqualValues(0, stats.SubscriptionsCurrent) +} + +func TestSuite(t *testing.T, new func() subscription.Store) { + a := assert.New(t) + store := new() a.Nil(store.Init(nil)) defer store.Close() for i := 0; i <= 1; i++ { testAddSubscribe(t, store) - t.Run("GetTopic"+strconv.Itoa(i), func(t *testing.T) { + t.Run("testGetTopic"+strconv.Itoa(i), func(t *testing.T) { testGetTopic(t, store) }) t.Run("testTopicMatch"+strconv.Itoa(i), func(t *testing.T) { @@ -167,6 +273,20 @@ func TestSuite(t *testing.T, store subscription.Store) { testUnsubscribe(t, store) }) } + + store2 := new() + a.Nil(store2.Init(nil)) + defer store2.Close() + t.Run("testGetStatus", func(t *testing.T) { + testGetStatus(t, store2) + }) + + store3 := new() + a.Nil(store3.Init(nil)) + defer store3.Close() + t.Run("testGetStatus", func(t *testing.T) { + testGetClientStats(t, store3) + }) } func testGetTopic(t *testing.T, store subscription.Store) { a := assert.New(t) @@ -478,5 +598,4 @@ func testIterateSystem(t *testing.T, store subscription.Store) { }) a.ElementsMatch([]*gmqtt.Subscription{systemTopicA}, got["client1"]) a.Len(got["client2"], 0) - } diff --git a/plugin/admin/config.go b/plugin/admin/config.go new file mode 100644 index 00000000..b45e1a1f --- /dev/null +++ b/plugin/admin/config.go @@ -0,0 +1,78 @@ +package admin + +import ( + "errors" + "net" +) + +// Config is the configuration for the admin plugin. +type Config struct { + HTTP HTTPConfig `yaml:"http"` + GRPC GRPCConfig `yaml:"grpc"` +} + +// HTTPConfig is the configuration for http endpoint. +type HTTPConfig struct { + // Enable indicates whether to expose http endpoint. + Enable bool `yaml:"enable"` + // Addr is the address that the http server listen on. + Addr string `yaml:"http_addr"` +} + +// GRPCConfig is the configuration for gRPC endpoint. 
+type GRPCConfig struct { + // Addr is the address that the gRPC server listen on. + Addr string `yaml:"http_addr"` +} + +// Validate validates the configuration, and return an error if it is invalid. +func (c *Config) Validate() error { + if c.HTTP.Enable { + _, _, err := net.SplitHostPort(c.HTTP.Addr) + if err != nil { + return errors.New("invalid http_addr") + } + } + _, _, err := net.SplitHostPort(c.GRPC.Addr) + if err != nil { + return errors.New("invalid grpc_addr") + } + return nil +} + +// DefaultConfig is the default configuration. +var DefaultConfig = Config{ + HTTP: HTTPConfig{ + Enable: true, + Addr: "127.0.0.1:8083", + }, + GRPC: GRPCConfig{ + Addr: "unix://./gmqttd.sock", + }, +} + +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + type cfg Config + var v = &struct { + Admin cfg `yaml:"admin"` + }{ + Admin: cfg(DefaultConfig), + } + if err := unmarshal(v); err != nil { + return err + } + emptyGRPC := GRPCConfig{} + if v.Admin.GRPC == emptyGRPC { + v.Admin.GRPC = DefaultConfig.GRPC + } + emptyHTTP := HTTPConfig{} + if v.Admin.HTTP == emptyHTTP { + v.Admin.HTTP = DefaultConfig.HTTP + } + empty := cfg(Config{}) + if v.Admin == empty { + v.Admin = cfg(DefaultConfig) + } + *c = Config(v.Admin) + return nil +} diff --git a/plugin/auth/account_grpc.pb_mock.go b/plugin/auth/account_grpc.pb_mock.go new file mode 100644 index 00000000..90b042c4 --- /dev/null +++ b/plugin/auth/account_grpc.pb_mock.go @@ -0,0 +1,246 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: plugin/auth/account_grpc.pb.go + +// Package auth is a generated GoMock package. +package auth + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + reflect "reflect" +) + +// MockAccountServiceClient is a mock of AccountServiceClient interface +type MockAccountServiceClient struct { + ctrl *gomock.Controller + recorder *MockAccountServiceClientMockRecorder +} + +// MockAccountServiceClientMockRecorder is the mock recorder for MockAccountServiceClient +type MockAccountServiceClientMockRecorder struct { + mock *MockAccountServiceClient +} + +// NewMockAccountServiceClient creates a new mock instance +func NewMockAccountServiceClient(ctrl *gomock.Controller) *MockAccountServiceClient { + mock := &MockAccountServiceClient{ctrl: ctrl} + mock.recorder = &MockAccountServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAccountServiceClient) EXPECT() *MockAccountServiceClientMockRecorder { + return m.recorder +} + +// List mocks base method +func (m *MockAccountServiceClient) List(ctx context.Context, in *ListAccountsRequest, opts ...grpc.CallOption) (*ListAccountsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "List", varargs...) + ret0, _ := ret[0].(*ListAccountsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockAccountServiceClientMockRecorder) List(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockAccountServiceClient)(nil).List), varargs...) 
+} + +// Get mocks base method +func (m *MockAccountServiceClient) Get(ctx context.Context, in *GetAccountRequest, opts ...grpc.CallOption) (*GetAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(*GetAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockAccountServiceClientMockRecorder) Get(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAccountServiceClient)(nil).Get), varargs...) +} + +// Update mocks base method +func (m *MockAccountServiceClient) Update(ctx context.Context, in *UpdateAccountRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(*empty.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Update indicates an expected call of Update +func (mr *MockAccountServiceClientMockRecorder) Update(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockAccountServiceClient)(nil).Update), varargs...) +} + +// Delete mocks base method +func (m *MockAccountServiceClient) Delete(ctx context.Context, in *DeleteAccountRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(*empty.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Delete indicates an expected call of Delete +func (mr *MockAccountServiceClientMockRecorder) Delete(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAccountServiceClient)(nil).Delete), varargs...) 
+} + +// MockAccountServiceServer is a mock of AccountServiceServer interface +type MockAccountServiceServer struct { + ctrl *gomock.Controller + recorder *MockAccountServiceServerMockRecorder +} + +// MockAccountServiceServerMockRecorder is the mock recorder for MockAccountServiceServer +type MockAccountServiceServerMockRecorder struct { + mock *MockAccountServiceServer +} + +// NewMockAccountServiceServer creates a new mock instance +func NewMockAccountServiceServer(ctrl *gomock.Controller) *MockAccountServiceServer { + mock := &MockAccountServiceServer{ctrl: ctrl} + mock.recorder = &MockAccountServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAccountServiceServer) EXPECT() *MockAccountServiceServerMockRecorder { + return m.recorder +} + +// List mocks base method +func (m *MockAccountServiceServer) List(arg0 context.Context, arg1 *ListAccountsRequest) (*ListAccountsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", arg0, arg1) + ret0, _ := ret[0].(*ListAccountsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockAccountServiceServerMockRecorder) List(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockAccountServiceServer)(nil).List), arg0, arg1) +} + +// Get mocks base method +func (m *MockAccountServiceServer) Get(arg0 context.Context, arg1 *GetAccountRequest) (*GetAccountResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1) + ret0, _ := ret[0].(*GetAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockAccountServiceServerMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAccountServiceServer)(nil).Get), arg0, arg1) +} + +// Update mocks base method +func (m *MockAccountServiceServer) Update(arg0 context.Context, arg1 *UpdateAccountRequest) (*empty.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", arg0, arg1) + ret0, _ := ret[0].(*empty.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Update indicates an expected call of Update +func (mr *MockAccountServiceServerMockRecorder) Update(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockAccountServiceServer)(nil).Update), arg0, arg1) +} + +// Delete mocks base method +func (m *MockAccountServiceServer) Delete(arg0 context.Context, arg1 *DeleteAccountRequest) (*empty.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0, arg1) + ret0, _ := ret[0].(*empty.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Delete indicates an expected call of Delete +func (mr *MockAccountServiceServerMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAccountServiceServer)(nil).Delete), arg0, arg1) +} + +// mustEmbedUnimplementedAccountServiceServer mocks base method +func (m *MockAccountServiceServer) mustEmbedUnimplementedAccountServiceServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedAccountServiceServer") +} + +// mustEmbedUnimplementedAccountServiceServer 
indicates an expected call of mustEmbedUnimplementedAccountServiceServer
+func (mr *MockAccountServiceServerMockRecorder) mustEmbedUnimplementedAccountServiceServer() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedAccountServiceServer", reflect.TypeOf((*MockAccountServiceServer)(nil).mustEmbedUnimplementedAccountServiceServer))
+}
+
+// MockUnsafeAccountServiceServer is a mock of UnsafeAccountServiceServer interface
+type MockUnsafeAccountServiceServer struct {
+	ctrl     *gomock.Controller
+	recorder *MockUnsafeAccountServiceServerMockRecorder
+}
+
+// MockUnsafeAccountServiceServerMockRecorder is the mock recorder for MockUnsafeAccountServiceServer
+type MockUnsafeAccountServiceServerMockRecorder struct {
+	mock *MockUnsafeAccountServiceServer
+}
+
+// NewMockUnsafeAccountServiceServer creates a new mock instance
+func NewMockUnsafeAccountServiceServer(ctrl *gomock.Controller) *MockUnsafeAccountServiceServer {
+	mock := &MockUnsafeAccountServiceServer{ctrl: ctrl}
+	mock.recorder = &MockUnsafeAccountServiceServerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockUnsafeAccountServiceServer) EXPECT() *MockUnsafeAccountServiceServerMockRecorder {
+	return m.recorder
+}
+
+// mustEmbedUnimplementedAccountServiceServer mocks base method
+func (m *MockUnsafeAccountServiceServer) mustEmbedUnimplementedAccountServiceServer() {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "mustEmbedUnimplementedAccountServiceServer")
+}
+
+// mustEmbedUnimplementedAccountServiceServer indicates an expected call of mustEmbedUnimplementedAccountServiceServer
+func (mr *MockUnsafeAccountServiceServerMockRecorder) mustEmbedUnimplementedAccountServiceServer() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedAccountServiceServer", reflect.TypeOf((*MockUnsafeAccountServiceServer)(nil).mustEmbedUnimplementedAccountServiceServer))
+}
diff --git a/plugin/federation/README.md b/plugin/federation/README.md
new file mode 100644
index 00000000..b9278c44
--- /dev/null
+++ b/plugin/federation/README.md
@@ -0,0 +1,213 @@
+# Federation
+
+Federation is a kind of clustering mechanism that provides high availability and horizontal scaling.
+In Federation mode, multiple gmqtt brokers can be grouped together and "act as one".
+However, it is impossible to fulfill all requirements of the MQTT specification in a distributed environment.
+There are some limitations:
+1. Persistent sessions cannot be resumed from another node.
+2. Clients with the same client ID can connect to different nodes at the same time and will not be kicked out.
+
+This is because session information is stored only in the local node and is not shared between nodes.
+
+## Quick Start
+The following commands will start a two-node federation. The configuration files can be found [here](./examples).
+Start node1 in Terminal1:
+```bash
+$ gmqttd start -c path/to/examples/node1_config.yml
+```
+Start node2 in Terminal2:
+```bash
+$ gmqttd start -c path/to/examples/node2_config.yml
+```
+After node1 and node2 are started, they will automatically join into one federation.
+
+We can test the federation with `mosquitto_pub/sub`:
+Connect to node2 and subscribe to topicA:
+```bash
+$ mosquitto_sub -t topicA -h 127.0.0.1 -p 1884
+```
+Connect to node1 and send a message to topicA:
+```bash
+$ mosquitto_pub -t topicA -m 123 -h 127.0.0.1 -p 1883
+```
+The `mosquitto_sub` client will receive "123" and print it in the terminal:
+```bash
+$ mosquitto_sub -t topicA -h 127.0.0.1 -p 1884
+123
+```
+
+## Join Nodes via REST API
+Federation provides gRPC/REST APIs to join/leave the federation and query member information, see [swagger](./swagger/federation.swagger.json) for details.
+In addition to joining nodes upon starting up, you can join a node into the federation by using the `Join` API.
+
+Start node3 with a configuration whose `retry_join` is empty, which means that the node will not join any nodes upon starting up:
+```bash
+$ gmqttd start -c path/to/examples/join_node3_config.yml
+```
+We can send a `Join` request to any node in the federation to get node3 joined. For example, send a `Join` request to node1:
+```bash
+$ curl -X POST -d '{"hosts":["127.0.0.1:8932"]}' '127.0.0.1:8083/v1/federation/join'
+{}
+```
+And check the members in the federation:
+```bash
+curl http://127.0.0.1:8083/v1/federation/members
+{
+    "members": [
+        {
+            "name": "node1",
+            "addr": "192.168.0.105:8902",
+            "tags": {
+                "fed_addr": "192.168.0.105:8901"
+            },
+            "status": "STATUS_ALIVE"
+        },
+        {
+            "name": "node2",
+            "addr": "192.168.0.105:8912",
+            "tags": {
+                "fed_addr": "192.168.0.105:8911"
+            },
+            "status": "STATUS_ALIVE"
+        },
+        {
+            "name": "node3",
+            "addr": "192.168.0.105:8932",
+            "tags": {
+                "fed_addr": "192.168.0.105:8931"
+            },
+            "status": "STATUS_ALIVE"
+        }
+    ]
+}
+```
+You will see that there are 3 nodes alive in the federation.
+
+## Configuration
+```go
+// Config is the configuration for the federation plugin.
+type Config struct {
+	// NodeName is the unique identifier for the node in the federation. Defaults to hostname.
+	NodeName string `yaml:"node_name"`
+	// FedAddr is the gRPC server listening address for the federation internal communication.
+	// Defaults to :8901.
+	// If the port is missing, the default federation port (8901) will be used.
+	FedAddr string `yaml:"fed_addr"`
+	// AdvertiseFedAddr is used to change the federation gRPC server address that we advertise to other nodes in the cluster.
+	// Defaults to "FedAddr" or the private IP address of the node if the IP in "FedAddr" is 0.0.0.0.
+	// However, in some cases, there may be a routable address that cannot be bound.
+	// If the port is missing, the default federation port (8901) will be used.
+	AdvertiseFedAddr string `yaml:"advertise_fed_addr"`
+	// GossipAddr is the address that the gossip layer will listen on. It is used for both UDP and TCP gossip. Defaults to :8902.
+	GossipAddr string `yaml:"gossip_addr"`
+	// AdvertiseGossipAddr is used to change the gossip server address that we advertise to other nodes in the cluster.
+	// Defaults to "GossipAddr" or the private IP address of the node if the IP in "GossipAddr" is 0.0.0.0.
+	// If the port is missing, the default gossip port (8902) will be used.
+	AdvertiseGossipAddr string `yaml:"advertise_gossip_addr"`
+	// RetryJoin is the list of addresses of other nodes to join upon starting up.
+	// If the port is missing, the default gossip port (8902) will be used.
+	RetryJoin []string `yaml:"retry_join"`
+	// RetryInterval is the time to wait between join attempts. Defaults to 5s.
+	RetryInterval time.Duration `yaml:"retry_interval"`
+	// RetryTimeout is the maximum time to wait for joining all nodes in RetryJoin successfully.
+	// If the timeout expires, the server will exit with an error. Defaults to 1m.
+	RetryTimeout time.Duration `yaml:"retry_timeout"`
+	// SnapshotPath will be passed to "SnapshotPath" in the serf configuration.
+	// When Serf is started with a snapshot,
+	// it will attempt to join all the previously known nodes until one
+	// succeeds and will also avoid replaying old user events.
+	SnapshotPath string `yaml:"snapshot_path"`
+	// RejoinAfterLeave will be passed to "RejoinAfterLeave" in the serf configuration.
+	// It controls our interaction with the snapshot file.
+	// When set to false (default), a leave causes Serf to not rejoin
+	// the cluster until an explicit join is received. If this is set to
+	// true, we ignore the leave, and rejoin the cluster on start.
+	RejoinAfterLeave bool `yaml:"rejoin_after_leave"`
+}
+```
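+
+The struct above maps to the `plugins.federation` section of the broker configuration file. As a minimal sketch (mirroring the node1 quick-start example; the addresses and ports are illustrative, see [examples](./examples) for complete files):
+```yaml
+plugins:
+  federation:
+    node_name: node1
+    fed_addr: :8901
+    gossip_addr: :8902
+    retry_join:
+      - 127.0.0.1:8912
+plugin_order:
+  - federation
+```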
+
+## Implementation Details
+
+### Inter-node Communication
+Nodes in the same federation communicate with each other through a pair of gRPC streaming APIs:
+```proto
+message Event {
+    uint64 id = 1;
+    oneof Event {
+        Subscribe Subscribe = 2;
+        Message message = 3;
+        Unsubscribe unsubscribe = 4;
+    }
+}
+service Federation {
+    rpc Hello(ClientHello) returns (ServerHello){}
+    rpc EventStream (stream Event) returns (stream Ack){}
+}
+```
+In general, a node acts as both a Client and a Server of the `Federation` gRPC service.
+* As a Client, the node sends subscribe, unsubscribe and message-published events to other nodes when necessary.
+Each event has an EventID, which is incremental and unique within a session.
+* As a Server, when the node receives an event from a Client, it returns an acknowledgement after the event has been handled successfully.
+
+### Session State
+Events are designed to be idempotent and are delivered at least once, just like QoS 1 messages in the MQTT protocol.
+In order to implement QoS 1 protocol flows, the Client and Server need to associate state with a SessionID;
+this is referred to as the Session State. The Server also stores the federation tree and retained messages as part of the Session State.
+
+The Session State in the Client consists of:
+ * Events which have been sent to the Server, but have not been acknowledged.
+ * Events pending transmission to the Server.
+
+The Session State in the Server consists of:
+ * The existence of a Session, even if the rest of the Session State is empty.
+ * The EventID of the next event that the Server is willing to accept.
+ * Events which have been received from the Client, but have not been acknowledged yet.
+
+The Session State is stored in memory only. When the Client starts, it generates a random UUID as the SessionID.
+When the Client detects that a new node has joined, or when it reconnects to the Server, it sends a `Hello` request which contains the SessionID to perform a handshake.
+During the handshake, the Server will check whether a session for the SessionID exists.
+
+* If the session does not exist, the Server responds with `clean_start=true`.
+* If the session exists, the Server responds with `clean_start=false` and sets `next_event_id` to the next EventID that it is willing to accept.
+
+After the handshake succeeds, the Client will start the `EventStream` (see the sketch after this list):
+* If the Client receives `clean_start=true`, it sends all local subscriptions and retained messages to the Server in order to sync the full state.
+* If the Client receives `clean_start=false`, it sends the events whose EventID is greater than or equal to `next_event_id`.
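+
+The following sketch is illustrative only (the types are hypothetical and not part of the plugin); it shows one way a client-side session could buffer events by EventID and replay them after a reconnect with `clean_start=false`:
+```go
+package main
+
+import "fmt"
+
+// pendingEvent is an event that has been assigned an EventID but not yet acknowledged.
+type pendingEvent struct {
+	id      uint64
+	payload string
+}
+
+// clientSession mirrors the client-side Session State described above.
+type clientSession struct {
+	nextID  uint64         // EventID to assign to the next event
+	pending []pendingEvent // sent or queued events that have not been acknowledged
+}
+
+// enqueue assigns the next EventID and buffers the event until it is acknowledged.
+func (s *clientSession) enqueue(payload string) {
+	s.pending = append(s.pending, pendingEvent{id: s.nextID, payload: payload})
+	s.nextID++
+}
+
+// ack drops every buffered event whose EventID has been acknowledged.
+func (s *clientSession) ack(eventID uint64) {
+	for len(s.pending) > 0 && s.pending[0].id <= eventID {
+		s.pending = s.pending[1:]
+	}
+}
+
+// replayFrom returns the events to resend when the Server answers the handshake
+// with clean_start=false and a next_event_id.
+func (s *clientSession) replayFrom(nextEventID uint64) (out []pendingEvent) {
+	for _, ev := range s.pending {
+		if ev.id >= nextEventID {
+			out = append(out, ev)
+		}
+	}
+	return out
+}
+
+func main() {
+	s := &clientSession{}
+	s.enqueue("subscribe a/b")   // EventID 0
+	s.enqueue("message a/b 123") // EventID 1
+	s.ack(0)                     // the Server has handled event 0
+	fmt.Println(s.replayFrom(1)) // after a reconnect, only event 1 is resent
+}
+```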
+
+### Subscription Tree
+Each node in the federation has two subscription trees: the local tree and the federation tree.
+The local tree stores the subscriptions of local clients and is managed by the gmqtt core, while the federation tree stores the subscriptions of remote nodes and is managed by the federation plugin.
+The federation tree uses the node name as the subscriber identifier for subscriptions.
+* When a node receives a sub/unsub packet from a local client, it updates its local tree first and then broadcasts the event to other nodes.
+* When a node receives a sub/unsub event from a remote node, it only updates its federation tree.
+
+All nodes in the federation have the same federation tree, and with this tree a node can determine which nodes an incoming message should be routed to.
+For example, Node1 and Node2 are in the same federation. Client1 connects to Node1 and subscribes to topic a/b; the subscription trees of these two nodes are as follows:
+
+Node1 local tree:
+
+| subscriber | topic |
+|------------|-------|
+| client1    | a/b   |
+
+Node1 federation tree:
+empty.
+
+Node2 local tree:
+empty.
+
+Node2 federation tree:
+
+| subscriber | topic |
+|------------|-------|
+| node1      | a/b   |
+
+### Message Distribution Process
+When an MQTT client publishes a message, the node it is connected to queries the federation tree
+and forwards the message to the relevant nodes according to the message topic,
+and then each of those nodes queries its local subscription tree and sends the message to the matching subscribers.
+
+### Membership Management
+Federation uses [Serf](https://github.com/hashicorp/serf) to manage membership.
+
+
diff --git a/plugin/federation/config.go b/plugin/federation/config.go
new file mode 100644
index 00000000..f0e63858
--- /dev/null
+++ b/plugin/federation/config.go
@@ -0,0 +1,189 @@
+package federation
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-sockaddr"
+)
+
+// Default config.
+const (
+	DefaultFedPort       = "8901"
+	DefaultGossipPort    = "8902"
+	DefaultRetryInterval = 5 * time.Second
+	DefaultRetryTimeout  = 1 * time.Minute
+)
+
+// stub function for testing
+var getPrivateIP = sockaddr.GetPrivateIP
+
+// Config is the configuration for the federation plugin.
+type Config struct {
+	// NodeName is the unique identifier for the node in the federation. Defaults to hostname.
+	NodeName string `yaml:"node_name"`
+	// FedAddr is the gRPC server listening address for the federation internal communication.
+	// Defaults to :8901.
+	// If the port is missing, the default federation port (8901) will be used.
+	FedAddr string `yaml:"fed_addr"`
+	// AdvertiseFedAddr is used to change the federation gRPC server address that we advertise to other nodes in the cluster.
+	// Defaults to "FedAddr" or the private IP address of the node if the IP in "FedAddr" is 0.0.0.0.
+	// However, in some cases, there may be a routable address that cannot be bound.
+	// If the port is missing, the default federation port (8901) will be used.
+	AdvertiseFedAddr string `yaml:"advertise_fed_addr"`
+	// GossipAddr is the address that the gossip layer will listen on. It is used for both UDP and TCP gossip. Defaults to :8902.
+	GossipAddr string `yaml:"gossip_addr"`
+	// AdvertiseGossipAddr is used to change the gossip server address that we advertise to other nodes in the cluster.
+	// Defaults to "GossipAddr" or the private IP address of the node if the IP in "GossipAddr" is 0.0.0.0.
+	// If the port is missing, the default gossip port (8902) will be used.
+	AdvertiseGossipAddr string `yaml:"advertise_gossip_addr"`
+	// RetryJoin is the list of addresses of other nodes to join upon starting up.
+	// If the port is missing, the default gossip port (8902) will be used.
+	RetryJoin []string `yaml:"retry_join"`
+	// RetryInterval is the time to wait between join attempts. Defaults to 5s.
+	RetryInterval time.Duration `yaml:"retry_interval"`
+	// RetryTimeout is the maximum time to wait for joining all nodes in RetryJoin successfully.
+	// If the timeout expires, the server will exit with an error. Defaults to 1m.
+	RetryTimeout time.Duration `yaml:"retry_timeout"`
+	// SnapshotPath will be passed to "SnapshotPath" in the serf configuration.
+	// When Serf is started with a snapshot,
+	// it will attempt to join all the previously known nodes until one
+	// succeeds and will also avoid replaying old user events.
+	SnapshotPath string `yaml:"snapshot_path"`
+	// RejoinAfterLeave will be passed to "RejoinAfterLeave" in the serf configuration.
+	// It controls our interaction with the snapshot file.
+	// When set to false (default), a leave causes Serf to not rejoin
+	// the cluster until an explicit join is received. If this is set to
+	// true, we ignore the leave, and rejoin the cluster on start.
+	RejoinAfterLeave bool `yaml:"rejoin_after_leave"`
+}
+
+func isPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+	if 1 <= i && i <= 65535 {
+		return true
+	}
+	return false
+}
+
+func getAddr(addr string, defaultPort string, fieldName string, usePrivate bool) (string, error) {
+	if addr == "" {
+		return "", fmt.Errorf("missing %s", fieldName)
+	}
+	host, port, err := net.SplitHostPort(addr)
+	if port == "" {
+		port = defaultPort
+	}
+	if addr[len(addr)-1] == ':' {
+		return "", fmt.Errorf("invalid %s", fieldName)
+	}
+	if err != nil && strings.Contains(err.Error(), "missing port in address") {
+		host, port, err = net.SplitHostPort(addr + ":" + defaultPort)
+		if err != nil {
+			return "", fmt.Errorf("invalid %s: %s", fieldName, err)
+		}
+	} else if err != nil {
+		return "", fmt.Errorf("invalid %s: %s", fieldName, err)
+	}
+	if usePrivate && (host == "0.0.0.0" || host == "") {
+		host, err = getPrivateIP()
+		if err != nil {
+			return "", err
+		}
+	}
+	if !isPortNumber(port) {
+		return "", fmt.Errorf("invalid port number: %s", port)
+	}
+	return net.JoinHostPort(host, port), nil
+}
+
+// Validate validates the configuration, and returns an error if it is invalid.
+func (c *Config) Validate() (err error) {
+	if c.NodeName == "" {
+		hostName, err := os.Hostname()
+		if err != nil {
+			return err
+		}
+		c.NodeName = hostName
+	}
+	c.FedAddr, err = getAddr(c.FedAddr, DefaultFedPort, "fed_addr", false)
+	if err != nil {
+		return err
+	}
+	c.GossipAddr, err = getAddr(c.GossipAddr, DefaultGossipPort, "gossip_addr", false)
+	if err != nil {
+		return err
+	}
+	if c.AdvertiseFedAddr == "" {
+		c.AdvertiseFedAddr = c.FedAddr
+	}
+	c.AdvertiseFedAddr, err = getAddr(c.AdvertiseFedAddr, DefaultFedPort, "advertise_fed_addr", true)
+	if err != nil {
+		return err
+	}
+	if c.AdvertiseGossipAddr == "" {
+		c.AdvertiseGossipAddr = c.GossipAddr
+	}
+	c.AdvertiseGossipAddr, err = getAddr(c.AdvertiseGossipAddr, DefaultGossipPort, "advertise_gossip_addr", true)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range c.RetryJoin {
+		c.RetryJoin[k], err = getAddr(v, DefaultGossipPort, "retry_join", false)
+		if err != nil {
+			return err
+		}
+	}
+	if c.RetryInterval <= 0 {
+		return fmt.Errorf("invalid retry_interval: %d", c.RetryInterval)
+	}
+
+	if c.RetryTimeout <= 0 {
+		return fmt.Errorf("invalid retry_timeout: %d", c.RetryTimeout)
+	}
+	return nil
+}
+
+// DefaultConfig is the default configuration.
+var DefaultConfig = Config{}
+
+func init() {
+	hostName, err := os.Hostname()
+	if err != nil {
+		panic(err)
+	}
+	DefaultConfig = Config{
+		NodeName:      hostName,
+		FedAddr:       ":" + DefaultFedPort,
+		GossipAddr:    ":" + DefaultGossipPort,
+		RetryJoin:     nil,
+		RetryInterval: DefaultRetryInterval,
+		RetryTimeout:  DefaultRetryTimeout,
+	}
+}
+
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type cfg Config
+	df := cfg(DefaultConfig)
+	var v = &struct {
+		Federation *cfg `yaml:"federation"`
+	}{
+		Federation: &df,
+	}
+	if err := unmarshal(v); err != nil {
+		return err
+	}
+	if v.Federation == nil {
+		v.Federation = &df
+	}
+	*c = Config(*v.Federation)
+	return nil
+}
diff --git a/plugin/federation/config_test.go b/plugin/federation/config_test.go
new file mode 100644
index 00000000..9f86f09a
--- /dev/null
+++ b/plugin/federation/config_test.go
@@ -0,0 +1,234 @@
+package federation
+
+import (
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+)
+
+func init() {
+	getPrivateIP = func() (s string, e error) {
+		return "127.0.0.1", nil
+	}
+}
+
+func TestConfig_Validate(t *testing.T) {
+
+	var tt = []struct {
+		name     string
+		cfg      *Config
+		expected *Config
+		valid    bool
+	}{
+		{
+			name: "invalid1",
+			cfg: &Config{
+				NodeName:         "name1",
+				FedAddr:          "",
+				AdvertiseFedAddr: "127.0.0.1:1234",
+				GossipAddr:       "127.0.0.1:1235",
+				RetryJoin:        nil,
+				RetryInterval:    0,
+				RetryTimeout:     0,
+				SnapshotPath:     "",
+				RejoinAfterLeave: false,
+			},
+			valid: false,
+		},
+		{
+			name: "invalid2",
+			cfg: &Config{
+				NodeName:         "name2",
+				FedAddr:          "127.0.0.1:1233",
+				AdvertiseFedAddr: "127.0.0.1:1234",
+				GossipAddr:       "127.0.0.1:1235",
+				RetryJoin:        nil,
+				RetryInterval:    0,
+				RetryTimeout:     0,
+				SnapshotPath:     "",
+				RejoinAfterLeave: false,
+			},
+			valid: false,
+		},
+		{
+			name: "invalid3",
+			cfg: &Config{
+				NodeName:         "name2",
+				FedAddr:          "127.0.0.1:",
+				AdvertiseFedAddr: "127.0.0.1:1234",
+				GossipAddr:       "127.0.0.1:1235",
+				RetryJoin:        nil,
+				RetryInterval:    1,
+				RetryTimeout:     2,
+				SnapshotPath:     "",
+				RejoinAfterLeave: false,
+			},
+			valid: false,
+		},
+		{
+			name: "invalid4",
+			cfg: &Config{
+				NodeName:         "name2",
+				FedAddr:          "127.0.0.1:1234:",
+				AdvertiseFedAddr: "127.0.0.1:1234",
+				GossipAddr:       "127.0.0.1:1235",
+				RetryJoin:        nil,
+				RetryInterval:    1,
+				RetryTimeout:     2,
+				
SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: false, + }, + { + name: "addDefaultPortIPv4", + cfg: &Config{ + NodeName: "name2", + FedAddr: "127.0.0.1", + AdvertiseFedAddr: "127.0.0.1", + GossipAddr: "127.0.0.1", + RetryJoin: []string{"127.0.0.1", "127.0.0.2"}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + expected: &Config{ + NodeName: "name2", + FedAddr: "127.0.0.1:" + DefaultFedPort, + AdvertiseFedAddr: "127.0.0.1:" + DefaultFedPort, + GossipAddr: "127.0.0.1:" + DefaultGossipPort, + AdvertiseGossipAddr: "127.0.0.1:" + DefaultGossipPort, + RetryJoin: []string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:" + DefaultGossipPort}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: true, + }, + { + name: "addDefaultPortIPv6", + cfg: &Config{ + NodeName: "name2", + FedAddr: "[::1]", + AdvertiseFedAddr: "[::1]:1234", + GossipAddr: "127.0.0.1", + RetryJoin: []string{"127.0.0.1", "127.0.0.2"}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + expected: &Config{ + NodeName: "name2", + FedAddr: "[::1]:" + DefaultFedPort, + AdvertiseFedAddr: "[::1]:1234", + GossipAddr: "127.0.0.1:" + DefaultGossipPort, + AdvertiseGossipAddr: "127.0.0.1:" + DefaultGossipPort, + RetryJoin: []string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:" + DefaultGossipPort}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: true, + }, + { + name: "defaultAdvertise1", + cfg: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: "", + GossipAddr: "127.0.0.1", + RetryJoin: []string{"127.0.0.1", "127.0.0.2"}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + expected: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: "127.0.0.1:1234", + GossipAddr: "127.0.0.1:" + DefaultGossipPort, + AdvertiseGossipAddr: "127.0.0.1:" + DefaultGossipPort, + RetryJoin: []string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:" + DefaultGossipPort}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: true, + }, + { + name: "defaultAdvertise2", + cfg: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: "", + GossipAddr: ":1235", + RetryJoin: []string{"127.0.0.1", "127.0.0.2"}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + expected: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: "127.0.0.1:1234", + GossipAddr: ":1235", + AdvertiseGossipAddr: "127.0.0.1:1235", + RetryJoin: []string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:" + DefaultGossipPort}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: true, + }, { + name: "defaultAdvertise3", + cfg: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: ":1234", + GossipAddr: ":1235", + RetryJoin: []string{"127.0.0.1", "127.0.0.2"}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + expected: &Config{ + NodeName: "name2", + FedAddr: "0.0.0.0:1234", + AdvertiseFedAddr: "127.0.0.1:1234", + GossipAddr: ":1235", + AdvertiseGossipAddr: "127.0.0.1:1235", + RetryJoin: []string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:" + DefaultGossipPort}, + RetryInterval: 1, + RetryTimeout: 2, + SnapshotPath: "", + RejoinAfterLeave: false, + }, + valid: true, + }, + } + for _, v 
:= range tt { + t.Run(v.name, func(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + err := v.cfg.Validate() + if v.valid { + a.NoError(err) + a.Equal(v.expected, v.cfg) + return + } + a.Error(err) + }) + } + +} diff --git a/plugin/federation/examples/join_node3_config.yml b/plugin/federation/examples/join_node3_config.yml new file mode 100644 index 00000000..f310038e --- /dev/null +++ b/plugin/federation/examples/join_node3_config.yml @@ -0,0 +1,69 @@ +listeners: + - address: ":1885" +api: + grpc: + - address: "tcp://127.0.0.1:8284" + http: + - address: "tcp://127.0.0.1:8283" + map: "tcp://127.0.0.1:8284" # The backend gRPC server endpoint +mqtt: + session_expiry: 2h + session_expiry_check_timer: 20s + message_expiry: 2h + max_packet_size: 268435456 + server_receive_maximum: 100 + max_keepalive: 60 + topic_alias_maximum: 10 + subscription_identifier_available: true + wildcard_subscription_available: true + shared_subscription_available: true + maximum_qos: 2 + retain_available: true + max_queued_messages: 10000 + max_inflight: 1000 + queue_qos0_messages: true + delivery_mode: onlyonce # overlap or onlyonce + allow_zero_length_clientid: true + +plugins: + federation: + # node_name is the unique identifier for the node in the federation. Defaults to hostname. + node_name: node3 + # fed_addr is the gRPC server listening address for the federation internal communication. Defaults to :8901 + fed_addr: :8931 + # advertise_fed_addr is used to change the federation gRPC server address that we advertise to other nodes in the cluster. + # Defaults to "fed_addr".However, in some cases, there may be a routable address that cannot be bound. + # If the port is missing, the default federation port (8901) will be used. + advertise_fed_addr: :8931 + # gossip_addr is the address that the gossip will listen on, It is used for both UDP and TCP gossip. Defaults to :8902 + gossip_addr: :8932 + # retry_join is the address of other nodes to join upon starting up. + # If port is missing, the default gossip port (8902) will be used. + #retry_join: + # - 127.0.0.1:8912 + # rejoin_after_leave will be pass to "RejoinAfterLeave" in serf configuration. + # It controls our interaction with the snapshot file. + # When set to false (default), a leave causes a Serf to not rejoin the cluster until an explicit join is received. + # If this is set to true, we ignore the leave, and rejoin the cluster on start. + rejoin_after_leave: false + # snapshot_path will be pass to "SnapshotPath" in serf configuration. + # When Serf is started with a snapshot,it will attempt to join all the previously known nodes until one + # succeeds and will also avoid replaying old user events. + snapshot_path: + +# plugin loading orders +plugin_order: + # Uncomment auth to enable authentication. 
+ # - auth + #- prometheus + #- admin + - federation +log: + level: debug # debug | info | warn | error + format: text # json | text + # whether to dump MQTT packet in debug level + dump_packet: false + + + + diff --git a/plugin/federation/examples/node1_config.yml b/plugin/federation/examples/node1_config.yml new file mode 100644 index 00000000..d87856de --- /dev/null +++ b/plugin/federation/examples/node1_config.yml @@ -0,0 +1,70 @@ +listeners: + - address: ":1883" +api: + grpc: + - address: "tcp://127.0.0.1:8084" + http: + - address: "tcp://127.0.0.1:8083" + map: "tcp://127.0.0.1:8084" # The backend gRPC server endpoint +mqtt: + session_expiry: 2h + session_expiry_check_timer: 20s + message_expiry: 2h + max_packet_size: 268435456 + server_receive_maximum: 100 + max_keepalive: 60 + topic_alias_maximum: 10 + subscription_identifier_available: true + wildcard_subscription_available: true + shared_subscription_available: true + maximum_qos: 2 + retain_available: true + max_queued_messages: 10000 + max_inflight: 1000 + queue_qos0_messages: true + delivery_mode: onlyonce # overlap or onlyonce + allow_zero_length_clientid: true + +plugins: + federation: + # node_name is the unique identifier for the node in the federation. Defaults to hostname. + node_name: node1 + # fed_addr is the gRPC server listening address for the federation internal communication. Defaults to :8901 + fed_addr: :8901 + # advertise_fed_addr is used to change the federation gRPC server address that we advertise to other nodes in the cluster. + # Defaults to "fed_addr".However, in some cases, there may be a routable address that cannot be bound. + # If the port is missing, the default federation port (8901) will be used. + advertise_fed_addr: :8901 + # gossip_addr is the address that the gossip will listen on, It is used for both UDP and TCP gossip. Defaults to :8902 + gossip_addr: :8902 + # retry_join is the address of other nodes to join upon starting up. + # If port is missing, the default gossip port (8902) will be used. + retry_join: + # Change 127.0.0.1 to real routable ip address if you run gmqtt in multiple nodes. + - 127.0.0.1:8912 + # rejoin_after_leave will be pass to "RejoinAfterLeave" in serf configuration. + # It controls our interaction with the snapshot file. + # When set to false (default), a leave causes a Serf to not rejoin the cluster until an explicit join is received. + # If this is set to true, we ignore the leave, and rejoin the cluster on start. + rejoin_after_leave: false + # snapshot_path will be pass to "SnapshotPath" in serf configuration. + # When Serf is started with a snapshot,it will attempt to join all the previously known nodes until one + # succeeds and will also avoid replaying old user events. + snapshot_path: + +# plugin loading orders +plugin_order: + # Uncomment auth to enable authentication. 
+ # - auth + #- prometheus + #- admin + - federation +log: + level: debug # debug | info | warn | error + format: text # json | text + # whether to dump MQTT packet in debug level + dump_packet: false + + + + diff --git a/plugin/federation/examples/node2_config.yml b/plugin/federation/examples/node2_config.yml new file mode 100644 index 00000000..843def82 --- /dev/null +++ b/plugin/federation/examples/node2_config.yml @@ -0,0 +1,70 @@ +listeners: + - address: ":1884" +api: + grpc: + - address: "tcp://127.0.0.1:8184" + http: + - address: "tcp://127.0.0.1:8183" + map: "tcp://127.0.0.1:8184" # The backend gRPC server endpoint +mqtt: + session_expiry: 2h + session_expiry_check_timer: 20s + message_expiry: 2h + max_packet_size: 268435456 + server_receive_maximum: 100 + max_keepalive: 60 + topic_alias_maximum: 10 + subscription_identifier_available: true + wildcard_subscription_available: true + shared_subscription_available: true + maximum_qos: 2 + retain_available: true + max_queued_messages: 10000 + max_inflight: 1000 + queue_qos0_messages: true + delivery_mode: onlyonce # overlap or onlyonce + allow_zero_length_clientid: true + +plugins: + federation: + # node_name is the unique identifier for the node in the federation. Defaults to hostname. + node_name: node2 + # fed_addr is the gRPC server listening address for the federation internal communication. Defaults to :8901 + fed_addr: :8911 + # advertise_fed_addr is used to change the federation gRPC server address that we advertise to other nodes in the cluster. + # Defaults to "fed_addr".However, in some cases, there may be a routable address that cannot be bound. + # If the port is missing, the default federation port (8901) will be used. + advertise_fed_addr: :8911 + # gossip_addr is the address that the gossip will listen on, It is used for both UDP and TCP gossip. Defaults to :8902 + gossip_addr: :8912 + # retry_join is the address of other nodes to join upon starting up. + # If port is missing, the default gossip port (8902) will be used. + retry_join: + # Change 127.0.0.1 to real routable ip address if you run gmqtt in multiple nodes. + - 127.0.0.1:8902 + # rejoin_after_leave will be pass to "RejoinAfterLeave" in serf configuration. + # It controls our interaction with the snapshot file. + # When set to false (default), a leave causes a Serf to not rejoin the cluster until an explicit join is received. + # If this is set to true, we ignore the leave, and rejoin the cluster on start. + rejoin_after_leave: false + # snapshot_path will be pass to "SnapshotPath" in serf configuration. + # When Serf is started with a snapshot,it will attempt to join all the previously known nodes until one + # succeeds and will also avoid replaying old user events. + snapshot_path: + +# plugin loading orders +plugin_order: + # Uncomment auth to enable authentication. 
+ # - auth + #- prometheus + #- admin + - federation +log: + level: debug # debug | info | warn | error + format: text # json | text + # whether to dump MQTT packet in debug level + dump_packet: false + + + + diff --git a/plugin/federation/federation.go b/plugin/federation/federation.go new file mode 100644 index 00000000..8ddda586 --- /dev/null +++ b/plugin/federation/federation.go @@ -0,0 +1,587 @@ +package federation + +import ( + "container/list" + "context" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/hashicorp/logutils" + "github.com/hashicorp/serf/serf" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/config" + "github.com/DrmagicE/gmqtt/persistence/subscription" + "github.com/DrmagicE/gmqtt/persistence/subscription/mem" + "github.com/DrmagicE/gmqtt/pkg/packets" + "github.com/DrmagicE/gmqtt/retained" + "github.com/DrmagicE/gmqtt/server" +) + +var _ server.Plugin = (*Federation)(nil) + +const Name = "federation" + +func init() { + server.RegisterPlugin(Name, New) + config.RegisterDefaultPluginConfig(Name, &DefaultConfig) +} + +func getSerfLogger(level string) (io.Writer, error) { + logLevel := strings.ToUpper(level) + var zapLevel zapcore.Level + err := zapLevel.UnmarshalText([]byte(logLevel)) + if err != nil { + return nil, err + } + zp, err := zap.NewStdLogAt(log, zapLevel) + if err != nil { + return nil, err + } + filter := &logutils.LevelFilter{ + Levels: []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR"}, + MinLevel: logutils.LogLevel(logLevel), + Writer: zp.Writer(), + } + return filter, nil +} + +func getSerfConfig(cfg *Config, eventCh chan serf.Event, logOut io.Writer) *serf.Config { + serfCfg := serf.DefaultConfig() + serfCfg.SnapshotPath = cfg.SnapshotPath + serfCfg.RejoinAfterLeave = cfg.RejoinAfterLeave + serfCfg.NodeName = cfg.NodeName + serfCfg.EventCh = eventCh + host, port, _ := net.SplitHostPort(cfg.GossipAddr) + if host != "" { + serfCfg.MemberlistConfig.BindAddr = host + } + p, _ := strconv.Atoi(port) + serfCfg.MemberlistConfig.BindPort = p + + // set advertise + host, port, _ = net.SplitHostPort(cfg.AdvertiseGossipAddr) + if host != "" { + serfCfg.MemberlistConfig.AdvertiseAddr = host + } + p, _ = strconv.Atoi(port) + serfCfg.MemberlistConfig.AdvertisePort = p + + serfCfg.Tags = map[string]string{"fed_addr": cfg.AdvertiseFedAddr} + serfCfg.LogOutput = logOut + serfCfg.MemberlistConfig.LogOutput = logOut + return serfCfg +} + +func New(config config.Config) (server.Plugin, error) { + log = server.LoggerWithField(zap.String("plugin", Name)) + cfg := config.Plugins[Name].(*Config) + f := &Federation{ + config: cfg, + nodeName: cfg.NodeName, + localSubStore: &localSubStore{}, + fedSubStore: &fedSubStore{ + TrieDB: mem.NewStore(), + sharedSent: map[string]uint64{}, + }, + serfEventCh: make(chan serf.Event, 10000), + sessionMgr: &sessionMgr{ + sessions: map[string]*session{}, + }, + peers: make(map[string]*peer), + exit: make(chan struct{}), + wg: &sync.WaitGroup{}, + } + logOut, err := getSerfLogger(config.Log.Level) + if err != nil { + return nil, err + } + serfCfg := getSerfConfig(cfg, f.serfEventCh, logOut) + s, err := serf.Create(serfCfg) + if err != nil { + return nil, err + } + f.serf = s + return f, nil +} + +var log *zap.Logger + +type Federation struct { + config *Config + nodeName 
string + serfMu sync.Mutex + serf iSerf + serfEventCh chan serf.Event + sessionMgr *sessionMgr + // localSubStore store the subscriptions for the local node. + // The local node will only broadcast "new subscriptions" to other nodes. + // "New subscription" is the first subscription for a topic name. + // It means that if two client in the local node subscribe the same topic, only the first subscription will be broadcast. + localSubStore *localSubStore + // fedSubStore store federation subscription tree which take nodeName as the subscriber identifier. + // It is used to determine which node the incoming message should be routed to. + fedSubStore *fedSubStore + // retainedStore store is the retained store of the gmqtt core. + // Retained message will be broadcast to other nodes in the federation. + retainedStore retained.Store + publisher server.Publisher + exit chan struct{} + memberMu sync.Mutex + peers map[string]*peer + wg *sync.WaitGroup +} + +type fedSubStore struct { + *mem.TrieDB + sharedMu sync.Mutex + // sharedSent store the number of shared topic sent. + // It is used to select which node the message should be send to with round-robin strategy + sharedSent map[string]uint64 +} + +type sessionMgr struct { + sync.RWMutex + sessions map[string]*session +} + +func (s *sessionMgr) add(nodeName string, id string) (cleanStart bool, nextID uint64) { + s.Lock() + defer s.Unlock() + if v, ok := s.sessions[nodeName]; ok && v.id == id { + nextID = v.nextEventID + } else { + // v.id != id indicates that the client side may recover from crash and need to rebuild the full state. + cleanStart = true + } + if cleanStart { + s.sessions[nodeName] = &session{ + id: id, + nodeName: nodeName, + // TODO config + seenEvents: newLRUCache(100), + nextEventID: 0, + } + } + return +} + +func (s *sessionMgr) get(nodeName string) *session { + s.RLock() + defer s.RUnlock() + return s.sessions[nodeName] +} + +// ForceLeave forces a member of a Serf cluster to enter the "left" state. +// Note that if the member is still actually alive, it will eventually rejoin the cluster. +// The true purpose of this method is to force remove "failed" nodes +// See https://www.serf.io/docs/commands/force-leave.html for details. +func (f *Federation) ForceLeave(ctx context.Context, req *ForceLeaveRequest) (*empty.Empty, error) { + if req.NodeName == "" { + return nil, errors.New("host can not be empty") + } + return &empty.Empty{}, f.serf.RemoveFailedNode(req.NodeName) +} + +// ListMembers lists all known members in the Serf cluster. +func (f *Federation) ListMembers(ctx context.Context, req *empty.Empty) (resp *ListMembersResponse, err error) { + resp = &ListMembersResponse{} + for _, v := range f.serf.Members() { + resp.Members = append(resp.Members, &Member{ + Name: v.Name, + Addr: net.JoinHostPort(v.Addr.String(), strconv.Itoa(int(v.Port))), + Tags: v.Tags, + Status: Status(v.Status), + }) + } + return resp, nil +} + +// Leave triggers a graceful leave for the local node. +// This is used to ensure other nodes see the node as "left" instead of "failed". +// Note that a leaved node cannot re-join the cluster unless you restart the leaved node. +func (f *Federation) Leave(ctx context.Context, req *empty.Empty) (resp *empty.Empty, err error) { + return &empty.Empty{}, f.serf.Leave() +} + +func (f *Federation) mustEmbedUnimplementedMembershipServer() { + return +} + +// Join tells the local node to join the an existing cluster. +// See https://www.serf.io/docs/commands/join.html for details. 
+func (f *Federation) Join(ctx context.Context, req *JoinRequest) (resp *empty.Empty, err error) { + for k, v := range req.Hosts { + req.Hosts[k], err = getAddr(v, DefaultGossipPort, "hosts", false) + if err != nil { + return &empty.Empty{}, status.Error(codes.InvalidArgument, err.Error()) + } + } + _, err = f.serf.Join(req.Hosts, true) + if err != nil { + return nil, err + } + return &empty.Empty{}, nil +} + +type localSubStore struct { + localStore server.SubscriptionService + sync.Mutex + // [clientID][topicName] + index map[string]map[string]struct{} + // topics store the reference counter for each topic. (map[topicName]uint64) + topics map[string]uint64 +} + +// init loads all subscriptions from gmqtt core into federation plugin. +func (l *localSubStore) init(sub server.SubscriptionService) { + l.localStore = sub + l.index = make(map[string]map[string]struct{}) + l.topics = make(map[string]uint64) + l.Lock() + defer l.Unlock() + // copy and convert subscription tree into localSubStore + sub.Iterate(func(clientID string, sub *gmqtt.Subscription) bool { + l.subscribeLocked(clientID, sub.GetFullTopicName()) + return true + }, subscription.IterationOptions{ + Type: subscription.TypeAll, + }) +} + +// subscribe subscribe the topicName for the client and increase the reference counter of the topicName. +// It returns whether the subscription is new +func (l *localSubStore) subscribe(clientID string, topicName string) (new bool) { + l.Lock() + defer l.Unlock() + return l.subscribeLocked(clientID, topicName) +} + +func (l *localSubStore) subscribeLocked(clientID string, topicName string) (new bool) { + if _, ok := l.index[clientID]; !ok { + l.index[clientID] = make(map[string]struct{}) + } + if _, ok := l.index[clientID][topicName]; !ok { + l.index[clientID][topicName] = struct{}{} + l.topics[topicName]++ + if l.topics[topicName] == 1 { + return true + } + } + return false +} + +func (l *localSubStore) decTopicCounterLocked(topicName string) { + if _, ok := l.topics[topicName]; ok { + l.topics[topicName]-- + if l.topics[topicName] <= 0 { + delete(l.topics, topicName) + } + } +} + +// unsubscribe unsubscribe the topicName for the client and decrease the reference counter of the topicName. +// It returns whether the topicName is removed (reference counter == 0) +func (l *localSubStore) unsubscribe(clientID string, topicName string) (remove bool) { + l.Lock() + defer l.Unlock() + if v, ok := l.index[clientID]; ok { + if _, ok := v[topicName]; ok { + delete(v, topicName) + if len(v) == 0 { + delete(l.index, clientID) + } + l.decTopicCounterLocked(topicName) + return l.topics[topicName] == 0 + } + } + return false + +} + +// unsubscribeAll unsubscribes all topics for the given client. +// Typically, this function is called when the client session has terminated. +// It returns any topic that is removed。 +func (l *localSubStore) unsubscribeAll(clientID string) (remove []string) { + l.Lock() + defer l.Unlock() + for topicName := range l.index[clientID] { + l.decTopicCounterLocked(topicName) + if l.topics[topicName] == 0 { + remove = append(remove, topicName) + } + } + delete(l.index, clientID) + return remove +} + +type session struct { + id string + nodeName string + nextEventID uint64 + // seenEvents cache recently seen events to avoid duplicate events. + seenEvents *lruCache +} + +// lruCache is the cache for recently seen events. 
+type lruCache struct { + l *list.List + items map[uint64]struct{} + size int +} + +func newLRUCache(size int) *lruCache { + return &lruCache{ + l: list.New(), + items: make(map[uint64]struct{}), + size: size, + } +} + +func (l *lruCache) set(id uint64) (exist bool) { + if _, ok := l.items[id]; ok { + return true + } + if l.size == len(l.items) { + elem := l.l.Front() + delete(l.items, elem.Value.(uint64)) + l.l.Remove(elem) + } + l.items[id] = struct{}{} + l.l.PushBack(id) + return false +} + +func getNodeNameFromContext(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", status.Errorf(codes.DataLoss, "EventStream: failed to get metadata") + } + s := md.Get("node_name") + if len(s) == 0 { + return "", status.Errorf(codes.InvalidArgument, "EventStream: missing node_name metadata") + } + nodeName := s[0] + if nodeName == "" { + return "", status.Errorf(codes.InvalidArgument, "EventStream: missing node_name metadata") + } + return nodeName, nil +} + +// Hello is the handler for the handshake process before opening the event stream. +func (f *Federation) Hello(ctx context.Context, req *ClientHello) (resp *ServerHello, err error) { + nodeName, err := getNodeNameFromContext(ctx) + if err != nil { + return nil, err + } + cleanStart, nextID := f.sessionMgr.add(nodeName, req.SessionId) + if cleanStart { + _ = f.fedSubStore.UnsubscribeAll(nodeName) + } + resp = &ServerHello{ + CleanStart: cleanStart, + NextEventId: nextID, + } + return resp, nil +} + +func (f *Federation) eventStreamHandler(sess *session, in *Event) (ack *Ack) { + eventID := in.Id + // duplicated event, ignore it + if sess.seenEvents.set(eventID) { + log.Warn("ignore duplicated event", zap.String("event", in.String())) + return &Ack{ + EventId: eventID, + } + } + if sub := in.GetSubscribe(); sub != nil { + _, _ = f.fedSubStore.Subscribe(sess.nodeName, &gmqtt.Subscription{ + ShareName: sub.ShareName, + TopicFilter: sub.TopicFilter, + }) + return &Ack{EventId: eventID} + } + if msg := in.GetMessage(); msg != nil { + pubMsg := eventToMessage(msg) + f.publisher.Publish(pubMsg) + if pubMsg.Retained { + f.retainedStore.AddOrReplace(pubMsg) + } + return &Ack{EventId: eventID} + } + if unsub := in.GetUnsubscribe(); unsub != nil { + _ = f.fedSubStore.Unsubscribe(sess.nodeName, unsub.TopicName) + return &Ack{EventId: eventID} + } + return nil +} + +func (f *Federation) EventStream(stream Federation_EventStreamServer) (err error) { + defer func() { + if err != nil && err != io.EOF { + log.Error("EventStream error", zap.Error(err)) + } + }() + md, ok := metadata.FromIncomingContext(stream.Context()) + if !ok { + return status.Errorf(codes.DataLoss, "EventStream: failed to get metadata") + } + s := md.Get("node_name") + if len(s) == 0 { + return status.Errorf(codes.InvalidArgument, "EventStream: missing node_name metadata") + } + nodeName := s[0] + if nodeName == "" { + return status.Errorf(codes.InvalidArgument, "EventStream: missing node_name metadata") + } + sess := f.sessionMgr.get(nodeName) + if sess == nil { + return status.Errorf(codes.Internal, "EventStream: node not exist") + } + for { + var in *Event + in, err = stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if ce := log.Check(zapcore.DebugLevel, "event received"); ce != nil { + ce.Write(zap.String("event", in.String())) + } + ack := f.eventStreamHandler(sess, in) + + err = stream.Send(ack) + if err != nil { + return err + } + if ce := log.Check(zapcore.DebugLevel, "event ack sent"); ce 
!= nil { + ce.Write(zap.Uint64("id", ack.EventId)) + } + sess.nextEventID = ack.EventId + 1 + } +} + +func (f *Federation) mustEmbedUnimplementedFederationServer() { + return +} + +var registerAPI = func(service server.Server, f *Federation) error { + apiRegistrar := service.APIRegistrar() + RegisterMembershipServer(apiRegistrar, f) + err := apiRegistrar.RegisterHTTPHandler(RegisterMembershipHandlerFromEndpoint) + return err +} + +func (f *Federation) Load(service server.Server) error { + err := registerAPI(service, f) + if err != nil { + return err + } + f.localSubStore.init(service.SubscriptionService()) + f.retainedStore = service.RetainedService() + f.publisher = service.Publisher() + srv := grpc.NewServer() + RegisterFederationServer(srv, f) + l, err := net.Listen("tcp", f.config.FedAddr) + if err != nil { + return err + } + go func() { + err := srv.Serve(l) + if err != nil { + panic(err) + } + }() + t := time.NewTimer(0) + timeout := time.NewTimer(f.config.RetryTimeout) + for { + select { + case <-timeout.C: + log.Error("retry timeout", zap.Error(err)) + if err != nil { + err = fmt.Errorf("retry timeout: %s", err.Error()) + return err + } + return errors.New("retry timeout") + case <-t.C: + err = f.startSerf(t) + if err == nil { + log.Info("retry join succeed") + return nil + } + log.Info("retry join failed", zap.Error(err)) + } + } +} + +func (f *Federation) Unload() error { + err := f.serf.Leave() + if err != nil { + return err + } + return f.serf.Shutdown() +} + +func (f *Federation) Name() string { + return Name +} + +func messageToEvent(msg *gmqtt.Message) *Message { + eventMsg := &Message{ + TopicName: msg.Topic, + Payload: string(msg.Payload), + Qos: uint32(msg.QoS), + Retained: msg.Retained, + ContentType: msg.ContentType, + CorrelationData: string(msg.CorrelationData), + MessageExpiry: msg.MessageExpiry, + PayloadFormat: uint32(msg.PayloadFormat), + ResponseTopic: msg.ResponseTopic, + } + for _, v := range msg.UserProperties { + ppt := &UserProperty{ + K: make([]byte, len(v.K)), + V: make([]byte, len(v.V)), + } + copy(ppt.K, v.K) + copy(ppt.V, v.V) + eventMsg.UserProperties = append(eventMsg.UserProperties, ppt) + } + return eventMsg +} + +func eventToMessage(event *Message) *gmqtt.Message { + pubMsg := &gmqtt.Message{ + QoS: byte(event.Qos), + Retained: event.Retained, + Topic: event.TopicName, + Payload: []byte(event.Payload), + ContentType: event.ContentType, + CorrelationData: []byte(event.CorrelationData), + MessageExpiry: event.MessageExpiry, + PayloadFormat: packets.PayloadFormat(event.PayloadFormat), + ResponseTopic: event.ResponseTopic, + } + for _, v := range event.UserProperties { + pubMsg.UserProperties = append(pubMsg.UserProperties, packets.UserProperty{ + K: v.K, + V: v.V, + }) + } + return pubMsg +} diff --git a/plugin/federation/federation.pb.go b/plugin/federation/federation.pb.go new file mode 100644 index 00000000..eeab073e --- /dev/null +++ b/plugin/federation/federation.pb.go @@ -0,0 +1,1204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.13.0 +// source: federation.proto + +package federation + +import ( + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type Status int32 + +const ( + Status_STATUS_UNSPECIFIED Status = 0 + Status_STATUS_ALIVE Status = 1 + Status_STATUS_LEAVING Status = 2 + Status_STATUS_LEFT Status = 3 + Status_STATUS_FAILED Status = 4 +) + +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_ALIVE", + 2: "STATUS_LEAVING", + 3: "STATUS_LEFT", + 4: "STATUS_FAILED", + } + Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_ALIVE": 1, + "STATUS_LEAVING": 2, + "STATUS_LEFT": 3, + "STATUS_FAILED": 4, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_federation_proto_enumTypes[0].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_federation_proto_enumTypes[0] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{0} +} + +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to Event: + // *Event_Subscribe + // *Event_Message + // *Event_Unsubscribe + Event isEvent_Event `protobuf_oneof:"Event"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. 
+func (*Event) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{0} +} + +func (x *Event) GetId() uint64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Event) GetEvent() isEvent_Event { + if m != nil { + return m.Event + } + return nil +} + +func (x *Event) GetSubscribe() *Subscribe { + if x, ok := x.GetEvent().(*Event_Subscribe); ok { + return x.Subscribe + } + return nil +} + +func (x *Event) GetMessage() *Message { + if x, ok := x.GetEvent().(*Event_Message); ok { + return x.Message + } + return nil +} + +func (x *Event) GetUnsubscribe() *Unsubscribe { + if x, ok := x.GetEvent().(*Event_Unsubscribe); ok { + return x.Unsubscribe + } + return nil +} + +type isEvent_Event interface { + isEvent_Event() +} + +type Event_Subscribe struct { + Subscribe *Subscribe `protobuf:"bytes,2,opt,name=Subscribe,proto3,oneof"` +} + +type Event_Message struct { + Message *Message `protobuf:"bytes,3,opt,name=message,proto3,oneof"` +} + +type Event_Unsubscribe struct { + Unsubscribe *Unsubscribe `protobuf:"bytes,4,opt,name=unsubscribe,proto3,oneof"` +} + +func (*Event_Subscribe) isEvent_Event() {} + +func (*Event_Message) isEvent_Event() {} + +func (*Event_Unsubscribe) isEvent_Event() {} + +// Subscribe represents the subscription for a node, it is used to route message among nodes, +// so only shared_name and topic_filter is required. +type Subscribe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShareName string `protobuf:"bytes,1,opt,name=share_name,json=shareName,proto3" json:"share_name,omitempty"` + TopicFilter string `protobuf:"bytes,2,opt,name=topic_filter,json=topicFilter,proto3" json:"topic_filter,omitempty"` +} + +func (x *Subscribe) Reset() { + *x = Subscribe{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Subscribe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Subscribe) ProtoMessage() {} + +func (x *Subscribe) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Subscribe.ProtoReflect.Descriptor instead. +func (*Subscribe) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{1} +} + +func (x *Subscribe) GetShareName() string { + if x != nil { + return x.ShareName + } + return "" +} + +func (x *Subscribe) GetTopicFilter() string { + if x != nil { + return x.TopicFilter + } + return "" +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopicName string `protobuf:"bytes,1,opt,name=topic_name,json=topicName,proto3" json:"topic_name,omitempty"` + Payload string `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + Qos uint32 `protobuf:"varint,3,opt,name=qos,proto3" json:"qos,omitempty"` + Retained bool `protobuf:"varint,4,opt,name=retained,proto3" json:"retained,omitempty"` + // the following fields are using in v5 client. 
+ ContentType string `protobuf:"bytes,5,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + CorrelationData string `protobuf:"bytes,6,opt,name=correlation_data,json=correlationData,proto3" json:"correlation_data,omitempty"` + MessageExpiry uint32 `protobuf:"varint,7,opt,name=message_expiry,json=messageExpiry,proto3" json:"message_expiry,omitempty"` + PayloadFormat uint32 `protobuf:"varint,8,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` + ResponseTopic string `protobuf:"bytes,9,opt,name=response_topic,json=responseTopic,proto3" json:"response_topic,omitempty"` + UserProperties []*UserProperty `protobuf:"bytes,10,rep,name=user_properties,json=userProperties,proto3" json:"user_properties,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{2} +} + +func (x *Message) GetTopicName() string { + if x != nil { + return x.TopicName + } + return "" +} + +func (x *Message) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +func (x *Message) GetQos() uint32 { + if x != nil { + return x.Qos + } + return 0 +} + +func (x *Message) GetRetained() bool { + if x != nil { + return x.Retained + } + return false +} + +func (x *Message) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Message) GetCorrelationData() string { + if x != nil { + return x.CorrelationData + } + return "" +} + +func (x *Message) GetMessageExpiry() uint32 { + if x != nil { + return x.MessageExpiry + } + return 0 +} + +func (x *Message) GetPayloadFormat() uint32 { + if x != nil { + return x.PayloadFormat + } + return 0 +} + +func (x *Message) GetResponseTopic() string { + if x != nil { + return x.ResponseTopic + } + return "" +} + +func (x *Message) GetUserProperties() []*UserProperty { + if x != nil { + return x.UserProperties + } + return nil +} + +type UserProperty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + K []byte `protobuf:"bytes,1,opt,name=K,proto3" json:"K,omitempty"` + V []byte `protobuf:"bytes,2,opt,name=V,proto3" json:"V,omitempty"` +} + +func (x *UserProperty) Reset() { + *x = UserProperty{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UserProperty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserProperty) ProtoMessage() {} + +func (x *UserProperty) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use UserProperty.ProtoReflect.Descriptor instead. +func (*UserProperty) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{3} +} + +func (x *UserProperty) GetK() []byte { + if x != nil { + return x.K + } + return nil +} + +func (x *UserProperty) GetV() []byte { + if x != nil { + return x.V + } + return nil +} + +type Unsubscribe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopicName string `protobuf:"bytes,1,opt,name=topic_name,json=topicName,proto3" json:"topic_name,omitempty"` +} + +func (x *Unsubscribe) Reset() { + *x = Unsubscribe{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Unsubscribe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Unsubscribe) ProtoMessage() {} + +func (x *Unsubscribe) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Unsubscribe.ProtoReflect.Descriptor instead. +func (*Unsubscribe) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{4} +} + +func (x *Unsubscribe) GetTopicName() string { + if x != nil { + return x.TopicName + } + return "" +} + +type Ack struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventId uint64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` +} + +func (x *Ack) Reset() { + *x = Ack{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Ack) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Ack) ProtoMessage() {} + +func (x *Ack) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Ack.ProtoReflect.Descriptor instead. +func (*Ack) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{5} +} + +func (x *Ack) GetEventId() uint64 { + if x != nil { + return x.EventId + } + return 0 +} + +// ClientHello is the request message in handshake process. 
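+// Added note (illustrative, not generator output): the client identifies
+// itself with a session_id; the server appears to answer with a ServerHello
+// whose clean_start and next_event_id indicate whether the previous event
+// stream can be resumed and from which event id. A minimal client-side
+// sketch, assuming a FederationClient fc, a context ctx and a hypothetical
+// session id:
+//
+//	sh, err := fc.Hello(ctx, &ClientHello{SessionId: "node1-session"})
+//	if err == nil && !sh.CleanStart {
+//		resumeFrom := sh.NextEventId // continue the event stream from this id
+//		_ = resumeFrom
+//	}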
+type ClientHello struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (x *ClientHello) Reset() { + *x = ClientHello{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHello) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHello) ProtoMessage() {} + +func (x *ClientHello) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHello.ProtoReflect.Descriptor instead. +func (*ClientHello) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{6} +} + +func (x *ClientHello) GetSessionId() string { + if x != nil { + return x.SessionId + } + return "" +} + +// ServerHello is the response message in handshake process. +type ServerHello struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CleanStart bool `protobuf:"varint,1,opt,name=clean_start,json=cleanStart,proto3" json:"clean_start,omitempty"` + NextEventId uint64 `protobuf:"varint,2,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` +} + +func (x *ServerHello) Reset() { + *x = ServerHello{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHello) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHello) ProtoMessage() {} + +func (x *ServerHello) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHello.ProtoReflect.Descriptor instead. 
+func (*ServerHello) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{7} +} + +func (x *ServerHello) GetCleanStart() bool { + if x != nil { + return x.CleanStart + } + return false +} + +func (x *ServerHello) GetNextEventId() uint64 { + if x != nil { + return x.NextEventId + } + return 0 +} + +type JoinRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hosts []string `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` +} + +func (x *JoinRequest) Reset() { + *x = JoinRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinRequest) ProtoMessage() {} + +func (x *JoinRequest) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinRequest.ProtoReflect.Descriptor instead. +func (*JoinRequest) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{8} +} + +func (x *JoinRequest) GetHosts() []string { + if x != nil { + return x.Hosts + } + return nil +} + +type Member struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` + Tags map[string]string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=gmqtt.federation.api.Status" json:"status,omitempty"` +} + +func (x *Member) Reset() { + *x = Member{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Member) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Member) ProtoMessage() {} + +func (x *Member) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Member.ProtoReflect.Descriptor instead. 
+func (*Member) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{9} +} + +func (x *Member) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Member) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +func (x *Member) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Member) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_STATUS_UNSPECIFIED +} + +type ListMembersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Members []*Member `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` +} + +func (x *ListMembersResponse) Reset() { + *x = ListMembersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMembersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMembersResponse) ProtoMessage() {} + +func (x *ListMembersResponse) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMembersResponse.ProtoReflect.Descriptor instead. +func (*ListMembersResponse) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{10} +} + +func (x *ListMembersResponse) GetMembers() []*Member { + if x != nil { + return x.Members + } + return nil +} + +type ForceLeaveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` +} + +func (x *ForceLeaveRequest) Reset() { + *x = ForceLeaveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_federation_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForceLeaveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceLeaveRequest) ProtoMessage() {} + +func (x *ForceLeaveRequest) ProtoReflect() protoreflect.Message { + mi := &file_federation_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceLeaveRequest.ProtoReflect.Descriptor instead. 
+func (*ForceLeaveRequest) Descriptor() ([]byte, []int) { + return file_federation_proto_rawDescGZIP(), []int{11} +} + +func (x *ForceLeaveRequest) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +var File_federation_proto protoreflect.FileDescriptor + +var file_federation_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xe3, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3f, 0x0a, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x48, 0x00, 0x52, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x39, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x75, 0x6e, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x42, 0x07, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x09, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x80, 0x03, 0x0a, 0x07, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x71, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x71, 0x6f, 0x73, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x21, 0x0a, 
0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, + 0x0a, 0x10, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x79, + 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x4b, + 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, + 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, + 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x2a, 0x0a, 0x0c, 0x55, + 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x4b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x4b, 0x12, 0x0c, 0x0a, 0x01, 0x56, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x56, 0x22, 0x2c, 0x0a, 0x0b, 0x55, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x20, 0x0a, 0x03, 0x41, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x2c, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x52, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, + 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x6c, 0x65, 0x61, 0x6e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6e, 0x65, + 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x23, 0x0a, 0x0b, 0x4a, 0x6f, 0x69, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x68, 0x6f, 0x73, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x22, 0xdb, + 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x64, 0x64, + 0x72, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x54, 0x61, + 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, + 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4d, 0x0a, 0x13, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x6d, 0x62, + 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, 0x30, 0x0a, 0x11, 0x46, + 0x6f, 0x72, 0x63, 0x65, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0x6a, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x4c, 0x49, 0x56, 0x45, 0x10, + 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4c, 0x45, 0x41, 0x56, + 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x4c, 0x45, 0x46, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0xb1, 0x03, 0x0a, 0x0a, 0x4d, 0x65, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x61, 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, + 0x12, 0x21, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x18, 0x22, 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x58, 0x0a, 0x05, 0x4c, + 0x65, 0x61, 0x76, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x22, 0x14, 0x2f, 0x76, + 0x31, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6c, 0x65, 0x61, + 0x76, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0a, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x4c, 0x65, + 0x61, 0x76, 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, + 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x76, + 0x31, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x70, 0x0a, 0x0b, 0x4c, + 0x69, 0x73, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x29, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x32, 0xaa, 0x01, + 0x0a, 0x0a, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x05, + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x21, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, + 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x1a, 0x21, 0x2e, 0x67, 0x6d, 0x71, 0x74, 0x74, + 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x22, 0x00, 0x12, 0x4b, 0x0a, + 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1b, 0x2e, 0x67, + 0x6d, 0x71, 0x74, 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6d, 0x71, 0x74, + 0x74, 0x2e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x41, 0x63, 0x6b, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0e, 0x5a, 0x0c, 0x2e, 0x3b, + 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_federation_proto_rawDescOnce sync.Once + file_federation_proto_rawDescData = file_federation_proto_rawDesc +) + +func file_federation_proto_rawDescGZIP() []byte { + file_federation_proto_rawDescOnce.Do(func() { + file_federation_proto_rawDescData = protoimpl.X.CompressGZIP(file_federation_proto_rawDescData) + }) + return file_federation_proto_rawDescData +} + +var file_federation_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_federation_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var 
file_federation_proto_goTypes = []interface{}{ + (Status)(0), // 0: gmqtt.federation.api.Status + (*Event)(nil), // 1: gmqtt.federation.api.Event + (*Subscribe)(nil), // 2: gmqtt.federation.api.Subscribe + (*Message)(nil), // 3: gmqtt.federation.api.Message + (*UserProperty)(nil), // 4: gmqtt.federation.api.UserProperty + (*Unsubscribe)(nil), // 5: gmqtt.federation.api.Unsubscribe + (*Ack)(nil), // 6: gmqtt.federation.api.Ack + (*ClientHello)(nil), // 7: gmqtt.federation.api.ClientHello + (*ServerHello)(nil), // 8: gmqtt.federation.api.ServerHello + (*JoinRequest)(nil), // 9: gmqtt.federation.api.JoinRequest + (*Member)(nil), // 10: gmqtt.federation.api.Member + (*ListMembersResponse)(nil), // 11: gmqtt.federation.api.ListMembersResponse + (*ForceLeaveRequest)(nil), // 12: gmqtt.federation.api.ForceLeaveRequest + nil, // 13: gmqtt.federation.api.Member.TagsEntry + (*empty.Empty)(nil), // 14: google.protobuf.Empty +} +var file_federation_proto_depIdxs = []int32{ + 2, // 0: gmqtt.federation.api.Event.Subscribe:type_name -> gmqtt.federation.api.Subscribe + 3, // 1: gmqtt.federation.api.Event.message:type_name -> gmqtt.federation.api.Message + 5, // 2: gmqtt.federation.api.Event.unsubscribe:type_name -> gmqtt.federation.api.Unsubscribe + 4, // 3: gmqtt.federation.api.Message.user_properties:type_name -> gmqtt.federation.api.UserProperty + 13, // 4: gmqtt.federation.api.Member.tags:type_name -> gmqtt.federation.api.Member.TagsEntry + 0, // 5: gmqtt.federation.api.Member.status:type_name -> gmqtt.federation.api.Status + 10, // 6: gmqtt.federation.api.ListMembersResponse.members:type_name -> gmqtt.federation.api.Member + 9, // 7: gmqtt.federation.api.Membership.Join:input_type -> gmqtt.federation.api.JoinRequest + 14, // 8: gmqtt.federation.api.Membership.Leave:input_type -> google.protobuf.Empty + 12, // 9: gmqtt.federation.api.Membership.ForceLeave:input_type -> gmqtt.federation.api.ForceLeaveRequest + 14, // 10: gmqtt.federation.api.Membership.ListMembers:input_type -> google.protobuf.Empty + 7, // 11: gmqtt.federation.api.Federation.Hello:input_type -> gmqtt.federation.api.ClientHello + 1, // 12: gmqtt.federation.api.Federation.EventStream:input_type -> gmqtt.federation.api.Event + 14, // 13: gmqtt.federation.api.Membership.Join:output_type -> google.protobuf.Empty + 14, // 14: gmqtt.federation.api.Membership.Leave:output_type -> google.protobuf.Empty + 14, // 15: gmqtt.federation.api.Membership.ForceLeave:output_type -> google.protobuf.Empty + 11, // 16: gmqtt.federation.api.Membership.ListMembers:output_type -> gmqtt.federation.api.ListMembersResponse + 8, // 17: gmqtt.federation.api.Federation.Hello:output_type -> gmqtt.federation.api.ServerHello + 6, // 18: gmqtt.federation.api.Federation.EventStream:output_type -> gmqtt.federation.api.Ack + 13, // [13:19] is the sub-list for method output_type + 7, // [7:13] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_federation_proto_init() } +func file_federation_proto_init() { + if File_federation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_federation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[1].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Subscribe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UserProperty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Unsubscribe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Ack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHello); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHello); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Member); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMembersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_federation_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForceLeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_federation_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Event_Subscribe)(nil), + (*Event_Message)(nil), + (*Event_Unsubscribe)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_federation_proto_rawDesc, + NumEnums: 1, + NumMessages: 13, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_federation_proto_goTypes, + DependencyIndexes: file_federation_proto_depIdxs, + EnumInfos: file_federation_proto_enumTypes, + MessageInfos: file_federation_proto_msgTypes, + }.Build() + File_federation_proto = out.File + file_federation_proto_rawDesc = nil + file_federation_proto_goTypes = nil + file_federation_proto_depIdxs = nil +} diff --git a/plugin/federation/federation.pb.gw.go b/plugin/federation/federation.pb.gw.go new 
file mode 100644 index 00000000..031a6fac --- /dev/null +++ b/plugin/federation/federation.pb.gw.go @@ -0,0 +1,382 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: federation.proto + +/* +Package federation is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package federation + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Membership_Join_0(ctx context.Context, marshaler runtime.Marshaler, client MembershipClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Join(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Membership_Join_0(ctx context.Context, marshaler runtime.Marshaler, server MembershipServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Join(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Membership_Leave_0(ctx context.Context, marshaler runtime.Marshaler, client MembershipClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Membership_Leave_0(ctx context.Context, marshaler runtime.Marshaler, server MembershipServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Leave(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Membership_ForceLeave_0(ctx context.Context, marshaler runtime.Marshaler, client MembershipClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ForceLeaveRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ForceLeave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Membership_ForceLeave_0(ctx context.Context, marshaler runtime.Marshaler, server MembershipServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ForceLeaveRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ForceLeave(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Membership_ListMembers_0(ctx context.Context, marshaler runtime.Marshaler, client MembershipClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ListMembers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Membership_ListMembers_0(ctx context.Context, marshaler runtime.Marshaler, server MembershipServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.ListMembers(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMembershipHandlerServer registers the http handlers for service Membership to "mux". +// UnaryRPC :call MembershipServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+func RegisterMembershipHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MembershipServer) error { + + mux.Handle("POST", pattern_Membership_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Membership_Join_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Membership_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Membership_Leave_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Membership_ForceLeave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Membership_ForceLeave_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_ForceLeave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Membership_ListMembers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Membership_ListMembers_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_ListMembers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMembershipHandlerFromEndpoint is same as RegisterMembershipHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
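+// Added usage sketch (illustrative, not generator output), assuming a
+// hypothetical gRPC endpoint and HTTP listen address:
+//
+//	ctx := context.Background()
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterMembershipHandlerFromEndpoint(ctx, mux, "127.0.0.1:8084", opts); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8083", mux)) // serves the /v1/federation/* routes defined below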
+func RegisterMembershipHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMembershipHandler(ctx, mux, conn) +} + +// RegisterMembershipHandler registers the http handlers for service Membership to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMembershipHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMembershipHandlerClient(ctx, mux, NewMembershipClient(conn)) +} + +// RegisterMembershipHandlerClient registers the http handlers for service Membership +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MembershipClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MembershipClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MembershipClient" to call the correct interceptors. +func RegisterMembershipHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MembershipClient) error { + + mux.Handle("POST", pattern_Membership_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Membership_Join_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Membership_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Membership_Leave_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Membership_ForceLeave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Membership_ForceLeave_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_ForceLeave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Membership_ListMembers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Membership_ListMembers_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Membership_ListMembers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Membership_Join_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "federation", "join"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Membership_Leave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "federation", "leave"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Membership_ForceLeave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "federation", "force_leave"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Membership_ListMembers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "federation", "members"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Membership_Join_0 = runtime.ForwardResponseMessage + + forward_Membership_Leave_0 = runtime.ForwardResponseMessage + + forward_Membership_ForceLeave_0 = runtime.ForwardResponseMessage + + forward_Membership_ListMembers_0 = runtime.ForwardResponseMessage +) diff --git a/plugin/federation/federation.pb_mock.go b/plugin/federation/federation.pb_mock.go new file mode 100644 index 00000000..fab75f61 --- /dev/null +++ b/plugin/federation/federation.pb_mock.go @@ -0,0 +1,45 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: plugin/federation/federation.pb.go + +// Package federation is a generated GoMock package. 
+package federation + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockisEvent_Event is a mock of isEvent_Event interface +type MockisEvent_Event struct { + ctrl *gomock.Controller + recorder *MockisEvent_EventMockRecorder +} + +// MockisEvent_EventMockRecorder is the mock recorder for MockisEvent_Event +type MockisEvent_EventMockRecorder struct { + mock *MockisEvent_Event +} + +// NewMockisEvent_Event creates a new mock instance +func NewMockisEvent_Event(ctrl *gomock.Controller) *MockisEvent_Event { + mock := &MockisEvent_Event{ctrl: ctrl} + mock.recorder = &MockisEvent_EventMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisEvent_Event) EXPECT() *MockisEvent_EventMockRecorder { + return m.recorder +} + +// isEvent_Event mocks base method +func (m *MockisEvent_Event) isEvent_Event() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "isEvent_Event") +} + +// isEvent_Event indicates an expected call of isEvent_Event +func (mr *MockisEvent_EventMockRecorder) isEvent_Event() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isEvent_Event", reflect.TypeOf((*MockisEvent_Event)(nil).isEvent_Event)) +} diff --git a/plugin/federation/federation_grpc.pb.go b/plugin/federation/federation_grpc.pb.go new file mode 100644 index 00000000..743ca532 --- /dev/null +++ b/plugin/federation/federation_grpc.pb.go @@ -0,0 +1,378 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package federation + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// MembershipClient is the client API for Membership service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MembershipClient interface { + // Join tells the local node to join the an existing cluster. + // See https://www.serf.io/docs/commands/join.html for details. + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Leave triggers a graceful leave for the local node. + // This is used to ensure other nodes see the node as "left" instead of "failed". + // Note that a leaved node cannot re-join the cluster unless you restart the leaved node. + Leave(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) + // ForceLeave force forces a member of a Serf cluster to enter the "left" state. + // Note that if the member is still actually alive, it will eventually rejoin the cluster. + // The true purpose of this method is to force remove "failed" nodes + // See https://www.serf.io/docs/commands/force-leave.html for details. + ForceLeave(ctx context.Context, in *ForceLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // ListMembers lists all known members in the Serf cluster. 
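+	// Added note (illustrative, not generator output): each Membership RPC is
+	// also exposed over HTTP by the grpc-gateway code in federation.pb.gw.go,
+	// i.e. POST /v1/federation/join, POST /v1/federation/leave,
+	// POST /v1/federation/force_leave and GET /v1/federation/members.
+	// A minimal gRPC client sketch with a hypothetical address:
+	//
+	//	conn, err := grpc.Dial("127.0.0.1:8084", grpc.WithInsecure())
+	//	if err == nil {
+	//		resp, _ := NewMembershipClient(conn).ListMembers(context.Background(), &empty.Empty{})
+	//		_ = resp.GetMembers()
+	//	}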
+ ListMembers(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListMembersResponse, error) +} + +type membershipClient struct { + cc grpc.ClientConnInterface +} + +func NewMembershipClient(cc grpc.ClientConnInterface) MembershipClient { + return &membershipClient{cc} +} + +func (c *membershipClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/gmqtt.federation.api.Membership/Join", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *membershipClient) Leave(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/gmqtt.federation.api.Membership/Leave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *membershipClient) ForceLeave(ctx context.Context, in *ForceLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/gmqtt.federation.api.Membership/ForceLeave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *membershipClient) ListMembers(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListMembersResponse, error) { + out := new(ListMembersResponse) + err := c.cc.Invoke(ctx, "/gmqtt.federation.api.Membership/ListMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MembershipServer is the server API for Membership service. +// All implementations must embed UnimplementedMembershipServer +// for forward compatibility +type MembershipServer interface { + // Join tells the local node to join the an existing cluster. + // See https://www.serf.io/docs/commands/join.html for details. + Join(context.Context, *JoinRequest) (*empty.Empty, error) + // Leave triggers a graceful leave for the local node. + // This is used to ensure other nodes see the node as "left" instead of "failed". + // Note that a leaved node cannot re-join the cluster unless you restart the leaved node. + Leave(context.Context, *empty.Empty) (*empty.Empty, error) + // ForceLeave force forces a member of a Serf cluster to enter the "left" state. + // Note that if the member is still actually alive, it will eventually rejoin the cluster. + // The true purpose of this method is to force remove "failed" nodes + // See https://www.serf.io/docs/commands/force-leave.html for details. + ForceLeave(context.Context, *ForceLeaveRequest) (*empty.Empty, error) + // ListMembers lists all known members in the Serf cluster. + ListMembers(context.Context, *empty.Empty) (*ListMembersResponse, error) + mustEmbedUnimplementedMembershipServer() +} + +// UnimplementedMembershipServer must be embedded to have forward compatible implementations. 
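+// Added sketch (illustrative, not generator output): a concrete server embeds
+// it and overrides only the methods it supports, e.g.
+//
+//	type membershipService struct {
+//		UnimplementedMembershipServer
+//	}
+//
+//	func (membershipService) ListMembers(ctx context.Context, _ *empty.Empty) (*ListMembersResponse, error) {
+//		return &ListMembersResponse{}, nil // hypothetical: return the locally known members here
+//	}
+//
+// and is then wired into a *grpc.Server via RegisterMembershipServer.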
+type UnimplementedMembershipServer struct { +} + +func (UnimplementedMembershipServer) Join(context.Context, *JoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Join not implemented") +} +func (UnimplementedMembershipServer) Leave(context.Context, *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") +} +func (UnimplementedMembershipServer) ForceLeave(context.Context, *ForceLeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceLeave not implemented") +} +func (UnimplementedMembershipServer) ListMembers(context.Context, *empty.Empty) (*ListMembersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMembers not implemented") +} +func (UnimplementedMembershipServer) mustEmbedUnimplementedMembershipServer() {} + +// UnsafeMembershipServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MembershipServer will +// result in compilation errors. +type UnsafeMembershipServer interface { + mustEmbedUnimplementedMembershipServer() +} + +func RegisterMembershipServer(s grpc.ServiceRegistrar, srv MembershipServer) { + s.RegisterService(&_Membership_serviceDesc, srv) +} + +func _Membership_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MembershipServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gmqtt.federation.api.Membership/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MembershipServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Membership_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MembershipServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gmqtt.federation.api.Membership/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MembershipServer).Leave(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Membership_ForceLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForceLeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MembershipServer).ForceLeave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gmqtt.federation.api.Membership/ForceLeave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MembershipServer).ForceLeave(ctx, req.(*ForceLeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Membership_ListMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(MembershipServer).ListMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gmqtt.federation.api.Membership/ListMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MembershipServer).ListMembers(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Membership_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gmqtt.federation.api.Membership", + HandlerType: (*MembershipServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Join", + Handler: _Membership_Join_Handler, + }, + { + MethodName: "Leave", + Handler: _Membership_Leave_Handler, + }, + { + MethodName: "ForceLeave", + Handler: _Membership_ForceLeave_Handler, + }, + { + MethodName: "ListMembers", + Handler: _Membership_ListMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "federation.proto", +} + +// FederationClient is the client API for Federation service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FederationClient interface { + Hello(ctx context.Context, in *ClientHello, opts ...grpc.CallOption) (*ServerHello, error) + EventStream(ctx context.Context, opts ...grpc.CallOption) (Federation_EventStreamClient, error) +} + +type federationClient struct { + cc grpc.ClientConnInterface +} + +func NewFederationClient(cc grpc.ClientConnInterface) FederationClient { + return &federationClient{cc} +} + +func (c *federationClient) Hello(ctx context.Context, in *ClientHello, opts ...grpc.CallOption) (*ServerHello, error) { + out := new(ServerHello) + err := c.cc.Invoke(ctx, "/gmqtt.federation.api.Federation/Hello", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *federationClient) EventStream(ctx context.Context, opts ...grpc.CallOption) (Federation_EventStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Federation_serviceDesc.Streams[0], "/gmqtt.federation.api.Federation/EventStream", opts...) + if err != nil { + return nil, err + } + x := &federationEventStreamClient{stream} + return x, nil +} + +type Federation_EventStreamClient interface { + Send(*Event) error + Recv() (*Ack, error) + grpc.ClientStream +} + +type federationEventStreamClient struct { + grpc.ClientStream +} + +func (x *federationEventStreamClient) Send(m *Event) error { + return x.ClientStream.SendMsg(m) +} + +func (x *federationEventStreamClient) Recv() (*Ack, error) { + m := new(Ack) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// FederationServer is the server API for Federation service. +// All implementations must embed UnimplementedFederationServer +// for forward compatibility +type FederationServer interface { + Hello(context.Context, *ClientHello) (*ServerHello, error) + EventStream(Federation_EventStreamServer) error + mustEmbedUnimplementedFederationServer() +} + +// UnimplementedFederationServer must be embedded to have forward compatible implementations. 
+type UnimplementedFederationServer struct { +} + +func (UnimplementedFederationServer) Hello(context.Context, *ClientHello) (*ServerHello, error) { + return nil, status.Errorf(codes.Unimplemented, "method Hello not implemented") +} +func (UnimplementedFederationServer) EventStream(Federation_EventStreamServer) error { + return status.Errorf(codes.Unimplemented, "method EventStream not implemented") +} +func (UnimplementedFederationServer) mustEmbedUnimplementedFederationServer() {} + +// UnsafeFederationServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FederationServer will +// result in compilation errors. +type UnsafeFederationServer interface { + mustEmbedUnimplementedFederationServer() +} + +func RegisterFederationServer(s grpc.ServiceRegistrar, srv FederationServer) { + s.RegisterService(&_Federation_serviceDesc, srv) +} + +func _Federation_Hello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClientHello) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FederationServer).Hello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gmqtt.federation.api.Federation/Hello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FederationServer).Hello(ctx, req.(*ClientHello)) + } + return interceptor(ctx, in, info, handler) +} + +func _Federation_EventStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FederationServer).EventStream(&federationEventStreamServer{stream}) +} + +type Federation_EventStreamServer interface { + Send(*Ack) error + Recv() (*Event, error) + grpc.ServerStream +} + +type federationEventStreamServer struct { + grpc.ServerStream +} + +func (x *federationEventStreamServer) Send(m *Ack) error { + return x.ServerStream.SendMsg(m) +} + +func (x *federationEventStreamServer) Recv() (*Event, error) { + m := new(Event) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Federation_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gmqtt.federation.api.Federation", + HandlerType: (*FederationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Hello", + Handler: _Federation_Hello_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "EventStream", + Handler: _Federation_EventStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "federation.proto", +} diff --git a/plugin/federation/federation_grpc.pb_mock.go b/plugin/federation/federation_grpc.pb_mock.go new file mode 100644 index 00000000..5237a9d7 --- /dev/null +++ b/plugin/federation/federation_grpc.pb_mock.go @@ -0,0 +1,213 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/DrmagicE/gmqtt/plugin/federation (interfaces: FederationClient,Federation_EventStreamClient) + +// Package federation is a generated GoMock package. 
+package federation + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" + reflect "reflect" +) + +// MockFederationClient is a mock of FederationClient interface +type MockFederationClient struct { + ctrl *gomock.Controller + recorder *MockFederationClientMockRecorder +} + +// MockFederationClientMockRecorder is the mock recorder for MockFederationClient +type MockFederationClientMockRecorder struct { + mock *MockFederationClient +} + +// NewMockFederationClient creates a new mock instance +func NewMockFederationClient(ctrl *gomock.Controller) *MockFederationClient { + mock := &MockFederationClient{ctrl: ctrl} + mock.recorder = &MockFederationClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFederationClient) EXPECT() *MockFederationClientMockRecorder { + return m.recorder +} + +// EventStream mocks base method +func (m *MockFederationClient) EventStream(arg0 context.Context, arg1 ...grpc.CallOption) (Federation_EventStreamClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EventStream", varargs...) + ret0, _ := ret[0].(Federation_EventStreamClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EventStream indicates an expected call of EventStream +func (mr *MockFederationClientMockRecorder) EventStream(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventStream", reflect.TypeOf((*MockFederationClient)(nil).EventStream), varargs...) +} + +// Hello mocks base method +func (m *MockFederationClient) Hello(arg0 context.Context, arg1 *ClientHello, arg2 ...grpc.CallOption) (*ServerHello, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Hello", varargs...) + ret0, _ := ret[0].(*ServerHello) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Hello indicates an expected call of Hello +func (mr *MockFederationClientMockRecorder) Hello(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hello", reflect.TypeOf((*MockFederationClient)(nil).Hello), varargs...) 
+} + +// MockFederation_EventStreamClient is a mock of Federation_EventStreamClient interface +type MockFederation_EventStreamClient struct { + ctrl *gomock.Controller + recorder *MockFederation_EventStreamClientMockRecorder +} + +// MockFederation_EventStreamClientMockRecorder is the mock recorder for MockFederation_EventStreamClient +type MockFederation_EventStreamClientMockRecorder struct { + mock *MockFederation_EventStreamClient +} + +// NewMockFederation_EventStreamClient creates a new mock instance +func NewMockFederation_EventStreamClient(ctrl *gomock.Controller) *MockFederation_EventStreamClient { + mock := &MockFederation_EventStreamClient{ctrl: ctrl} + mock.recorder = &MockFederation_EventStreamClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFederation_EventStreamClient) EXPECT() *MockFederation_EventStreamClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method +func (m *MockFederation_EventStreamClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend +func (mr *MockFederation_EventStreamClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).CloseSend)) +} + +// Context mocks base method +func (m *MockFederation_EventStreamClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context +func (mr *MockFederation_EventStreamClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).Context)) +} + +// Header mocks base method +func (m *MockFederation_EventStreamClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header +func (mr *MockFederation_EventStreamClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).Header)) +} + +// Recv mocks base method +func (m *MockFederation_EventStreamClient) Recv() (*Ack, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*Ack) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv +func (mr *MockFederation_EventStreamClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).Recv)) +} + +// RecvMsg mocks base method +func (m *MockFederation_EventStreamClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg +func (mr *MockFederation_EventStreamClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).RecvMsg), arg0) +} 
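+
+// newAckingStreamClient is an illustrative sketch (not part of the generated
+// mocks) of how these mocks can be wired together in a test: the returned
+// stream-client mock accepts any Send, answers the first Recv with an Ack for
+// event 0 and the second Recv with io.EOF, and tolerates CloseSend. The helper
+// name is an assumption for the example, and it presumes the usual io and
+// gomock imports.
+func newAckingStreamClient(ctrl *gomock.Controller) *MockFederation_EventStreamClient {
+	c := NewMockFederation_EventStreamClient(ctrl)
+	c.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes()
+	first := c.EXPECT().Recv().Return(&Ack{EventId: 0}, nil)
+	c.EXPECT().Recv().Return(nil, io.EOF).After(first)
+	c.EXPECT().CloseSend().Return(nil).AnyTimes()
+	return c
+}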
+ +// Send mocks base method +func (m *MockFederation_EventStreamClient) Send(arg0 *Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send +func (mr *MockFederation_EventStreamClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).Send), arg0) +} + +// SendMsg mocks base method +func (m *MockFederation_EventStreamClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg +func (mr *MockFederation_EventStreamClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method +func (m *MockFederation_EventStreamClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer +func (mr *MockFederation_EventStreamClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockFederation_EventStreamClient)(nil).Trailer)) +} diff --git a/plugin/federation/federation_test.go b/plugin/federation/federation_test.go new file mode 100644 index 00000000..bc588bf0 --- /dev/null +++ b/plugin/federation/federation_test.go @@ -0,0 +1,454 @@ +package federation + +import ( + "context" + "net" + "strconv" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/metadata" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/persistence/subscription/mem" + "github.com/DrmagicE/gmqtt/pkg/packets" + "github.com/DrmagicE/gmqtt/server" +) + +func TestLocalSubStore_init(t *testing.T) { + a := assert.New(t) + var tt = struct { + clientID []string + topics []*gmqtt.Subscription + expected map[string]uint64 + }{ + clientID: []string{"client1", "client2", "client3"}, + topics: []*gmqtt.Subscription{ + { + ShareName: "abc", + TopicFilter: "filter1", + }, { + TopicFilter: "filter2", + }, { + TopicFilter: "filter3", + }, + }, + expected: map[string]uint64{ + "$share/abc/filter1": 3, + "filter2": 3, + "filter3": 3, + }, + } + l := &localSubStore{} + subStore := mem.NewStore() + for _, v := range tt.clientID { + _, err := subStore.Subscribe(v, tt.topics...) 
+ a.Nil(err) + } + l.init(subStore) + l.Lock() + a.Equal(tt.expected, l.topics) + l.Unlock() +} + +func TestLocalSubStore_sub_unsub(t *testing.T) { + a := assert.New(t) + + l := &localSubStore{} + subStore := mem.NewStore() + l.init(subStore) + + a.True(l.subscribe("client1", "topic1")) + // test duplicated subscribe + a.False(l.subscribe("client1", "topic1")) + a.Equal(map[string]uint64{ + "topic1": 1, + }, l.topics) + a.Equal(map[string]map[string]struct{}{ + "client1": { + "topic1": struct{}{}, + }, + }, l.index) + + // test duplicated subscribe + a.False(l.subscribe("client2", "topic1")) + a.Equal(map[string]uint64{ + "topic1": 2, + }, l.topics) + a.Equal(map[string]map[string]struct{}{ + "client1": { + "topic1": struct{}{}, + }, + "client2": { + "topic1": struct{}{}, + }, + }, l.index) + + a.True(l.subscribe("client3", "topic2")) + a.Equal(map[string]uint64{ + "topic1": 2, + "topic2": 1, + }, l.topics) + a.Equal(map[string]map[string]struct{}{ + "client1": { + "topic1": struct{}{}, + }, + "client2": { + "topic1": struct{}{}, + }, + "client3": { + "topic2": struct{}{}, + }, + }, l.index) + + // test unsubscribe not exists topic + a.False(l.unsubscribe("client4", "topic1")) + a.Equal(map[string]uint64{ + "topic1": 2, + "topic2": 1, + }, l.topics) + + for i := 0; i < 1; i++ { + a.False(l.unsubscribe("client2", "topic1")) + a.Equal(map[string]uint64{ + "topic1": 1, + "topic2": 1, + }, l.topics) + a.Equal(map[string]map[string]struct{}{ + "client1": { + "topic1": struct{}{}, + }, + "client3": { + "topic2": struct{}{}, + }, + }, l.index) + } + + unsub := l.unsubscribeAll("client3") + a.Equal([]string{"topic2"}, unsub) + a.Equal(map[string]uint64{ + "topic1": 1, + }, l.topics) + + a.Equal(map[string]map[string]struct{}{ + "client1": { + "topic1": struct{}{}, + }, + }, l.index) + + a.Len(l.unsubscribeAll("client3"), 0) + + a.True(l.unsubscribe("client1", "topic1")) + a.False(l.unsubscribe("client1", "topic1")) +} + +func TestMessageToEvent(t *testing.T) { + a := assert.New(t) + var tt = []struct { + msg *gmqtt.Message + expected *Message + }{ + { + msg: &gmqtt.Message{ + Dup: true, + QoS: 1, + Retained: true, + Topic: "topic1", + Payload: []byte("topic1"), + PacketID: 1, + ContentType: "ct", + CorrelationData: []byte("data"), + MessageExpiry: 1, + PayloadFormat: 1, + ResponseTopic: "respTopic", + UserProperties: []packets.UserProperty{ + { + K: []byte("K"), + V: []byte("V"), + }, + }, + }, + expected: &Message{ + TopicName: "topic1", + Payload: "topic1", + Qos: 1, + Retained: true, + ContentType: "ct", + CorrelationData: "data", + MessageExpiry: 1, + PayloadFormat: 1, + ResponseTopic: "respTopic", + UserProperties: []*UserProperty{ + { + K: []byte("K"), + V: []byte("V"), + }, + }, + }, + }, + } + for _, v := range tt { + a.Equal(v.expected, messageToEvent(v.msg)) + } + +} + +func TestLRUCache(t *testing.T) { + a := assert.New(t) + lcache := newLRUCache(1) + a.False(lcache.set(1)) + a.True(lcache.set(1)) + a.False(lcache.set(2)) + a.Len(lcache.items, 1) + a.Equal(1, lcache.l.Len()) +} + +func TestFederation_eventStreamHandler(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + + pub := server.NewMockPublisher(ctrl) + f.publisher = pub + + sess := &session{ + id: "abc", + nodeName: "node1", + nextEventID: 0, + seenEvents: newLRUCache(3), + } + var ack *Ack + ack = f.eventStreamHandler(sess, &Event{ + Id: 0, + Event: &Event_Subscribe{ + Subscribe: &Subscribe{ + ShareName: "", + TopicFilter: "a", + }, + }, 
+ }) + a.EqualValues(0, ack.EventId) + sts, _ := f.fedSubStore.GetClientStats("node1") + a.EqualValues(1, sts.SubscriptionsCurrent) + + msgEvent := &Event_Message{ + Message: &Message{ + TopicName: "a", + Payload: "b", + Qos: 1, + }, + } + pub.EXPECT().Publish(eventToMessage(msgEvent.Message)) + ack = f.eventStreamHandler(sess, &Event{ + Id: 1, + Event: msgEvent, + }) + a.EqualValues(1, ack.EventId) + ack = f.eventStreamHandler(sess, &Event{ + Id: 2, + Event: &Event_Unsubscribe{ + Unsubscribe: &Unsubscribe{ + TopicName: "a", + }, + }, + }) + sts, _ = f.fedSubStore.GetClientStats("node1") + a.EqualValues(0, sts.SubscriptionsCurrent) + a.EqualValues(2, ack.EventId) + + // send duplicated event + ack = f.eventStreamHandler(sess, &Event{ + Id: 0, + Event: &Event_Subscribe{ + Subscribe: &Subscribe{ + ShareName: "", + TopicFilter: "a", + }, + }, + }) + a.EqualValues(0, ack.EventId) + sts, _ = f.fedSubStore.GetClientStats("node1") + a.EqualValues(0, sts.SubscriptionsCurrent) + +} + +func TestFederation_getSerfConfig(t *testing.T) { + a := assert.New(t) + + cfg := &Config{ + NodeName: "node", + FedAddr: "127.0.0.1:1234", + AdvertiseFedAddr: "127.0.0.1:1235", + GossipAddr: "127.0.0.1:1236", + RetryInterval: 5 * time.Second, + RetryTimeout: 10 * time.Second, + SnapshotPath: "./path", + RejoinAfterLeave: true, + } + + serfCfg := getSerfConfig(cfg, nil, nil) + + a.Equal(cfg.NodeName, serfCfg.NodeName) + a.Equal(cfg.AdvertiseFedAddr, serfCfg.Tags["fed_addr"]) + host, port, _ := net.SplitHostPort(cfg.GossipAddr) + a.Equal(host, serfCfg.MemberlistConfig.BindAddr) + portNumber, _ := strconv.Atoi(port) + a.EqualValues(portNumber, serfCfg.MemberlistConfig.BindPort) + + host, port, _ = net.SplitHostPort(cfg.AdvertiseGossipAddr) + a.Equal(host, serfCfg.MemberlistConfig.AdvertiseAddr) + portNumber, _ = strconv.Atoi(port) + a.EqualValues(portNumber, serfCfg.MemberlistConfig.AdvertisePort) + + a.Equal(cfg.SnapshotPath, serfCfg.SnapshotPath) + a.Equal(cfg.RejoinAfterLeave, serfCfg.RejoinAfterLeave) +} + +func TestFederation_ListMembers(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + + mockSerf := NewMockiSerf(ctrl) + f.serf = mockSerf + mockSerf.EXPECT().Members().Return([]serf.Member{ + { + Name: "node1", + Addr: net.ParseIP("127.0.0.1"), + Port: 1234, + Tags: map[string]string{"k": "v"}, + Status: serf.StatusAlive, + }, { + Name: "node2", + Addr: net.ParseIP("127.0.0.2"), + Port: 1234, + Tags: map[string]string{"k": "v"}, + Status: serf.StatusAlive, + }, + }) + resp, err := f.ListMembers(context.Background(), nil) + a.NoError(err) + a.Equal([]*Member{ + { + Name: "node1", + Addr: "127.0.0.1:1234", + Tags: map[string]string{"k": "v"}, + Status: Status_STATUS_ALIVE, + }, { + Name: "node2", + Addr: "127.0.0.2:1234", + Tags: map[string]string{"k": "v"}, + Status: Status_STATUS_ALIVE, + }, + }, resp.Members) +} + +func TestFederation_Join(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + + mockSerf := NewMockiSerf(ctrl) + f.serf = mockSerf + mockSerf.EXPECT().Join([]string{"127.0.0.1:" + DefaultGossipPort, "127.0.0.2:1234"}, true).Return(2, nil) + _, err := f.Join(context.Background(), &JoinRequest{ + Hosts: []string{ + "127.0.0.1", + "127.0.0.2:1234", + }, + }) + a.NoError(err) +} + +func TestFederation_Leave(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + 
f := p.(*Federation) + mockSerf := NewMockiSerf(ctrl) + f.serf = mockSerf + mockSerf.EXPECT().Leave() + _, err := f.Leave(context.Background(), nil) + a.NoError(err) +} + +func TestFederation_ForceLeave(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + mockSerf := NewMockiSerf(ctrl) + f.serf = mockSerf + mockSerf.EXPECT().RemoveFailedNode("node1") + _, err := f.ForceLeave(context.Background(), &ForceLeaveRequest{ + NodeName: "node1", + }) + a.NoError(err) +} + +func mockMetaContext(nodeName string) context.Context { + ctx := context.Background() + md := metadata.New(map[string]string{ + "node_name": nodeName, + }) + return metadata.NewIncomingContext(ctx, md) +} + +func TestFederation_Hello(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + clientNodeName := "node1" + clientSid := "session_id" + f.fedSubStore.Subscribe(clientNodeName, &gmqtt.Subscription{ + TopicFilter: "topicA", + }) + ctx := mockMetaContext(clientNodeName) + resp, err := f.Hello(ctx, &ClientHello{ + SessionId: clientSid, + }) + a.NoError(err) + // cleanStart == true on first time + a.True(resp.CleanStart) + a.Zero(resp.NextEventId) + // clean subscription tree if cleanStart == true + a.EqualValues(0, f.fedSubStore.GetStats().SubscriptionsCurrent) + + f.fedSubStore.Subscribe(clientNodeName, &gmqtt.Subscription{ + TopicFilter: "topicA", + }) + resp, err = f.Hello(ctx, &ClientHello{ + SessionId: clientSid, + }) + a.NoError(err) + // cleanStart == true on second time + a.False(resp.CleanStart) + a.Zero(resp.NextEventId) + a.EqualValues(1, f.fedSubStore.GetStats().SubscriptionsCurrent) + a.Equal(clientNodeName, f.sessionMgr.sessions[clientNodeName].nodeName) + a.Equal(clientSid, f.sessionMgr.sessions[clientNodeName].id) + a.EqualValues(f.sessionMgr.sessions[clientNodeName].nextEventID, 0) + + // test next eventID + f.sessionMgr.sessions[clientNodeName].nextEventID = 2 + + resp, err = f.Hello(ctx, &ClientHello{ + SessionId: clientSid, + }) + a.NoError(err) + + a.EqualValues(2, resp.NextEventId) +} diff --git a/plugin/federation/hooks.go b/plugin/federation/hooks.go new file mode 100644 index 00000000..50824a1e --- /dev/null +++ b/plugin/federation/hooks.go @@ -0,0 +1,232 @@ +package federation + +import ( + "context" + "sort" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/persistence/subscription" + "github.com/DrmagicE/gmqtt/server" +) + +func (f *Federation) HookWrapper() server.HookWrapper { + return server.HookWrapper{ + OnSubscribedWrapper: f.OnSubscribedWrapper, + OnUnsubscribedWrapper: f.OnUnsubscribedWrapper, + OnMsgArrivedWrapper: f.OnMsgArrivedWrapper, + OnSessionTerminatedWrapper: f.OnSessionTerminatedWrapper, + OnWillPublishWrapper: f.OnWillPublishWrapper, + } +} + +func (f *Federation) OnSubscribedWrapper(pre server.OnSubscribed) server.OnSubscribed { + return func(ctx context.Context, client server.Client, subscription *gmqtt.Subscription) { + pre(ctx, client, subscription) + if subscription != nil { + if !f.localSubStore.subscribe(client.ClientOptions().ClientID, subscription.GetFullTopicName()) { + return + } + // only send new subscription + f.memberMu.Lock() + defer f.memberMu.Unlock() + for _, v := range f.peers { + sub := &Subscribe{ + ShareName: subscription.ShareName, + TopicFilter: subscription.TopicFilter, + } + v.queue.add(&Event{ + Event: &Event_Subscribe{ + Subscribe: sub, + }}) + } + } + } +} + +func 
(f *Federation) OnUnsubscribedWrapper(pre server.OnUnsubscribed) server.OnUnsubscribed {
+	return func(ctx context.Context, client server.Client, topicName string) {
+		pre(ctx, client, topicName)
+		if !f.localSubStore.unsubscribe(client.ClientOptions().ClientID, topicName) {
+			return
+		}
+		// Only send the Unsubscribe event when the last local subscriber of the topic is gone.
+		f.memberMu.Lock()
+		defer f.memberMu.Unlock()
+		for _, v := range f.peers {
+			unsub := &Unsubscribe{
+				TopicName: topicName,
+			}
+			v.queue.add(&Event{
+				Event: &Event_Unsubscribe{
+					Unsubscribe: unsub,
+				}})
+		}
+	}
+}
+
+func sendSharedMsg(fs *fedSubStore, sharedList map[string][]string, send func(nodeName string, topicName string)) {
+	// Pick one node per shared topic in a round-robin fashion and hand it to send.
+	fs.sharedMu.Lock()
+	defer fs.sharedMu.Unlock()
+	for topicName, v := range sharedList {
+		sort.Strings(v)
+		mod := fs.sharedSent[topicName] % (uint64(len(v)))
+		fs.sharedSent[topicName]++
+		send(v[mod], topicName)
+	}
+}
+
+// sendMessage sends the message to other nodes in the cluster.
+// For a retained message, it broadcasts the message to all nodes so that they can update their local retained stores.
+// For a non-retained message, it only sends the message to the nodes that have matching subscriptions.
+// For a shared subscription, the message must be delivered either to one local subscriber or to exactly one node.
+// If drop is true, the caller must drop the message on the local node.
+// If options is not nil, the caller must apply the options to the local topic-matching process.
+func (f *Federation) sendMessage(msg *gmqtt.Message) (drop bool, options *subscription.IterationOptions) {
+	f.memberMu.Lock()
+	defer f.memberMu.Unlock()
+
+	if msg.Retained {
+		eventMsg := messageToEvent(msg)
+		for _, v := range f.peers {
+			v.queue.add(&Event{
+				Event: &Event_Message{
+					Message: eventMsg,
+				}})
+		}
+		return
+	}
+
+	// shared topic => []nodeName.
+	sharedList := make(map[string][]string)
+	// Append the local node for every matching local shared subscription.
+	f.localSubStore.localStore.Iterate(func(clientID string, sub *gmqtt.Subscription) bool {
+		fullTopic := sub.GetFullTopicName()
+		sharedList[fullTopic] = append(sharedList[fullTopic], f.nodeName)
+		return true
+	}, subscription.IterationOptions{
+		Type:      subscription.TypeShared,
+		TopicName: msg.Topic,
+		MatchType: subscription.MatchFilter,
+	})
+
+	// Collect the nodes that hold matching non-shared subscriptions, keyed by nodeName.
+	nonShared := make(map[string]struct{})
+
+	f.fedSubStore.Iterate(func(nodeName string, sub *gmqtt.Subscription) bool {
+		if sub.ShareName != "" {
+			fullTopic := sub.GetFullTopicName()
+			sharedList[fullTopic] = append(sharedList[fullTopic], nodeName)
+			return true
+		}
+		nonShared[nodeName] = struct{}{}
+		return true
+	}, subscription.IterationOptions{
+		Type:      subscription.TypeAll,
+		TopicName: msg.Topic,
+		MatchType: subscription.MatchFilter,
+	})
+
+	sent := make(map[string]struct{})
+	// Dispatch shared-subscription deliveries.
+	sendSharedMsg(f.fedSubStore, sharedList, func(nodeName string, topicName string) {
+		// Do nothing if it is the local node.
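+		// The broker's own subscription tree will deliver the message to the
+		// local member of the shared group, so no event needs to be forwarded;
+		// only a remote round-robin winner gets a Message event queued below.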
+ if nodeName == f.nodeName { + return + } + if _, ok := sent[nodeName]; ok { + return + } + sent[nodeName] = struct{}{} + if p, ok := f.peers[nodeName]; ok { + eventMsg := messageToEvent(msg) + p.queue.add(&Event{ + Event: &Event_Message{ + Message: eventMsg, + }}) + drop = true + nonSharedOpts := subscription.IterationOptions{ + Type: subscription.TypeAll ^ subscription.TypeShared, + TopicName: msg.Topic, + MatchType: subscription.MatchFilter, + } + f.localSubStore.localStore.Iterate(func(clientID string, sub *gmqtt.Subscription) bool { + // If the message also matches non-shared subscription in local node, it can not be dropped. + // But the broker must not match any local shared subscriptions for this message, + // so we modify the iterationOptions to ignore shared subscriptions. + drop = false + options = &nonSharedOpts + return false + }, nonSharedOpts) + } + }) + // non-shared subscription + for nodeName := range nonShared { + if _, ok := sent[nodeName]; ok { + continue + } + if p, ok := f.peers[nodeName]; ok { + eventMsg := messageToEvent(msg) + p.queue.add(&Event{ + Event: &Event_Message{ + Message: eventMsg, + }}) + } + } + return +} +func (f *Federation) OnMsgArrivedWrapper(pre server.OnMsgArrived) server.OnMsgArrived { + return func(ctx context.Context, client server.Client, req *server.MsgArrivedRequest) error { + err := pre(ctx, client, req) + if err != nil { + return err + } + if req.Message != nil { + drop, opts := f.sendMessage(req.Message) + if drop { + req.Drop() + } + if opts != nil { + req.IterationOptions = *opts + } + } + return nil + } +} + +func (f *Federation) OnSessionTerminatedWrapper(pre server.OnSessionTerminated) server.OnSessionTerminated { + return func(ctx context.Context, clientID string, reason server.SessionTerminatedReason) { + pre(ctx, clientID, reason) + if unsubs := f.localSubStore.unsubscribeAll(clientID); len(unsubs) != 0 { + f.memberMu.Lock() + defer f.memberMu.Unlock() + for _, v := range f.peers { + for _, topicName := range unsubs { + unsub := &Unsubscribe{ + TopicName: topicName, + } + v.queue.add(&Event{ + Event: &Event_Unsubscribe{ + Unsubscribe: unsub, + }}) + } + } + } + } + +} + +func (f *Federation) OnWillPublishWrapper(pre server.OnWillPublish) server.OnWillPublish { + return func(ctx context.Context, clientID string, req *server.WillMsgRequest) { + pre(ctx, clientID, req) + if req.Message != nil { + drop, opts := f.sendMessage(req.Message) + if drop { + req.Drop() + } + if opts != nil { + req.IterationOptions = *opts + } + } + } +} diff --git a/plugin/federation/hooks_test.go b/plugin/federation/hooks_test.go new file mode 100644 index 00000000..c5abd11e --- /dev/null +++ b/plugin/federation/hooks_test.go @@ -0,0 +1,356 @@ +package federation + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/config" + "github.com/DrmagicE/gmqtt/persistence/subscription" + "github.com/DrmagicE/gmqtt/persistence/subscription/mem" + "github.com/DrmagicE/gmqtt/server" +) + +func init() { + log = zap.NewNop() + servePeerEventStream = func(p *peer) { + return + } +} + +var testConfig = config.Config{ + Plugins: map[string]config.Configuration{ + Name: &Config{ + NodeName: "node0", + }, + }, +} + +func TestFederation_OnMsgArrivedWrapper(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + 
f.localSubStore.localStore = mem.NewStore() + + onMsgArrived := f.OnMsgArrivedWrapper(func(ctx context.Context, client server.Client, req *server.MsgArrivedRequest) error { + return nil + }) + mockCli := server.NewMockClient(ctrl) + mockCli.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client1", + }).AnyTimes() + + // must not send the message if there are no matched topic. + msg := &gmqtt.Message{ + QoS: 1, + Topic: "/topicA", + Payload: []byte("payload"), + } + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: msg, + })) + + f.nodeJoin(serf.MemberEvent{ + Members: []serf.Member{ + { + Name: "node2", + }, + }, + }) + + mockQueue := NewMockqueue(ctrl) + f.peers["node2"].queue = mockQueue + + // always send retained messages + retainedMsg := &gmqtt.Message{ + QoS: 1, + Topic: "/topicA", + Payload: []byte("payload"), + Retained: true, + } + mockQueue.EXPECT().add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(retainedMsg), + }, + }) + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: retainedMsg, + })) + + // send the message only once even the message has multiple matched topics. + f.fedSubStore.Subscribe("node2", &gmqtt.Subscription{ + TopicFilter: "/topicA", + }, &gmqtt.Subscription{ + TopicFilter: "#", + }) + mockQueue.EXPECT().add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(msg), + }, + }) + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: msg, + })) + + // send only once if a retained message also has matched topic + mockQueue.EXPECT().add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(retainedMsg), + }, + }) + + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: retainedMsg, + })) + +} + +func TestFederation_OnMsgArrivedWrapper_SharedSubscription(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + f.localSubStore.localStore = mem.NewStore() + + onMsgArrived := f.OnMsgArrivedWrapper(func(ctx context.Context, client server.Client, req *server.MsgArrivedRequest) error { + return nil + }) + mockCli := server.NewMockClient(ctrl) + mockCli.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client1", + }).AnyTimes() + var nodes = []string{"node1", "node2"} + var mockQueues []*Mockqueue + for _, v := range nodes { + f.nodeJoin(serf.MemberEvent{ + Members: []serf.Member{ + { + Name: v, + }, + }, + }) + // prepare shared subscriptions + f.fedSubStore.Subscribe(v, &gmqtt.Subscription{ + ShareName: "abc", + TopicFilter: "/topicA", + }) + mq := NewMockqueue(ctrl) + mockQueues = append(mockQueues, mq) + f.peers[v].queue = mq + } + // add the same shared subscription for the local node + f.localSubStore.localStore.Subscribe("client1", &gmqtt.Subscription{ + ShareName: "abc", + TopicFilter: "/topicA", + }) + + msg := &gmqtt.Message{ + QoS: 1, + Topic: "/topicA", + Payload: []byte("payload"), + } + // send to local node, nothing is expected with mockQueue + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: msg, + })) + + // round-robin + for k := range nodes { + mockQueues[k].EXPECT().add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(msg), + }, + }) + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: msg, + })) + } + + // send to local node, nothing is expected with 
mockQueue + a.NoError(onMsgArrived(context.Background(), mockCli, &server.MsgArrivedRequest{ + Message: msg, + })) + + // add non-shared subscription to node1 + f.fedSubStore.Subscribe(nodes[0], &gmqtt.Subscription{ + TopicFilter: "/topicA", + }) + // add overlap subscription to local node + f.localSubStore.localStore.Subscribe("client1", &gmqtt.Subscription{ + TopicFilter: "/topicA", + }) + msgReq := &server.MsgArrivedRequest{ + Message: msg, + } + mockQueues[0].EXPECT().add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(msg), + }, + }) + a.NoError(onMsgArrived(context.Background(), mockCli, msgReq)) + a.Equal(subscription.IterationOptions{ + Type: subscription.TypeSYS | subscription.TypeNonShared, + TopicName: msgReq.Message.Topic, + MatchType: subscription.MatchFilter, + }, msgReq.IterationOptions) +} + +func TestFederation_OnSubscribedWrapper(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + f.localSubStore.init(mem.NewStore()) + f.nodeJoin(serf.MemberEvent{ + Members: []serf.Member{ + { + Name: "node2", + }, + }, + }) + mockQueue := NewMockqueue(ctrl) + f.peers["node2"].queue = mockQueue + onSubscribed := f.OnSubscribedWrapper(func(ctx context.Context, client server.Client, subscription *gmqtt.Subscription) { + return + }) + + client1 := server.NewMockClient(ctrl) + client1.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client1", + }).AnyTimes() + + client2 := server.NewMockClient(ctrl) + client2.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client2", + }).AnyTimes() + + // only subscribe once + mockQueue.EXPECT().add(&Event{ + Event: &Event_Subscribe{ + Subscribe: &Subscribe{ + TopicFilter: "/topicA", + }, + }, + }) + onSubscribed(context.Background(), client1, &gmqtt.Subscription{ + TopicFilter: "/topicA", + }) + onSubscribed(context.Background(), client2, &gmqtt.Subscription{ + TopicFilter: "/topicA", + }) + + a.EqualValues(2, f.localSubStore.topics["/topicA"]) +} + +func TestFederation_OnUnsubscribedWrapper(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + f.localSubStore.init(mem.NewStore()) + f.nodeJoin(serf.MemberEvent{ + Members: []serf.Member{ + { + Name: "node2", + }, + }, + }) + + mockQueue := NewMockqueue(ctrl) + f.peers["node2"].queue = mockQueue + + // 2 subscription for /topicA + f.localSubStore.subscribe("client1", "/topicA") + f.localSubStore.subscribe("client2", "/topicA") + + onUnsubscribed := f.OnUnsubscribedWrapper(func(ctx context.Context, client server.Client, topicName string) { + return + }) + client1 := server.NewMockClient(ctrl) + client1.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client1", + }).AnyTimes() + + client2 := server.NewMockClient(ctrl) + client2.EXPECT().ClientOptions().Return(&server.ClientOptions{ + ClientID: "client2", + }).AnyTimes() + onUnsubscribed(context.Background(), client1, "/topicA") + + // only unsubscribe when all local subscription for /topicA have been unsubscribed + mockQueue.EXPECT().add(&Event{ + Event: &Event_Unsubscribe{ + Unsubscribe: &Unsubscribe{ + TopicName: "/topicA", + }, + }, + }) + onUnsubscribed(context.Background(), client2, "/topicA") + // should not send unsubscribe event if the unsubscribed topic not exists. 
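+	// (client2's subscription was already removed by the previous call, so
+	// unsubscribe returns false and nothing is queued)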
+ onUnsubscribed(context.Background(), client2, "/topicA") +} + +func TestFederation_OnSessionTerminatedWrapper(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p, _ := New(testConfig) + f := p.(*Federation) + f.localSubStore.init(mem.NewStore()) + f.nodeJoin(serf.MemberEvent{ + Members: []serf.Member{ + { + Name: "node2", + }, + }, + }) + + mockQueue := NewMockqueue(ctrl) + f.peers["node2"].queue = mockQueue + + // 2 subscription for /topicA + f.localSubStore.subscribe("client1", "/topicA") + f.localSubStore.subscribe("client2", "/topicA") + // 1 for /topicB & /topicC + f.localSubStore.subscribe("client3", "/topicB") + f.localSubStore.subscribe("client3", "/topicC") + + onSessionTerminated := f.OnSessionTerminatedWrapper(func(ctx context.Context, clientID string, reason server.SessionTerminatedReason) { + return + }) + + onSessionTerminated(context.Background(), "client1", 0) + + mockQueue.EXPECT().add(&Event{ + Event: &Event_Unsubscribe{ + Unsubscribe: &Unsubscribe{ + TopicName: "/topicA", + }, + }, + }) + onSessionTerminated(context.Background(), "client2", 0) + + mockQueue.EXPECT().add(&Event{ + Event: &Event_Unsubscribe{ + Unsubscribe: &Unsubscribe{ + TopicName: "/topicB", + }, + }, + }) + mockQueue.EXPECT().add(&Event{ + Event: &Event_Unsubscribe{ + Unsubscribe: &Unsubscribe{ + TopicName: "/topicC", + }, + }, + }) + onSessionTerminated(context.Background(), "client3", 0) +} diff --git a/plugin/federation/membership.go b/plugin/federation/membership.go new file mode 100644 index 00000000..cbb6c366 --- /dev/null +++ b/plugin/federation/membership.go @@ -0,0 +1,99 @@ +package federation + +import ( + "time" + + "github.com/google/uuid" + "github.com/hashicorp/serf/serf" + "go.uber.org/zap" +) + +// iSerf is the interface for *serf.Serf. +// It is used for test. 
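+// Tests substitute the generated MockiSerf (see membership_mock.go) for a
+// real *serf.Serf, which satisfies this interface.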
+type iSerf interface { + Join(existing []string, ignoreOld bool) (int, error) + RemoveFailedNode(node string) error + Leave() error + Members() []serf.Member + Shutdown() error +} + +var servePeerEventStream = func(p *peer) { + p.serveEventStream() +} + +func (f *Federation) startSerf(t *time.Timer) error { + defer func() { + t.Reset(f.config.RetryInterval) + }() + if _, err := f.serf.Join(f.config.RetryJoin, true); err != nil { + return err + } + go f.eventHandler() + return nil +} + +func (f *Federation) eventHandler() { + for { + select { + case evt := <-f.serfEventCh: + switch evt.EventType() { + case serf.EventMemberJoin: + f.nodeJoin(evt.(serf.MemberEvent)) + case serf.EventMemberLeave, serf.EventMemberFailed, serf.EventMemberReap: + f.nodeFail(evt.(serf.MemberEvent)) + case serf.EventUser: + case serf.EventMemberUpdate: + // TODO + case serf.EventQuery: // Ignore + default: + } + case <-f.exit: + f.memberMu.Lock() + for _, v := range f.peers { + v.stop() + } + f.memberMu.Unlock() + return + } + } +} + +func (f *Federation) nodeJoin(member serf.MemberEvent) { + f.memberMu.Lock() + defer f.memberMu.Unlock() + for _, v := range member.Members { + if v.Name == f.nodeName { + continue + } + log.Info("member joined", zap.String("node_name", v.Name)) + if _, ok := f.peers[v.Name]; !ok { + p := &peer{ + fed: f, + member: v, + exit: make(chan struct{}), + sessionID: uuid.New().String(), + queue: newEventQueue(), + localName: f.nodeName, + } + f.peers[v.Name] = p + go servePeerEventStream(p) + } + } +} + +func (f *Federation) nodeFail(member serf.MemberEvent) { + f.memberMu.Lock() + defer f.memberMu.Unlock() + for _, v := range member.Members { + if v.Name == f.nodeName { + continue + } + if p, ok := f.peers[v.Name]; ok { + log.Error("node failed, close stream client", zap.String("node_name", v.Name)) + p.stop() + delete(f.peers, v.Name) + _ = f.fedSubStore.UnsubscribeAll(v.Name) + } + } +} diff --git a/plugin/federation/membership_mock.go b/plugin/federation/membership_mock.go new file mode 100644 index 00000000..cdf7ee2d --- /dev/null +++ b/plugin/federation/membership_mock.go @@ -0,0 +1,105 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: plugin/federation/membership.go + +// Package federation is a generated GoMock package. 
+package federation + +import ( + gomock "github.com/golang/mock/gomock" + serf "github.com/hashicorp/serf/serf" + reflect "reflect" +) + +// MockiSerf is a mock of iSerf interface +type MockiSerf struct { + ctrl *gomock.Controller + recorder *MockiSerfMockRecorder +} + +// MockiSerfMockRecorder is the mock recorder for MockiSerf +type MockiSerfMockRecorder struct { + mock *MockiSerf +} + +// NewMockiSerf creates a new mock instance +func NewMockiSerf(ctrl *gomock.Controller) *MockiSerf { + mock := &MockiSerf{ctrl: ctrl} + mock.recorder = &MockiSerfMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockiSerf) EXPECT() *MockiSerfMockRecorder { + return m.recorder +} + +// Join mocks base method +func (m *MockiSerf) Join(existing []string, ignoreOld bool) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Join", existing, ignoreOld) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Join indicates an expected call of Join +func (mr *MockiSerfMockRecorder) Join(existing, ignoreOld interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Join", reflect.TypeOf((*MockiSerf)(nil).Join), existing, ignoreOld) +} + +// RemoveFailedNode mocks base method +func (m *MockiSerf) RemoveFailedNode(node string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveFailedNode", node) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveFailedNode indicates an expected call of RemoveFailedNode +func (mr *MockiSerfMockRecorder) RemoveFailedNode(node interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveFailedNode", reflect.TypeOf((*MockiSerf)(nil).RemoveFailedNode), node) +} + +// Leave mocks base method +func (m *MockiSerf) Leave() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Leave") + ret0, _ := ret[0].(error) + return ret0 +} + +// Leave indicates an expected call of Leave +func (mr *MockiSerfMockRecorder) Leave() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Leave", reflect.TypeOf((*MockiSerf)(nil).Leave)) +} + +// Members mocks base method +func (m *MockiSerf) Members() []serf.Member { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Members") + ret0, _ := ret[0].([]serf.Member) + return ret0 +} + +// Members indicates an expected call of Members +func (mr *MockiSerfMockRecorder) Members() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Members", reflect.TypeOf((*MockiSerf)(nil).Members)) +} + +// Shutdown mocks base method +func (m *MockiSerf) Shutdown() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Shutdown") + ret0, _ := ret[0].(error) + return ret0 +} + +// Shutdown indicates an expected call of Shutdown +func (mr *MockiSerfMockRecorder) Shutdown() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockiSerf)(nil).Shutdown)) +} diff --git a/plugin/federation/peer.go b/plugin/federation/peer.go new file mode 100644 index 00000000..809ebe3b --- /dev/null +++ b/plugin/federation/peer.go @@ -0,0 +1,373 @@ +package federation + +import ( + "container/list" + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/hashicorp/serf/serf" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + 
"github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/persistence/subscription" +) + +const ( + peerStateStopped = iota + 1 + peerStateStreaming +) + +// peer represents a remote node which act as the event stream server. +type peer struct { + fed *Federation + localName string + member serf.Member + exit chan struct{} + // local session id + sessionID string + queue queue + // client-side stream + stateMu sync.Mutex + state int + stream *stream +} + +type stream struct { + queue queue + client Federation_EventStreamClient + close chan struct{} + errOnce sync.Once + err error + wg sync.WaitGroup +} + +// interface for testing +type queue interface { + clear() + close() + open() + setReadPosition(id uint64) + add(event *Event) + fetchEvents() []*Event + ack(id uint64) +} + +// eventQueue store the events that are ready to send. +// TODO add max buffer size +type eventQueue struct { + cond *sync.Cond + nextID uint64 + l *list.List + nextRead *list.Element + closed bool +} + +func newEventQueue() *eventQueue { + return &eventQueue{ + cond: sync.NewCond(&sync.Mutex{}), + nextID: 0, + l: list.New(), + closed: false, + } +} + +func (e *eventQueue) clear() { + e.cond.L.Lock() + defer e.cond.L.Unlock() + e.nextID = 0 + e.l = list.New() + e.nextRead = nil + e.closed = false +} + +func (e *eventQueue) close() { + e.cond.L.Lock() + defer e.cond.L.Unlock() + e.closed = true + e.cond.Signal() +} + +func (e *eventQueue) open() { + e.cond.L.Lock() + defer e.cond.L.Unlock() + e.closed = false + e.cond.Signal() +} + +func (e *eventQueue) setReadPosition(id uint64) { + e.cond.L.Lock() + defer e.cond.L.Unlock() + for elem := e.l.Front(); elem != nil; elem = elem.Next() { + ev := elem.Value.(*Event) + if ev.Id == id { + e.nextRead = elem + return + } + } +} + +func (e *eventQueue) add(event *Event) { + e.cond.L.Lock() + defer func() { + e.cond.L.Unlock() + e.cond.Signal() + }() + event.Id = e.nextID + e.nextID++ + elem := e.l.PushBack(event) + if e.nextRead == nil { + e.nextRead = elem + } +} + +func (e *eventQueue) fetchEvents() []*Event { + e.cond.L.Lock() + defer e.cond.L.Unlock() + + for (e.l.Len() == 0 || e.nextRead == nil) && !e.closed { + e.cond.Wait() + } + if e.closed { + return nil + } + ev := make([]*Event, 0) + var elem *list.Element + elem = e.nextRead + for i := 0; i < 100; i++ { + ev = append(ev, elem.Value.(*Event)) + elem = elem.Next() + if elem == nil { + break + } + } + e.nextRead = elem + return ev +} + +func (e *eventQueue) ack(id uint64) { + e.cond.L.Lock() + defer func() { + e.cond.L.Unlock() + e.cond.Signal() + }() + var next *list.Element + for elem := e.l.Front(); elem != nil; elem = next { + next = elem.Next() + req := elem.Value.(*Event) + if req.Id <= id { + e.l.Remove(elem) + } + if req.Id == id { + return + } + } +} + +func (p *peer) stop() { + select { + case <-p.exit: + default: + close(p.exit) + } + p.stateMu.Lock() + if p.state == peerStateStreaming { + _ = p.stream.client.CloseSend() + } + p.state = peerStateStopped + p.stateMu.Unlock() + p.stream.wg.Wait() +} + +func (p *peer) serveEventStream() { + timer := time.NewTimer(0) + var reconnectCount int + for { + select { + case <-p.exit: + return + case <-timer.C: + err := p.serveStream(reconnectCount, timer) + select { + case <-p.exit: + return + default: + } + if err != nil { + log.Error("stream broken, reconnecting", zap.Error(err), + zap.Int("reconnect_count", reconnectCount)) + reconnectCount++ + continue + } + return + } + } +} + +func (p *peer) initStream(client FederationClient) (s *stream, err error) { + 
p.stateMu.Lock() + defer func() { + if err == nil { + p.state = peerStateStreaming + } + p.stateMu.Unlock() + }() + if p.state == peerStateStopped { + return nil, errors.New("peer has been stopped") + } + helloMD := metadata.Pairs("node_name", p.localName) + helloCtx := metadata.NewOutgoingContext(context.Background(), helloMD) + sh, err := client.Hello(helloCtx, &ClientHello{ + SessionId: p.sessionID, + }) + if err != nil { + return nil, fmt.Errorf("handshake error: %s", err.Error()) + } + log.Info("handshake succeed", zap.String("remote_node", p.member.Name), zap.Bool("clean_start", sh.CleanStart)) + if sh.CleanStart { + p.queue.clear() + // sync full state + p.fed.localSubStore.Lock() + for k := range p.fed.localSubStore.topics { + shareName, topicFilter := subscription.SplitTopic(k) + p.queue.add(&Event{ + Event: &Event_Subscribe{Subscribe: &Subscribe{ + ShareName: shareName, + TopicFilter: topicFilter, + }}, + }) + } + p.fed.localSubStore.Unlock() + + p.fed.retainedStore.Iterate(func(message *gmqtt.Message) bool { + // TODO add timestamp to retained message and use Last Write Wins (LWW) to resolve write conflicts. + p.queue.add(&Event{ + Event: &Event_Message{ + Message: messageToEvent(message.Copy()), + }, + }) + return true + }) + } + p.queue.setReadPosition(sh.NextEventId) + md := metadata.Pairs("node_name", p.localName) + ctx := metadata.NewOutgoingContext(context.Background(), md) + c, err := client.EventStream(ctx) + if err != nil { + return nil, err + } + p.queue.open() + s = &stream{ + queue: p.queue, + client: c, + close: make(chan struct{}), + } + p.stream = s + return s, nil +} + +func (p *peer) serveStream(reconnectCount int, backoff *time.Timer) (err error) { + defer func() { + if err != nil { + du := time.Duration(0) + if reconnectCount != 0 { + du = time.Duration(reconnectCount) * 500 * time.Millisecond + } + if max := 2 * time.Second; du > max { + du = max + } + backoff.Reset(du) + } + }() + addr := p.member.Tags["fed_addr"] + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return err + } + client := NewFederationClient(conn) + s, err := p.initStream(client) + if err != nil { + return err + } + return s.serve() +} + +func (s *stream) serve() error { + s.wg.Add(2) + go s.readLoop() + go s.sendEvents() + s.wg.Wait() + return s.err +} + +func (s *stream) setError(err error) { + s.errOnce.Do(func() { + s.queue.close() + s.client.CloseSend() + close(s.close) + if err != nil && err != io.EOF { + log.Error("stream error", zap.Error(err)) + s.err = err + } + }) +} + +func (s *stream) readLoop() { + var err error + var resp *Ack + defer func() { + if re := recover(); re != nil { + err = errors.New(fmt.Sprint(re)) + } + s.setError(err) + s.wg.Done() + }() + for { + select { + case <-s.close: + return + default: + resp, err = s.client.Recv() + if err != nil { + return + } + s.queue.ack(resp.EventId) + if ce := log.Check(zapcore.DebugLevel, "event acked"); ce != nil { + ce.Write(zap.Uint64("id", resp.EventId)) + } + } + } +} + +func (s *stream) sendEvents() { + var err error + defer func() { + if re := recover(); re != nil { + err = errors.New(fmt.Sprint(re)) + } + s.setError(err) + s.wg.Done() + }() + for { + events := s.queue.fetchEvents() + // stream has been closed + if events == nil { + return + } + for _, v := range events { + err := s.client.Send(v) + if err != nil { + return + } + if ce := log.Check(zapcore.DebugLevel, "event sent"); ce != nil { + ce.Write(zap.String("event", v.String())) + } + } + } +} diff --git a/plugin/federation/peer_mock.go 
b/plugin/federation/peer_mock.go new file mode 100644 index 00000000..135ceb53 --- /dev/null +++ b/plugin/federation/peer_mock.go @@ -0,0 +1,119 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: plugin/federation/peer.go + +// Package federation is a generated GoMock package. +package federation + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// Mockqueue is a mock of queue interface +type Mockqueue struct { + ctrl *gomock.Controller + recorder *MockqueueMockRecorder +} + +// MockqueueMockRecorder is the mock recorder for Mockqueue +type MockqueueMockRecorder struct { + mock *Mockqueue +} + +// NewMockqueue creates a new mock instance +func NewMockqueue(ctrl *gomock.Controller) *Mockqueue { + mock := &Mockqueue{ctrl: ctrl} + mock.recorder = &MockqueueMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *Mockqueue) EXPECT() *MockqueueMockRecorder { + return m.recorder +} + +// clear mocks base method +func (m *Mockqueue) clear() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "clear") +} + +// clear indicates an expected call of clear +func (mr *MockqueueMockRecorder) clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "clear", reflect.TypeOf((*Mockqueue)(nil).clear)) +} + +// close mocks base method +func (m *Mockqueue) close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "close") +} + +// close indicates an expected call of close +func (mr *MockqueueMockRecorder) close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "close", reflect.TypeOf((*Mockqueue)(nil).close)) +} + +// open mocks base method +func (m *Mockqueue) open() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "open") +} + +// open indicates an expected call of open +func (mr *MockqueueMockRecorder) open() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "open", reflect.TypeOf((*Mockqueue)(nil).open)) +} + +// setReadPosition mocks base method +func (m *Mockqueue) setReadPosition(id uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "setReadPosition", id) +} + +// setReadPosition indicates an expected call of setReadPosition +func (mr *MockqueueMockRecorder) setReadPosition(id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setReadPosition", reflect.TypeOf((*Mockqueue)(nil).setReadPosition), id) +} + +// add mocks base method +func (m *Mockqueue) add(event *Event) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "add", event) +} + +// add indicates an expected call of add +func (mr *MockqueueMockRecorder) add(event interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "add", reflect.TypeOf((*Mockqueue)(nil).add), event) +} + +// fetchEvents mocks base method +func (m *Mockqueue) fetchEvents() []*Event { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "fetchEvents") + ret0, _ := ret[0].([]*Event) + return ret0 +} + +// fetchEvents indicates an expected call of fetchEvents +func (mr *MockqueueMockRecorder) fetchEvents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "fetchEvents", reflect.TypeOf((*Mockqueue)(nil).fetchEvents)) +} + +// ack mocks base method +func (m *Mockqueue) ack(id uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ack", id) +} + +// ack indicates an expected call of ack +func (mr *MockqueueMockRecorder) ack(id interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ack", reflect.TypeOf((*Mockqueue)(nil).ack), id) +} diff --git a/plugin/federation/peer_test.go b/plugin/federation/peer_test.go new file mode 100644 index 00000000..4777dfb6 --- /dev/null +++ b/plugin/federation/peer_test.go @@ -0,0 +1,176 @@ +package federation + +import ( + "reflect" + "testing" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/assert" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/persistence/subscription/mem" + "github.com/DrmagicE/gmqtt/retained" + "github.com/DrmagicE/gmqtt/retained/trie" +) + +func TestPeer_initStream_CleanStart(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockQueue := NewMockqueue(ctrl) + + ls := &localSubStore{} + ls.init(mem.NewStore()) + + retained := trie.NewStore() + p := &peer{ + fed: &Federation{ + localSubStore: ls, + retainedStore: retained, + }, + localName: "", + member: serf.Member{ + Name: "node2", + }, + exit: nil, + sessionID: "sessionID", + queue: mockQueue, + stream: nil, + } + ls.subscribe("c1", "topicA") + ls.subscribe("c2", "topicB") + + m1 := &gmqtt.Message{ + Topic: "topicA", + } + m2 := &gmqtt.Message{ + Topic: "topicB", + } + retained.AddOrReplace(m1) + retained.AddOrReplace(m2) + + client := NewMockFederationClient(ctrl) + + client.EXPECT().Hello(gomock.Any(), &ClientHello{ + SessionId: p.sessionID, + }).Return(&ServerHello{ + CleanStart: true, + NextEventId: 0, + }, nil) + + gomock.InOrder( + mockQueue.EXPECT().clear(), + mockQueue.EXPECT().setReadPosition(uint64(0)), + mockQueue.EXPECT().open(), + ) + + // The order of the events is not significant and also is not grantee to be sorted in any way. + // So we had to collect them into map. 
+ subEvents := make(map[string]string) + msgEvents := make(map[string]string) + + expectedSubEvents := map[string]*Event{ + "topicA": { + Event: &Event_Subscribe{ + Subscribe: &Subscribe{ + TopicFilter: "topicA", + }, + }, + }, + "topicB": { + Event: &Event_Subscribe{ + Subscribe: &Subscribe{ + TopicFilter: "topicB", + }, + }, + }, + } + expectedMsgEvents := map[string]*Event{ + "topicA": { + Event: &Event_Message{ + Message: messageToEvent(m1), + }, + }, + "topicB": { + Event: &Event_Message{ + Message: messageToEvent(m2), + }, + }, + } + mockQueue.EXPECT().add(gomock.Any()).Do(func(event *Event) { + switch event.Event.(type) { + case *Event_Subscribe: + sub := event.Event.(*Event_Subscribe) + subEvents[sub.Subscribe.TopicFilter] = event.String() + case *Event_Message: + msg := event.Event.(*Event_Message) + msgEvents[msg.Message.TopicName] = event.String() + default: + a.FailNow("unexpected event type: %s", reflect.TypeOf(event.Event)) + } + }).Times(4) + + client.EXPECT().EventStream(gomock.Any()) + _, err := p.initStream(client) + + a.NoError(err) + for k, v := range msgEvents { + a.Equal(expectedMsgEvents[k].String(), v) + } + for k, v := range subEvents { + a.Equal(expectedSubEvents[k].String(), v) + } + +} + +func TestPeer_initStream_CleanStartFalse(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockQueue := NewMockqueue(ctrl) + + ls := &localSubStore{} + ls.init(mem.NewStore()) + + rt := retained.NewMockStore(ctrl) + p := &peer{ + fed: &Federation{ + localSubStore: ls, + retainedStore: rt, + }, + localName: "", + member: serf.Member{ + Name: "node2", + }, + exit: nil, + sessionID: "sessionID", + queue: mockQueue, + stream: nil, + } + + client := NewMockFederationClient(ctrl) + client.EXPECT().Hello(gomock.Any(), &ClientHello{ + SessionId: p.sessionID, + }).Return(&ServerHello{ + CleanStart: false, + NextEventId: 10, + }, nil) + + gomock.InOrder( + mockQueue.EXPECT().setReadPosition(uint64(10)), + mockQueue.EXPECT().open(), + ) + + client.EXPECT().EventStream(gomock.Any()) + + _, err := p.initStream(client) + a.NoError(err) + +} + +func TestEventQueue(t *testing.T) { + +} diff --git a/plugin/federation/protos/federation.proto b/plugin/federation/protos/federation.proto new file mode 100644 index 00000000..79bc2d69 --- /dev/null +++ b/plugin/federation/protos/federation.proto @@ -0,0 +1,129 @@ +syntax ="proto3"; + +package gmqtt.federation.api; +option go_package = ".;federation"; + +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; + +message Event { + uint64 id = 1; + oneof Event { + Subscribe Subscribe = 2; + Message message = 3; + Unsubscribe unsubscribe = 4; + } +} + +// Subscribe represents the subscription for a node, it is used to route message among nodes, +// so only shared_name and topic_filter is required. +message Subscribe { + string share_name = 1; + string topic_filter = 2; +} + +message Message{ + string topic_name = 1; + string payload = 2; + uint32 qos = 3; + bool retained = 4; + // the following fields are using in v5 client. + string content_type = 5; + string correlation_data = 6; + uint32 message_expiry = 7; + uint32 payload_format = 8; + string response_topic = 9; + repeated UserProperty user_properties = 10; +} + +message UserProperty { + bytes K = 1; + bytes V = 2; +} +message Unsubscribe{ + string topic_name = 1; +} + +message Ack { + uint64 event_id = 1; +} + +// ClientHello is the request message in handshake process. 
+message ClientHello { + string session_id =1; +} + +// ServerHello is the response message in handshake process. +message ServerHello{ + bool clean_start = 1; + uint64 next_event_id = 2; +} + +message JoinRequest { + repeated string hosts = 1; +} + + +message Member { + string name = 1; + string addr = 2; + map<string, string> tags = 3; + Status status = 4; +} + +enum Status { + STATUS_UNSPECIFIED = 0; + STATUS_ALIVE = 1; + STATUS_LEAVING=2; + STATUS_LEFT = 3; + STATUS_FAILED = 4; +} + +message ListMembersResponse { + repeated Member members = 1; +} + +message ForceLeaveRequest { + string node_name = 1; +} + +service Membership { + // Join tells the local node to join an existing cluster. + // See https://www.serf.io/docs/commands/join.html for details. + rpc Join(JoinRequest) returns (google.protobuf.Empty){ + option (google.api.http) = { + post: "/v1/federation/join" + body:"*" + }; + } + // Leave triggers a graceful leave for the local node. + // This is used to ensure other nodes see the node as "left" instead of "failed". + // Note that a node that has left cannot rejoin the cluster unless it is restarted. + rpc Leave(google.protobuf.Empty) returns (google.protobuf.Empty){ + option (google.api.http) = { + post: "/v1/federation/leave" + body:"*" + }; + } + // ForceLeave forces a member of a Serf cluster to enter the "left" state. + // Note that if the member is still actually alive, it will eventually rejoin the cluster. + // The true purpose of this method is to force-remove "failed" nodes. + // See https://www.serf.io/docs/commands/force-leave.html for details. + rpc ForceLeave(ForceLeaveRequest) returns (google.protobuf.Empty){ + option (google.api.http) = { + post: "/v1/federation/force_leave" + body:"*" + }; + } + // ListMembers lists all known members in the Serf cluster. + rpc ListMembers(google.protobuf.Empty) returns (ListMembersResponse){ + option (google.api.http) = { + get: "/v1/federation/members" + }; + } +} + +service Federation { + rpc Hello(ClientHello) returns (ServerHello){} + rpc EventStream (stream Event) returns (stream Ack){} +} diff --git a/plugin/federation/protos/proto_gen.sh b/plugin/federation/protos/proto_gen.sh new file mode 100755 index 00000000..261d433a --- /dev/null +++ b/plugin/federation/protos/proto_gen.sh @@ -0,0 +1,8 @@ +protoc -I.
\ +-I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway \ +-I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ +--go-grpc_out=../ \ +--go_out=../ \ +--grpc-gateway_out=../ \ +--swagger_out=../swagger \ +*.proto \ No newline at end of file diff --git a/plugin/federation/swagger/federation.swagger.json b/plugin/federation/swagger/federation.swagger.json new file mode 100644 index 00000000..033dbf04 --- /dev/null +++ b/plugin/federation/swagger/federation.swagger.json @@ -0,0 +1,357 @@ +{ + "swagger": "2.0", + "info": { + "title": "federation.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/v1/federation/force_leave": { + "post": { + "summary": "ForceLeave force forces a member of a Serf cluster to enter the \"left\" state.\nNote that if the member is still actually alive, it will eventually rejoin the cluster.\nThe true purpose of this method is to force remove \"failed\" nodes\nSee https://www.serf.io/docs/commands/force-leave.html for details.", + "operationId": "ForceLeave", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/apiForceLeaveRequest" + } + } + ], + "tags": [ + "Membership" + ] + } + }, + "/v1/federation/join": { + "post": { + "summary": "Join tells the local node to join the an existing cluster.\nSee https://www.serf.io/docs/commands/join.html for details.", + "operationId": "Join", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/apiJoinRequest" + } + } + ], + "tags": [ + "Membership" + ] + } + }, + "/v1/federation/leave": { + "post": { + "summary": "Leave triggers a graceful leave for the local node.\nThis is used to ensure other nodes see the node as \"left\" instead of \"failed\".\nNote that a leaved node cannot re-join the cluster unless you restart the leaved node.", + "operationId": "Leave", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "properties": {} + } + } + ], + "tags": [ + "Membership" + ] + } + }, + "/v1/federation/members": { + "get": { + "summary": "ListMembers lists all known members in the Serf cluster.", + "operationId": "ListMembers", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/apiListMembersResponse" + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "tags": [ + "Membership" + ] + } + } + }, + "definitions": { + "apiAck": { + "type": "object", + "properties": { + "event_id": { + "type": "string", + "format": "uint64" + } + } + }, + "apiForceLeaveRequest": { + "type": "object", + "properties": { 
+ "node_name": { + "type": "string" + } + } + }, + "apiJoinRequest": { + "type": "object", + "properties": { + "hosts": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "apiListMembersResponse": { + "type": "object", + "properties": { + "members": { + "type": "array", + "items": { + "$ref": "#/definitions/apiMember" + } + } + } + }, + "apiMember": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "addr": { + "type": "string" + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "status": { + "$ref": "#/definitions/apiStatus" + } + } + }, + "apiMessage": { + "type": "object", + "properties": { + "topic_name": { + "type": "string" + }, + "payload": { + "type": "string" + }, + "qos": { + "type": "integer", + "format": "int64" + }, + "retained": { + "type": "boolean", + "format": "boolean" + }, + "content_type": { + "type": "string", + "description": "the following fields are using in v5 client." + }, + "correlation_data": { + "type": "string" + }, + "message_expiry": { + "type": "integer", + "format": "int64" + }, + "payload_format": { + "type": "integer", + "format": "int64" + }, + "response_topic": { + "type": "string" + }, + "user_properties": { + "type": "array", + "items": { + "$ref": "#/definitions/apiUserProperty" + } + } + } + }, + "apiServerHello": { + "type": "object", + "properties": { + "clean_start": { + "type": "boolean", + "format": "boolean" + }, + "next_event_id": { + "type": "string", + "format": "uint64" + } + }, + "description": "ServerHello is the response message in handshake process." + }, + "apiStatus": { + "type": "string", + "enum": [ + "STATUS_UNSPECIFIED", + "STATUS_ALIVE", + "STATUS_LEAVING", + "STATUS_LEFT", + "STATUS_FAILED" + ], + "default": "STATUS_UNSPECIFIED" + }, + "apiSubscribe": { + "type": "object", + "properties": { + "share_name": { + "type": "string" + }, + "topic_filter": { + "type": "string" + } + }, + "description": "Subscribe represents the subscription for a node, it is used to route message among nodes,\nso only shared_name and topic_filter is required." 
+ }, + "apiUnsubscribe": { + "type": "object", + "properties": { + "topic_name": { + "type": "string" + } + } + }, + "apiUserProperty": { + "type": "object", + "properties": { + "K": { + "type": "string", + "format": "byte" + }, + "V": { + "type": "string", + "format": "byte" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "http_status": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/plugin_generate.go b/plugin_generate.go new file mode 100644 index 00000000..78d4f8a8 --- /dev/null +++ b/plugin_generate.go @@ -0,0 +1,85 @@ +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io" + "io/ioutil" + "log" + "strings" + "text/template" + + "gopkg.in/yaml.v2" +) + +var tmpl = `//go:generate sh -c "cd ../../ && go run plugin_generate.go" +// generated by plugin_generate.go; DO NOT EDIT + +package main + +import ( + {{- range $index, $element := .}} + _ "{{$element}}" + {{- end}} +) +` + +const ( + pluginFile = "./cmd/gmqttd/plugins.go" + pluginCfg = "plugin_imports.yml" + importPath = "github.com/DrmagicE/gmqtt/plugin" +) + +type ymlCfg struct { + Packages []string `yaml:"packages"` +} + +func main() { + b, err := ioutil.ReadFile(pluginCfg) + if err != nil { + log.Fatalf("ReadFile error %s", err) + return + } + + var cfg ymlCfg + err = yaml.Unmarshal(b, &cfg) + if err != nil { + log.Fatalf("Unmarshal error: %s", err) + return + } + t, err := template.New("plugin_gen").Parse(tmpl) + if err != nil { + log.Fatalf("Parse template error: %s", err) + return + } + + for k, v := range cfg.Packages { + if !strings.Contains(v, "/") { + cfg.Packages[k] = importPath + "/" + v + } + } + + if err != nil && err != io.EOF { + log.Fatalf("read error: %s", err) + return + } + buf := &bytes.Buffer{} + err = t.Execute(buf, cfg.Packages) + if err != nil { + log.Fatalf("excute template error: %s", err) + return + } + rs, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("format error: %s", err) + return + } + err = ioutil.WriteFile(pluginFile, rs, 0666) + if err != nil { + log.Fatalf("writeFile error: %s", err) + return + } + return +} diff --git a/plugin_imports.yml b/plugin_imports.yml new file mode 100644 index 00000000..1d7b64d0 --- /dev/null +++ b/plugin_imports.yml @@ -0,0 +1,7 @@ +packages: + - admin + - prometheus + - federation + - auth + # for external plugin, use full import path + # - github.com/DrmagicE/gmqtt/plugin/prometheus \ No newline at end of file diff --git a/server/client.go b/server/client.go index ca6589ef..9cff265d 100644 --- a/server/client.go +++ b/server/client.go @@ -1,4 +1,3 @@ -// Package server provides an MQTT v3.1.1 server library. 
package server import ( @@ -328,11 +327,8 @@ func (client *client) writePacket(packet packets.Packet) error { ) } } - err := client.packetWriter.WritePacket(packet) - if err != nil { - return err - } - return client.packetWriter.Flush() + + return client.packetWriter.WriteAndFlush(packet) } func (client *client) addServerQuota() { @@ -957,17 +953,20 @@ func (client *client) publishHandler(pub *packets.Publish) *codes.Error { var err error var topicMatched bool if !dup { + opts := defaultIterateOptions(msg.Topic) if srv.hooks.OnMsgArrived != nil { req := &MsgArrivedRequest{ - Publish: pub, - Message: msg, + Publish: pub, + Message: msg, + IterationOptions: opts, } err = srv.hooks.OnMsgArrived(context.Background(), client, req) msg = req.Message + opts = req.IterationOptions } if msg != nil && err == nil { srv.mu.Lock() - topicMatched = srv.deliverMessageHandler(client.opts.ClientID, msg) + topicMatched = srv.deliverMessageHandler(client.opts.ClientID, msg, opts) srv.mu.Unlock() } } diff --git a/server/client_test.go b/server/client_test.go index 5358f2fc..13e1f971 100644 --- a/server/client_test.go +++ b/server/client_test.go @@ -852,7 +852,7 @@ func TestClient_publishHandler_common(t *testing.T) { var deliverMessageCalled bool - srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message) (matched bool) { + srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) { a.Equal(v.clientID, srcClientID) a.Equal(gmqtt.MessageFromPublish(v.in), msg) deliverMessageCalled = true @@ -973,7 +973,7 @@ func TestClient_publishHandler_retainedMessage(t *testing.T) { config: config.DefaultConfig(), retainedDB: retainedDB, } - srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message) (matched bool) { + srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) { a.Equal(v.clientID, srcClientID) a.Equal(gmqtt.MessageFromPublish(v.in), msg) return v.topicMatched @@ -1079,7 +1079,7 @@ func TestClient_publishHandler_topicAlias(t *testing.T) { srv := &server{ config: config.DefaultConfig(), } - srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message) (matched bool) { + srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) { a.Equal(v.clientID, srcClientID) a.Equal(gmqtt.MessageFromPublish(v.in), msg) return true @@ -1142,7 +1142,7 @@ func TestClient_publishHandler_matchTopicAlias(t *testing.T) { config: config.DefaultConfig(), } var deliveredMsg []*gmqtt.Message - srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message) (matched bool) { + srv.deliverMessageHandler = func(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) { a.Equal("cid", srcClientID) deliveredMsg = append(deliveredMsg, msg) return true diff --git a/server/hook.go b/server/hook.go index eefac80a..e7899415 100644 --- a/server/hook.go +++ b/server/hook.go @@ -5,6 +5,7 @@ import ( "net" "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/persistence/subscription" "github.com/DrmagicE/gmqtt/pkg/packets" ) @@ -26,8 +27,38 @@ type Hooks struct { OnDelivered OnClosed OnMsgDropped + OnWillPublish + OnWillPublished } +// WillMsgRequest is the input param for OnWillPublish hook. +type WillMsgRequest struct { + // Message is the message that is going to send. + // The caller can edit this field to modify the will message. 
+ // If nil, the broker will drop the message. + Message *gmqtt.Message + // IterationOptions is the same as MsgArrivedRequest.IterationOptions, + // see MsgArrivedRequest for details + IterationOptions subscription.IterationOptions +} + +// Drop drops the will message, so the message will not be delivered to any clients. +func (w *WillMsgRequest) Drop() { + w.Message = nil +} + +// OnWillPublish will be called before the client with the given clientID sending the will message. +// It provides the ability to modify the message before sending. +type OnWillPublish func(ctx context.Context, clientID string, req *WillMsgRequest) + +type OnWillPublishWrapper func(OnWillPublish) OnWillPublish + +// OnWillPublished will be called after the will message has been sent by the client. +// The msg param is immutable, DO NOT EDIT. +type OnWillPublished func(ctx context.Context, clientID string, msg *gmqtt.Message) + +type OnWillPublishedWrapper func(OnWillPublished) OnWillPublished + // OnAccept will be called after a new connection established in TCP server. // If returns false, the connection will be close directly. type OnAccept func(ctx context.Context, conn net.Conn) bool @@ -143,6 +174,18 @@ type MsgArrivedRequest struct { // Message is the message that is going to be passed to topic match process. // The caller can modify it. Message *gmqtt.Message + // IterationOptions provides the the ability to change the options of topic matching process. + // In most of cases, you don't need to modify it. + // The default value is: + // subscription.IterationOptions{ + // Type: subscription.TypeAll, + // MatchType: subscription.MatchFilter, + // TopicName: msg.Topic, + // } + // The user of this field is the federation plugin. + // It will change the Type from subscription.TypeAll to subscription.subscription.TypeAll ^ subscription.TypeShared + // that will prevent publishing the shared message to local client. + IterationOptions subscription.IterationOptions } // Drop drops the message, so the message will not be delivered to any clients. diff --git a/server/plugin.go b/server/plugin.go index d92c48dc..906461a1 100644 --- a/server/plugin.go +++ b/server/plugin.go @@ -23,6 +23,8 @@ type HookWrapper struct { OnClosedWrapper OnClosedWrapper OnAcceptWrapper OnAcceptWrapper OnStopWrapper OnStopWrapper + OnWillPublishWrapper OnWillPublishWrapper + OnWillPublishedWrapper OnWillPublishedWrapper } // NewPlugin is the constructor of a plugin. 
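Reviewer note (not part of the patch): the new will-message hooks follow the same wrapper-chaining pattern as the existing hooks. The sketch below shows how a plugin could populate the two new HookWrapper fields; it is illustrative only, the package name and the topic being checked are made up, and the usual plugin boilerplate (registration, Load/Unload) is omitted. The types server.HookWrapper, server.OnWillPublishWrapper, server.OnWillPublishedWrapper, server.WillMsgRequest and WillMsgRequest.Drop all come from this patch.

// Illustrative use of the OnWillPublish / OnWillPublished wrappers added in
// server/hook.go and server/plugin.go. Hypothetical plugin code, not part of
// the patch; only the hook types are taken from this diff.
package willexample

import (
	"context"

	"github.com/DrmagicE/gmqtt"
	"github.com/DrmagicE/gmqtt/server"
)

// newWillHooks builds a HookWrapper that drops will messages published to a
// made-up sensitive topic and observes delivered will messages.
func newWillHooks() server.HookWrapper {
	return server.HookWrapper{
		// Called before the will message is delivered; the request may be
		// modified or dropped here.
		OnWillPublishWrapper: func(pre server.OnWillPublish) server.OnWillPublish {
			return func(ctx context.Context, clientID string, req *server.WillMsgRequest) {
				pre(ctx, clientID, req)
				if req.Message == nil {
					return // already dropped by an earlier hook
				}
				if req.Message.Topic == "secrets/last-will" { // hypothetical topic
					req.Drop()
				}
			}
		},
		// Called after the will message has been delivered; msg is read-only.
		OnWillPublishedWrapper: func(pre server.OnWillPublished) server.OnWillPublished {
			return func(ctx context.Context, clientID string, msg *gmqtt.Message) {
				pre(ctx, clientID, msg)
				// e.g. increment a per-client delivery counter here.
			}
		},
	}
}

As in initPluginHooks, the wrappers are applied in reverse plugin order, so the innermost function is the no-op default and each plugin decorates the chain built so far.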
diff --git a/server/publish_service.go b/server/publish_service.go index 71db652b..7a050910 100644 --- a/server/publish_service.go +++ b/server/publish_service.go @@ -8,6 +8,6 @@ type publishService struct { func (p *publishService) Publish(message *gmqtt.Message) { p.server.mu.Lock() - p.server.deliverMessageHandler("", message) + p.server.deliverMessageHandler("", message, defaultIterateOptions(message.Topic)) p.server.mu.Unlock() } diff --git a/server/server.go b/server/server.go index f93bcd1b..5d5f6101 100644 --- a/server/server.go +++ b/server/server.go @@ -37,6 +37,14 @@ var ( persistenceFactories = make(map[string]NewPersistence) ) +func defaultIterateOptions(topicName string) subscription.IterationOptions { + return subscription.IterationOptions{ + Type: subscription.TypeAll, + TopicName: topicName, + MatchType: subscription.MatchFilter, + } +} + func RegisterPersistenceFactory(name string, new NewPersistence) { if _, ok := persistenceFactories[name]; ok { panic("duplicated persistence factory: " + name) @@ -174,21 +182,18 @@ type server struct { unackStore map[string]unack.Store sessionStore session.Store - // gard config - configMu sync.RWMutex - config config.Config - hooks Hooks - plugins []Plugin - - statsManager *statsManager - publishService Publisher - + // guards config + configMu sync.RWMutex + config config.Config + hooks Hooks + plugins []Plugin + statsManager *statsManager + publishService Publisher newTopicAliasManager NewTopicAliasManager // for testing - deliverMessageHandler func(srcClientID string, msg *gmqtt.Message) (matched bool) - - clientService *clientService - apiRegistrar *apiRegistrar + deliverMessageHandler func(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) + clientService *clientService + apiRegistrar *apiRegistrar } func (srv *server) APIRegistrar() APIRegistrar { @@ -506,6 +511,24 @@ func (w *willMsg) signal(send bool) { } } +// sendWillLocked sends the will message for the client, this function must be guard by srv.Lock. 
+func (srv *server) sendWillLocked(msg *gmqtt.Message, clientID string) { + req := &WillMsgRequest{ + Message: msg, + } + if srv.hooks.OnWillPublish != nil { + srv.hooks.OnWillPublish(context.Background(), clientID, req) + } + // the will message is dropped + if req.Message == nil { + return + } + srv.deliverMessageHandler(clientID, msg, defaultIterateOptions(msg.Topic)) + if srv.hooks.OnWillPublished != nil { + srv.hooks.OnWillPublished(context.Background(), clientID, req.Message) + } +} + func (srv *server) unregisterClient(client *client) { if !client.IsConnected() { return @@ -524,6 +547,7 @@ func (srv *server) unregisterClient(client *client) { storeSession = true } } + // need to send will message if !client.cleanWillFlag && sess.Will != nil { willDelayInterval := sess.WillDelayInterval if sess.ExpiryInterval <= sess.WillDelayInterval { @@ -547,13 +571,14 @@ func (srv *server) unregisterClient(client *client) { } srv.mu.Lock() defer srv.mu.Unlock() - if send { - srv.deliverMessageHandler(clientID, msg) - } delete(srv.willMessage, clientID) + if !send { + return + } + srv.sendWillLocked(msg, clientID) }(client.opts.ClientID) } else { - srv.deliverMessageHandler(client.opts.ClientID, msg) + srv.sendWillLocked(msg, client.opts.ClientID) } } if storeSession { @@ -618,79 +643,109 @@ func (srv *server) addMsgToQueueLocked(now time.Time, clientID string, msg *gmqt } -// deliverMessage send msg to matched client, must call under srv.mu.Lock -func (srv *server) deliverMessage(srcClientID string, msg *gmqtt.Message) (matched bool) { - // subscriber (client id) list of shared subscriptions, key by share name. - sharedList := make(map[string][]struct { - clientID string - sub *gmqtt.Subscription - }) - // key by clientid - maxQos := make(map[string]*struct { - sub *gmqtt.Subscription - subIDs []uint32 - }) - now := time.Now() - // Iterate all matched topics - srv.subscriptionsDB.Iterate(func(clientID string, sub *gmqtt.Subscription) bool { +// sharedList is the subscriber (client id) list of shared subscriptions. (key by topic name). +type sharedList map[string][]struct { + clientID string + sub *gmqtt.Subscription +} + +// maxQos records the maximum qos subscription for the non-shared topic. (key by topic name). +type maxQos map[string]*struct { + sub *gmqtt.Subscription + subIDs []uint32 +} + +// deliverHandler controllers the delivery behaviors according to the DeliveryMode config. 
(overlap or onlyonce) +type deliverHandler struct { + fn subscription.IterateFn + sl sharedList + mq maxQos + matched bool + now time.Time + msg *gmqtt.Message + srv *server +} + +func newDeliverHandler(mode string, srcClientID string, msg *gmqtt.Message, now time.Time, srv *server) *deliverHandler { + d := &deliverHandler{ + sl: make(sharedList), + mq: make(maxQos), + msg: msg, + srv: srv, + now: now, + } + var iterateFn subscription.IterateFn + d.fn = func(clientID string, sub *gmqtt.Subscription) bool { if sub.NoLocal && clientID == srcClientID { return true } - matched = true - if qs := srv.queueStore[clientID]; qs != nil { - // shared - if sub.ShareName != "" { - sharedList[sub.ShareName] = append(sharedList[sub.ShareName], struct { - clientID string - sub *gmqtt.Subscription - }{clientID: clientID, sub: sub}) - } else { - if srv.config.MQTT.DeliveryMode == Overlap { - srv.addMsgToQueueLocked(now, clientID, msg.Copy(), sub, []uint32{sub.ID}, qs) - } else { - // OnlyOnce - if maxQos[clientID] == nil { - maxQos[clientID] = &struct { - sub *gmqtt.Subscription - subIDs []uint32 - }{sub: sub, subIDs: []uint32{sub.ID}} - } else { - if maxQos[clientID].sub.QoS < sub.QoS { - maxQos[clientID].sub = sub - } - maxQos[clientID].subIDs = append(maxQos[clientID].subIDs, sub.ID) - } - - } - } + d.matched = true + if sub.ShareName != "" { + fullTopic := sub.GetFullTopicName() + d.sl[fullTopic] = append(d.sl[fullTopic], struct { + clientID string + sub *gmqtt.Subscription + }{clientID: clientID, sub: sub}) + return true } - return true - }, subscription.IterationOptions{ - Type: subscription.TypeAll, - MatchType: subscription.MatchFilter, - TopicName: msg.Topic, - }) - if srv.config.MQTT.DeliveryMode == OnlyOnce { - for clientID, v := range maxQos { + return iterateFn(clientID, sub) + } + if mode == Overlap { + iterateFn = func(clientID string, sub *gmqtt.Subscription) bool { if qs := srv.queueStore[clientID]; qs != nil { - srv.addMsgToQueueLocked(now, clientID, msg.Copy(), v.sub, v.subIDs, qs) + srv.addMsgToQueueLocked(now, clientID, msg.Copy(), sub, []uint32{sub.ID}, qs) } + return true + } + } else { + iterateFn = func(clientID string, sub *gmqtt.Subscription) bool { + // If the delivery mode is onlyOnce, set the message qos to the maximum qos in matched subscriptions. + if d.mq[clientID] == nil { + d.mq[clientID] = &struct { + sub *gmqtt.Subscription + subIDs []uint32 + }{sub: sub, subIDs: []uint32{sub.ID}} + return true + } + if d.mq[clientID].sub.QoS < sub.QoS { + d.mq[clientID].sub = sub + } + d.mq[clientID].subIDs = append(d.mq[clientID].subIDs, sub.ID) + return true } } + return d +} + +func (d *deliverHandler) flush() { // shared subscription // TODO enable customize balance strategy of shared subscription - for _, v := range sharedList { + for _, v := range d.sl { var rs struct { clientID string sub *gmqtt.Subscription } // random rs = v[rand.Intn(len(v))] - if c, ok := srv.queueStore[rs.clientID]; ok { - srv.addMsgToQueueLocked(now, rs.clientID, msg.Copy(), rs.sub, []uint32{rs.sub.ID}, c) + if c, ok := d.srv.queueStore[rs.clientID]; ok { + d.srv.addMsgToQueueLocked(d.now, rs.clientID, d.msg.Copy(), rs.sub, []uint32{rs.sub.ID}, c) + } + } + // For onlyonce mode, send the non-shared messages. 
+ for clientID, v := range d.mq { + if qs := d.srv.queueStore[clientID]; qs != nil { + d.srv.addMsgToQueueLocked(d.now, clientID, d.msg.Copy(), v.sub, v.subIDs, qs) } } - return +} + +// deliverMessage send msg to matched client, must call under srv.mu.Lock +func (srv *server) deliverMessage(srcClientID string, msg *gmqtt.Message, options subscription.IterationOptions) (matched bool) { + now := time.Now() + d := newDeliverHandler(srv.config.MQTT.DeliveryMode, srcClientID, msg, now, srv) + srv.subscriptionsDB.Iterate(d.fn, options) + d.flush() + return d.matched } func (srv *server) removeSessionLocked(clientID string) (err error) { @@ -1058,6 +1113,8 @@ func (srv *server) initPluginHooks() error { OnClosedWrappers []OnClosedWrapper onStopWrappers []OnStopWrapper onMsgDroppedWrappers []OnMsgDroppedWrapper + onWillPublishWrappers []OnWillPublishWrapper + onWillPublishedWrappers []OnWillPublishedWrapper ) for _, v := range srv.config.PluginOrder { plg, err := plugins[v](srv.config) @@ -1127,6 +1184,12 @@ func (srv *server) initPluginHooks() error { if hooks.OnStopWrapper != nil { onStopWrappers = append(onStopWrappers, hooks.OnStopWrapper) } + if hooks.OnWillPublishWrapper != nil { + onWillPublishWrappers = append(onWillPublishWrappers, hooks.OnWillPublishWrapper) + } + if hooks.OnWillPublishedWrapper != nil { + onWillPublishedWrappers = append(onWillPublishedWrappers, hooks.OnWillPublishedWrapper) + } } if onAcceptWrappers != nil { onAccept := func(ctx context.Context, conn net.Conn) bool { @@ -1255,6 +1318,20 @@ func (srv *server) initPluginHooks() error { } srv.hooks.OnMsgDropped = onMsgDropped } + if onWillPublishWrappers != nil { + onWillPublish := func(ctx context.Context, clientID string, req *WillMsgRequest) {} + for i := len(onWillPublishWrappers); i > 0; i-- { + onWillPublish = onWillPublishWrappers[i-1](onWillPublish) + } + srv.hooks.OnWillPublish = onWillPublish + } + if onWillPublishedWrappers != nil { + onWillPublished := func(ctx context.Context, clientID string, msg *gmqtt.Message) {} + for i := len(onWillPublishedWrappers); i > 0; i-- { + onWillPublished = onWillPublishedWrappers[i-1](onWillPublished) + } + srv.hooks.OnWillPublished = onWillPublished + } return nil } @@ -1308,7 +1385,7 @@ func (srv *server) Run() (err error) { for _, v := range srv.websocketServer { ws = append(ws, v.Server.Addr) } - zaplog.Info("starting gmqtt server", zap.Strings("tcp server listen on", tcps), zap.Strings("websocket server listen on", ws)) + zaplog.Info("gmqtt server started", zap.Strings("tcp server listen on", tcps), zap.Strings("websocket server listen on", ws)) srv.status = serverStatusStarted srv.wg.Add(2) diff --git a/server/server_test.go b/server/server_test.go new file mode 100644 index 00000000..9df8f6df --- /dev/null +++ b/server/server_test.go @@ -0,0 +1,154 @@ +package server + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/DrmagicE/gmqtt" + "github.com/DrmagicE/gmqtt/config" + "github.com/DrmagicE/gmqtt/persistence/queue" + "github.com/DrmagicE/gmqtt/persistence/subscription/mem" + "github.com/DrmagicE/gmqtt/pkg/packets" +) + +type testDeliverMsg struct { + srv *server +} + +func newTestDeliverMsg(ctrl *gomock.Controller, subscriber string) *testDeliverMsg { + sub := mem.NewStore() + srv := &server{ + subscriptionsDB: sub, + queueStore: make(map[string]queue.Store), + config: config.DefaultConfig(), + statsManager: newStatsManager(sub), + } + mockQueue := queue.NewMockStore(ctrl) + srv.queueStore[subscriber] = 
mockQueue + return &testDeliverMsg{ + srv: srv, + } +} + +func TestServer_deliverMessage(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + subscriber := "subCli" + ts := newTestDeliverMsg(ctrl, subscriber) + srcCli := "srcCli" + msg := &gmqtt.Message{ + Topic: "/abc", + Payload: []byte("abc"), + QoS: 2, + } + srv := ts.srv + srv.subscriptionsDB.Subscribe(subscriber, &gmqtt.Subscription{ + ShareName: "", + TopicFilter: "/abc", + QoS: 1, + }, &gmqtt.Subscription{ + ShareName: "", + TopicFilter: "/+", + QoS: 2, + }) + + mockQueue := srv.queueStore[subscriber].(*queue.MockStore) + // test only once + srv.config.MQTT.DeliveryMode = OnlyOnce + mockQueue.EXPECT().Add(gomock.Any()).Do(func(elem *queue.Elem) { + a.EqualValues(elem.MessageWithID.(*queue.Publish).QoS, 2) + }) + + a.True(srv.deliverMessage(srcCli, msg, defaultIterateOptions(msg.Topic))) + + // test overlap + srv.config.MQTT.DeliveryMode = Overlap + qos := map[byte]int{ + packets.Qos1: 0, + packets.Qos2: 0, + } + mockQueue.EXPECT().Add(gomock.Any()).Do(func(elem *queue.Elem) { + _, ok := qos[elem.MessageWithID.(*queue.Publish).QoS] + a.True(ok) + qos[elem.MessageWithID.(*queue.Publish).QoS]++ + }).Times(2) + + a.True(srv.deliverMessage(srcCli, msg, defaultIterateOptions(msg.Topic))) + + a.Equal(1, qos[packets.Qos1]) + a.Equal(1, qos[packets.Qos2]) + + msg = &gmqtt.Message{ + Topic: "abcd", + } + a.False(srv.deliverMessage(srcCli, msg, defaultIterateOptions(msg.Topic))) + +} + +func TestServer_deliverMessage_sharedSubscription(t *testing.T) { + a := assert.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + subscriber := "subCli" + ts := newTestDeliverMsg(ctrl, subscriber) + srcCli := "srcCli" + msg := &gmqtt.Message{ + Topic: "/abc", + Payload: []byte("abc"), + QoS: 2, + } + srv := ts.srv + // add 2 shared and 2 non-shared subscription which both match the message topic: /abc + srv.subscriptionsDB.Subscribe(subscriber, &gmqtt.Subscription{ + ShareName: "abc", + TopicFilter: "/abc", + QoS: 1, + }, &gmqtt.Subscription{ + ShareName: "abc", + TopicFilter: "/+", + QoS: 2, + }, &gmqtt.Subscription{ + TopicFilter: "#", + QoS: 2, + }, &gmqtt.Subscription{ + TopicFilter: "/abc", + QoS: 1, + }) + + mockQueue := srv.queueStore[subscriber].(*queue.MockStore) + // test only once + qos := map[byte]int{ + packets.Qos1: 0, + packets.Qos2: 0, + } + srv.config.MQTT.DeliveryMode = OnlyOnce + mockQueue.EXPECT().Add(gomock.Any()).Do(func(elem *queue.Elem) { + _, ok := qos[elem.MessageWithID.(*queue.Publish).QoS] + a.True(ok) + qos[elem.MessageWithID.(*queue.Publish).QoS]++ + + }).Times(3) + + a.True(srv.deliverMessage(srcCli, msg, defaultIterateOptions(msg.Topic))) + a.Equal(1, qos[packets.Qos1]) + a.Equal(2, qos[packets.Qos2]) + + // test overlap + srv.config.MQTT.DeliveryMode = Overlap + qos = map[byte]int{ + packets.Qos1: 0, + packets.Qos2: 0, + } + mockQueue.EXPECT().Add(gomock.Any()).Do(func(elem *queue.Elem) { + _, ok := qos[elem.MessageWithID.(*queue.Publish).QoS] + a.True(ok) + qos[elem.MessageWithID.(*queue.Publish).QoS]++ + }).Times(4) + a.True(srv.deliverMessage(srcCli, msg, defaultIterateOptions(msg.Topic))) + a.Equal(2, qos[packets.Qos1]) + a.Equal(2, qos[packets.Qos2]) + +}
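Editorial addendum (not part of the patch): the relationship between MsgArrivedRequest.IterationOptions and the refactored deliverMessage is easiest to see from a hook. The sketch below follows the behaviour described in the server/hook.go comment for the federation plugin, i.e. excluding shared subscriptions from the local matching pass. The OnMsgArrived and Client signatures are inferred from the existing hook convention and the call in server/client.go, so treat the exact names as assumptions rather than verbatim plugin code.

// Illustrative OnMsgArrived wrapper: exclude shared subscriptions from the
// local topic-matching pass, as server/hook.go describes the federation
// plugin doing. Sketch only; signatures follow the hook types in this repo.
package arrivedexample

import (
	"context"

	"github.com/DrmagicE/gmqtt/persistence/subscription"
	"github.com/DrmagicE/gmqtt/server"
)

func onMsgArrivedWrapper(pre server.OnMsgArrived) server.OnMsgArrived {
	return func(ctx context.Context, client server.Client, req *server.MsgArrivedRequest) error {
		if err := pre(ctx, client, req); err != nil {
			return err
		}
		if req.Message == nil {
			// A previous hook dropped the message; nothing to deliver locally.
			return nil
		}
		// The default is subscription.TypeAll; clearing the shared bit keeps
		// deliverMessage from queueing the message for local shared subscribers.
		req.IterationOptions.Type = subscription.TypeAll ^ subscription.TypeShared
		return nil
	}
}

With this in place, publishHandler passes the adjusted options through to deliverMessage, whose sharedList branch then simply never fires for locally published messages.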