diff --git a/README.md b/README.md
index 5dab8da4..3152bb79 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,12 @@
[![GitHub release](https://img.shields.io/github/release/ergo-services/ergo.svg)](https://github.com/ergo-services/ergo/releases/latest)
-[![Go Report Card](https://goreportcard.com/badge/github.com/ergo-services/ergo)](https://goreportcard.com/report/github.com/ergo-services/ergo)
+[![GitBook Documentation](https://img.shields.io/badge/GitBook-Documentation-f37f40?style=flat&logo=gitbook&logoColor=white)](https://docs.ergo.services)
[![GoDoc](https://pkg.go.dev/badge/ergo-services/ergo)](https://pkg.go.dev/github.com/ergo-services/ergo)
[![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT)
[![Build Status](https://img.shields.io/github/workflow/status/ergo-services/ergo/TestLinuxWindowsMacOS)](https://github.com/ergo-services/ergo/actions/)
+[![Go Report Card](https://goreportcard.com/badge/github.com/ergo-services/ergo)](https://goreportcard.com/report/github.com/ergo-services/ergo)
+[![Slack Community](https://img.shields.io/badge/Slack-Community-3f0e40?style=flat&logo=slack)](https://ergoservices.slack.com)
Technologies and design patterns of Erlang/OTP have been proven over the years. Now in Golang.
Up to 5 times faster than the original Erlang/OTP in terms of network messaging.
@@ -59,13 +61,13 @@ The goal of this project is to leverage Erlang/OTP experience with Golang perfor
### Versioning ###
-Golang introduced [v2 rule](https://go.dev/blog/v2-go-modules) a while ago to solve complicated dependency issues. We found this solution very controversial and there is still a lot of discussion around it. So, we decided to keep the old way for the versioning, but have to use the git tag versioning with v1 as a major version (due to "v2 rule" restrictions) . As a starting point for the v2.0.0 we use git tag v1.999.200. Since now, the only "patch version" will be increased for the next releases (e.g. v2.0.1 will be tagged in git as v.1.999.201 and so on, but never be above git tag v1.999 until the moment when Golang developers change the versioning approach)
+Golang introduced the [v2 rule](https://go.dev/blog/v2-go-modules) a while ago to solve complicated dependency issues. We find this solution controversial, and there is still a lot of discussion around it. So we decided to keep the old way of versioning, but we have to use git tags with v1 as the major version (due to the "v2 rule" restrictions). As a starting point for v2.0.0 we use the git tag v1.999.200. From now on, only the "patch version" will be increased for subsequent releases (e.g. v2.0.1 will be tagged in git as v1.999.201 and so on, but the git tag will never go above v1.999 until the Golang developers change the versioning approach)
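+
+For example, fetching a release with the Go toolchain references the git tag rather than the semantic version (a sketch using the mapping above):
+```
+go get github.com/ergo-services/ergo@v1.999.200   # fetches release v2.0.0
+```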
### Changelog ###
Here are the changes of the latest release. For more details, see the [ChangeLog](ChangeLog.md)
-#### [v2.0.0](https://github.com/ergo-services/ergo/releases/tag/v1.999.200) tag version v1.999.200 - 2021-10-12 ####
+#### [v2.0.0](https://github.com/ergo-services/ergo/releases/tag/v1.999.200) 2021-10-12 [tag version v1.999.200] ####
* Added support of Erlang/OTP 24 (including [Alias](https://blog.erlang.org/My-OTP-24-Highlights/#eep-53-process-aliases) feature and [Remote Spawn](https://blog.erlang.org/OTP-23-Highlights/#distributed-spawn-and-the-new-erpc-module) introduced in Erlang/OTP 23)
* **Important**: This release includes refined API (without backward compatibility) for a more convenient way to create OTP-designed microservices. Make sure to update your code.
diff --git a/cloud/cloudapp.go b/cloud/cloudapp.go
new file mode 100644
index 00000000..9d628d33
--- /dev/null
+++ b/cloud/cloudapp.go
@@ -0,0 +1,89 @@
+package cloud
+
+import (
+ "github.com/ergo-services/ergo/etf"
+ "github.com/ergo-services/ergo/gen"
+ "github.com/ergo-services/ergo/lib"
+ "github.com/ergo-services/ergo/node"
+)
+
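+// CloudApp implements the Ergo Cloud Support Application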
+type CloudApp struct {
+ gen.Application
+ options node.Cloud
+}
+
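+// CreateApp creates an instance of the cloud support application using the given options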
+func CreateApp(options node.Cloud) gen.ApplicationBehavior {
+ return &CloudApp{
+ options: options,
+ }
+}
+
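+// Load implements the gen.ApplicationBehavior interface and returns the application spec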
+func (ca *CloudApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
+ return gen.ApplicationSpec{
+ Name: "cloud_app",
+ Description: "Ergo Cloud Support Application",
+ Version: "v.1.0",
+ Children: []gen.ApplicationChildSpec{
+ gen.ApplicationChildSpec{
+ Child: &cloudAppSup{},
+ Name: "cloud_app_sup",
+ },
+ },
+ }, nil
+}
+
+func (ca *CloudApp) Start(p gen.Process, args ...etf.Term) {
+ // add static route with custom handshake
+ // cloudHandshake = CreateCloudHandshake()
+ // node.AddStaticRoute("cloud.ergo.services", node.StaticRouteOptions)
+}
+
+type cloudAppSup struct {
+ gen.Supervisor
+}
+
+func (cas *cloudAppSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) {
+ return gen.SupervisorSpec{
+ Children: []gen.SupervisorChildSpec{
+ gen.SupervisorChildSpec{
+ Name: "cloud_client",
+ Child: &cloudClient{},
+ },
+ },
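+ // one_for_one: restart only the crashed child; allow up to 10 restarts within 5 seconds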
+ Strategy: gen.SupervisorStrategy{
+ Type: gen.SupervisorStrategyOneForOne,
+ Intensity: 10,
+ Period: 5,
+ Restart: gen.SupervisorStrategyRestartPermanent,
+ },
+ }, nil
+}
+
+type cloudClient struct {
+ gen.Server
+}
+
+func (cc *cloudClient) Init(process *gen.ServerProcess, args ...etf.Term) error {
+ lib.Log("CLOUD_CLIENT: Init: %#v", args)
+ // initiate connection with the cloud
+ return nil
+}
+
+func (cc *cloudClient) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
+ lib.Log("CLOUD_CLIENT: HandleCall: %#v, From: %#v", message, from)
+ return nil, gen.ServerStatusOK
+}
+
+func (cc *cloudClient) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
+ lib.Log("CLOUD_CLIENT: HandleCast: %#v", message)
+ return gen.ServerStatusOK
+}
+
+func (cc *cloudClient) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
+ lib.Log("CLOUD_CLIENT: HandleInfo: %#v", message)
+ return gen.ServerStatusOK
+}
+
+func (cc *cloudClient) Terminate(process *gen.ServerProcess, reason string) {
+ lib.Log("CLOUD_CLIENT: Terminated with reason: %v", reason)
+}
diff --git a/cloud/handshake.go b/cloud/handshake.go
new file mode 100644
index 00000000..9ca2ff41
--- /dev/null
+++ b/cloud/handshake.go
@@ -0,0 +1,18 @@
+package cloud
+
+import (
+ "net"
+
+ "github.com/ergo-services/ergo/node"
+)
+
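+// CloudHandshake is a custom handshake for connecting to the cloud (a stub, not implemented yet)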
+type CloudHandshake struct {
+ node.Handshake
+}
+
+func (ch *CloudHandshake) Init(nodename string, creation uint32, enabledTLS bool) error {
+ return nil
+}
+
+func (ch *CloudHandshake) Start(c net.Conn) (node.ProtoOptions, error) {
+ return node.ProtoOptions{}, nil
+}
diff --git a/debug.go b/debug.go
index 8fd6347a..89b4e8a6 100644
--- a/debug.go
+++ b/debug.go
@@ -1,4 +1,5 @@
-//+build debug
+//go:build debug
+// +build debug
package ergo
diff --git a/ergo.go b/ergo.go
index a45555f0..fb571c72 100644
--- a/ergo.go
+++ b/ergo.go
@@ -3,9 +3,11 @@ package ergo
import (
"context"
+ "github.com/ergo-services/ergo/cloud"
"github.com/ergo-services/ergo/erlang"
"github.com/ergo-services/ergo/gen"
"github.com/ergo-services/ergo/node"
+ "github.com/ergo-services/ergo/proto/dist"
)
// StartNode creates a new node with the given name and cookie string
@@ -13,16 +15,46 @@ func StartNode(name string, cookie string, opts node.Options) (node.Node, error)
return StartNodeWithContext(context.Background(), name, cookie, opts)
}
-// CreateNodeWithContext create new node with specified context, name and cookie string
+// StartNodeWithContext creates a new node with the specified context, name and cookie string.
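+//
+// A minimal usage sketch (defaults for Handshake, Proto and Resolver are
+// created below when the corresponding option is left empty):
+//
+//	demoNode, err := ergo.StartNodeWithContext(context.Background(), "demo@127.0.0.1", "cookie", node.Options{})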
func StartNodeWithContext(ctx context.Context, name string, cookie string, opts node.Options) (node.Node, error) {
version := node.Version{
Release: Version,
Prefix: VersionPrefix,
OTP: VersionOTP,
}
+ if opts.Env == nil {
+ opts.Env = make(map[gen.EnvKey]interface{})
+ }
+ opts.Env[node.EnvKeyVersion] = version
// add erlang support application
opts.Applications = append([]gen.ApplicationBehavior{&erlang.KernelApp{}}, opts.Applications...)
- return node.StartWithContext(context.WithValue(ctx, "version", version), name, cookie, opts)
+ // add cloud support if it's enabled
+ if opts.Cloud.Enabled {
+ cloudApp := cloud.CreateApp(opts.Cloud)
+ opts.Applications = append([]gen.ApplicationBehavior{cloudApp}, opts.Applications...)
+ }
+
+ if opts.Handshake == nil {
+ handshakeOptions := dist.HandshakeOptions{
+ Cookie: cookie,
+ }
+ // create default handshake for the node (Erlang Dist Handshake)
+ opts.Handshake = dist.CreateHandshake(handshakeOptions)
+ }
+
+ if opts.Proto == nil {
+ // create default proto handler (Erlang Dist Proto)
+ protoOptions := node.DefaultProtoOptions()
+ protoOptions.Compression = opts.Compression
+ opts.Proto = dist.CreateProto(name, protoOptions)
+ }
+
+ if !opts.StaticRoutesOnly && opts.Resolver == nil {
+ // create default resolver (with enabled Erlang EPMD server)
+ opts.Resolver = dist.CreateResolverWithEPMD(ctx, "", dist.DefaultEPMDPort)
+ }
+
+ return node.StartWithContext(ctx, name, cookie, opts)
}
diff --git a/erlang/appmon.go b/erlang/appmon.go
index 81925d14..4d912500 100644
--- a/erlang/appmon.go
+++ b/erlang/appmon.go
@@ -26,7 +26,6 @@ type jobDetails struct {
}
// Init initializes process state using arbitrary arguments
-// Init -> state
func (am *appMon) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("APP_MON: Init %#v", args)
from := args[0]
@@ -37,10 +36,11 @@ func (am *appMon) Init(process *gen.ServerProcess, args ...etf.Term) error {
return nil
}
+// HandleCast
func (am *appMon) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
var appState *appMonState = process.State.(*appMonState)
lib.Log("APP_MON: HandleCast: %#v", message)
- node := process.Env("ergo:Node").(node.Node)
+ node := process.Env(node.EnvKeyNode).(node.Node)
switch message {
case "sendStat":
@@ -137,7 +137,7 @@ func (am *appMon) HandleCast(process *gen.ServerProcess, message etf.Term) gen.S
}
func (am *appMon) makeAppTree(process gen.Process, app etf.Atom) etf.Tuple {
- node := process.Env("ergo:Node").(node.Node)
+ node := process.Env(node.EnvKeyNode).(node.Node)
appInfo, err := node.ApplicationInfo(string(app))
if err != nil {
return nil
diff --git a/erlang/erlang.go b/erlang/erlang.go
index f2e924df..bd02ade2 100644
--- a/erlang/erlang.go
+++ b/erlang/erlang.go
@@ -12,11 +12,13 @@ type erlang struct {
gen.Server
}
+// Init
func (e *erlang) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("ERLANG: Init: %#v", args)
return nil
}
+// HandleCall
func (e *erlang) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
lib.Log("ERLANG: HandleCall: %#v, From: %#v", message, from)
diff --git a/erlang/global_name_server.go b/erlang/global_name_server.go
index 5acb21e8..e1099654 100644
--- a/erlang/global_name_server.go
+++ b/erlang/global_name_server.go
@@ -11,6 +11,7 @@ type globalNameServer struct {
gen.Server
}
+// HandleCast
func (gns *globalNameServer) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
return gen.ServerStatusOK
}
diff --git a/erlang/net_kernel.go b/erlang/net_kernel.go
index d11816b8..a4552f62 100644
--- a/erlang/net_kernel.go
+++ b/erlang/net_kernel.go
@@ -13,17 +13,19 @@ import (
"github.com/ergo-services/ergo/lib/osdep"
)
+// KernelApp
type KernelApp struct {
gen.Application
}
+// Load
func (nka *KernelApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
return gen.ApplicationSpec{
Name: "erlang",
Description: "Erlang support app",
Version: "v.1.0",
Children: []gen.ApplicationChildSpec{
- gen.ApplicationChildSpec{
+ {
Child: &netKernelSup{},
Name: "net_kernel_sup",
},
@@ -31,32 +33,34 @@ func (nka *KernelApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
}, nil
}
+// Start
func (nka *KernelApp) Start(p gen.Process, args ...etf.Term) {}
type netKernelSup struct {
gen.Supervisor
}
+// Init
func (nks *netKernelSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) {
return gen.SupervisorSpec{
Children: []gen.SupervisorChildSpec{
- gen.SupervisorChildSpec{
+ {
Name: "net_kernel",
Child: &netKernel{},
},
- gen.SupervisorChildSpec{
+ {
Name: "global_name_server",
Child: &globalNameServer{},
},
- gen.SupervisorChildSpec{
+ {
Name: "rex",
Child: &rex{},
},
- gen.SupervisorChildSpec{
+ {
Name: "observer_backend",
Child: &observerBackend{},
},
- gen.SupervisorChildSpec{
+ {
Name: "erlang",
Child: &erlang{},
},
@@ -75,12 +79,14 @@ type netKernel struct {
routinesCtx map[etf.Pid]context.CancelFunc
}
+// Init
func (nk *netKernel) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("NET_KERNEL: Init: %#v", args)
nk.routinesCtx = make(map[etf.Pid]context.CancelFunc)
return nil
}
+// HandleCall
func (nk *netKernel) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (reply etf.Term, status gen.ServerStatus) {
lib.Log("NET_KERNEL: HandleCall: %#v, From: %#v", message, from)
status = gen.ServerStatusOK
@@ -124,6 +130,7 @@ func (nk *netKernel) HandleCall(process *gen.ServerProcess, from gen.ServerFrom,
return
}
+// HandleInfo
func (nk *netKernel) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
lib.Log("NET_KERNEL: HandleInfo: %#v", message)
switch m := message.(type) {
diff --git a/erlang/observer_backend.go b/erlang/observer_backend.go
index 0598ee3c..bb72644a 100644
--- a/erlang/observer_backend.go
+++ b/erlang/observer_backend.go
@@ -19,14 +19,13 @@ type observerBackend struct {
}
// Init initializes process state using arbitrary arguments
-// Init(...) -> state
func (o *observerBackend) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("OBSERVER: Init: %#v", args)
funProcLibInitialCall := func(a ...etf.Term) etf.Term {
return etf.Tuple{etf.Atom("proc_lib"), etf.Atom("init_p"), 5}
}
- node := process.Env("ergo:Node").(node.Node)
+ node := process.Env(node.EnvKeyNode).(node.Node)
node.ProvideRPC("proc_lib", "translate_initial_call", funProcLibInitialCall)
funAppmonInfo := func(a ...etf.Term) etf.Term {
@@ -42,6 +41,7 @@ func (o *observerBackend) Init(process *gen.ServerProcess, args ...etf.Term) err
return nil
}
+// HandleCall
func (o *observerBackend) HandleCall(state *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
lib.Log("OBSERVER: HandleCall: %v, From: %#v", message, from)
function := message.(etf.Tuple).Element(1).(etf.Atom)
@@ -68,7 +68,7 @@ func (o *observerBackend) HandleCall(state *gen.ServerProcess, from gen.ServerFr
func (o *observerBackend) sysInfo(p gen.Process) etf.List {
// observer_backend:sys_info()
- node := p.Env("ergo:Node").(node.Node)
+ node := p.Env(node.EnvKeyNode).(node.Node)
processCount := etf.Tuple{etf.Atom("process_count"), len(p.ProcessList())}
processLimit := etf.Tuple{etf.Atom("process_limit"), 262144}
atomCount := etf.Tuple{etf.Atom("atom_count"), 0}
diff --git a/erlang/rex.go b/erlang/rex.go
index 80fa975b..27db92b0 100644
--- a/erlang/rex.go
+++ b/erlang/rex.go
@@ -28,6 +28,7 @@ type rex struct {
methods map[modFun]gen.RPC
}
+// Init
func (r *rex) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("REX: Init: %#v", args)
// Do not overwrite existing methods if this process restarted
@@ -42,11 +43,12 @@ func (r *rex) Init(process *gen.ServerProcess, args ...etf.Term) error {
}
r.methods[mf] = nil
}
- node := process.Env("ergo:Node").(node.Node)
+ node := process.Env(node.EnvKeyNode).(node.Node)
node.ProvideRemoteSpawn("erpc", &erpc{})
return nil
}
+// HandleCall
func (r *rex) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
lib.Log("REX: HandleCall: %#v, From: %#v", message, from)
switch m := message.(type) {
@@ -63,7 +65,7 @@ func (r *rex) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, messag
return reply, gen.ServerStatusOK
}
- to := gen.ProcessID{string(module), process.NodeName()}
+ to := gen.ProcessID{Name: string(module), Node: process.NodeName()}
m := etf.Tuple{m.Element(3), m.Element(4)}
reply, err := process.Call(to, m)
if err != nil {
@@ -78,11 +80,13 @@ func (r *rex) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, messag
return reply, gen.ServerStatusOK
}
+// HandleInfo
func (r *rex) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
// add this handler to suppress any messages from erlang
return gen.ServerStatusOK
}
+// HandleDirect
func (r *rex) HandleDirect(process *gen.ServerProcess, message interface{}) (interface{}, error) {
switch m := message.(type) {
case gen.MessageManageRPC:
@@ -179,6 +183,7 @@ type erpc struct {
gen.Server
}
+// Init
func (e *erpc) Init(process *gen.ServerProcess, args ...etf.Term) error {
lib.Log("ERPC [%v]: Init: %#v", process.Self(), args)
mfa := erpcMFA{
@@ -192,6 +197,7 @@ func (e *erpc) Init(process *gen.ServerProcess, args ...etf.Term) error {
}
+// HandleCast
func (e *erpc) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
lib.Log("ERPC [%v]: HandleCast: %#v", process.Self(), message)
mfa := message.(erpcMFA)
diff --git a/etf/cache.go b/etf/cache.go
index 2dc2c27f..1da75ea5 100644
--- a/etf/cache.go
+++ b/etf/cache.go
@@ -1,7 +1,6 @@
package etf
import (
- "context"
"sync"
)
@@ -9,20 +8,24 @@ const (
maxCacheItems = int16(2048)
)
+// AtomCache is a shared cache that maps atoms to their cache IDs
type AtomCache struct {
cacheMap map[Atom]int16
update chan Atom
+ stop chan struct{}
lastID int16
cacheList [maxCacheItems]Atom
sync.Mutex
}
+// CacheItem
type CacheItem struct {
ID int16
Encoded bool
Name Atom
}
+// ListAtomCache
type ListAtomCache struct {
L []CacheItem
original []CacheItem
@@ -41,6 +44,7 @@ var (
}
)
+// Append
func (a *AtomCache) Append(atom Atom) {
a.Lock()
id := a.lastID
@@ -51,6 +55,7 @@ func (a *AtomCache) Append(atom Atom) {
// otherwise ignore
}
+// GetLastID
func (a *AtomCache) GetLastID() int16 {
a.Lock()
id := a.lastID
@@ -58,12 +63,18 @@ func (a *AtomCache) GetLastID() int16 {
return id
}
-func NewAtomCache(ctx context.Context) *AtomCache {
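+// Stop stops the atom cache serving routine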
+func (a *AtomCache) Stop() {
+ close(a.stop)
+}
+
+// StartAtomCache creates a new AtomCache and starts the routine that serves it.
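+//
+// A usage sketch (as in the package tests):
+//
+//	a := StartAtomCache()
+//	defer a.Stop()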
+func StartAtomCache() *AtomCache {
var id int16
a := &AtomCache{
cacheMap: make(map[Atom]int16),
update: make(chan Atom, 100),
+ stop: make(chan struct{}, 1),
lastID: -1,
}
@@ -84,7 +95,7 @@ func NewAtomCache(ctx context.Context) *AtomCache {
a.lastID = id
a.Unlock()
- case <-ctx.Done():
+ case <-a.stop:
return
}
}
@@ -93,6 +104,7 @@ func NewAtomCache(ctx context.Context) *AtomCache {
return a
}
+// List
func (a *AtomCache) List() [maxCacheItems]Atom {
a.Lock()
l := a.cacheList
@@ -100,22 +112,29 @@ func (a *AtomCache) List() [maxCacheItems]Atom {
return l
}
+// ListSince
func (a *AtomCache) ListSince(id int16) []Atom {
return a.cacheList[id:]
}
+// TakeListAtomCache
func TakeListAtomCache() *ListAtomCache {
return listAtomCachePool.Get().(*ListAtomCache)
}
+// ReleaseListAtomCache
func ReleaseListAtomCache(l *ListAtomCache) {
l.L = l.original[:0]
listAtomCachePool.Put(l)
}
+
+// Reset
func (l *ListAtomCache) Reset() {
l.L = l.original[:0]
l.HasLongAtom = false
}
+
+// Append
func (l *ListAtomCache) Append(a CacheItem) {
l.L = append(l.L, a)
if !a.Encoded && len(a.Name) > 255 {
@@ -123,6 +142,7 @@ func (l *ListAtomCache) Append(a CacheItem) {
}
}
+// Len
func (l *ListAtomCache) Len() int {
return len(l.L)
}
diff --git a/etf/cache_test.go b/etf/cache_test.go
index e7c04152..abbf53b3 100644
--- a/etf/cache_test.go
+++ b/etf/cache_test.go
@@ -1,7 +1,6 @@
package etf
import (
- "context"
"reflect"
"testing"
"time"
@@ -9,9 +8,8 @@ import (
func TestAtomCache(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- a := NewAtomCache(ctx)
+ a := StartAtomCache()
+ defer a.Stop()
a.Append(Atom("test1"))
time.Sleep(100 * time.Millisecond)
diff --git a/etf/decode.go b/etf/decode.go
index b7a36802..0ffaacc1 100644
--- a/etf/decode.go
+++ b/etf/decode.go
@@ -55,12 +55,12 @@ var (
errInternal = fmt.Errorf("Internal error")
)
+// DecodeOptions defines the options for decoding
type DecodeOptions struct {
- FlagV4NC bool
- FlagBigCreation bool
+ FlagBigPidRef bool
}
-// stackless implementation is speeding up it up to x25 times
+// the stackless implementation speeds up decoding by up to 25 times;
// it might look hard to understand the logic, but
// there are only two stages
@@ -69,6 +69,7 @@ type DecodeOptions struct {
//
// see comments within this function
+// Decode decodes the given packet into a Term, returning also the remaining bytes.
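+//
+// A usage sketch (as in the package tests):
+//
+//	term, remainder, err := Decode(packet, []Atom{}, DecodeOptions{})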
func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, retByte []byte, retErr error) {
var term Term
var stack *stackElement
@@ -525,7 +526,7 @@ func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, r
id := uint64(binary.BigEndian.Uint32(packet[:4]))
serial := uint64(binary.BigEndian.Uint32(packet[4:8]))
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
id = id | (serial << 32)
} else {
// id 15 bits only 2**15 - 1 = 32767
@@ -554,7 +555,7 @@ func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, r
Node: name,
Creation: binary.BigEndian.Uint32(packet[8:12]),
}
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
id = id | (serial << 32)
} else {
// id 15 bits only 2**15 - 1 = 32767
@@ -578,7 +579,7 @@ func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, r
if l > 5 {
return nil, nil, errMalformedRef
}
- if l > 3 && !options.FlagV4NC {
+ if l > 3 && !options.FlagBigPidRef {
return nil, nil, errMalformedRef
}
stack.tmp = nil
@@ -621,7 +622,7 @@ func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, r
if l > 5 {
return nil, nil, errMalformedRef
}
- if l > 3 && !options.FlagV4NC {
+ if l > 3 && !options.FlagBigPidRef {
return nil, nil, errMalformedRef
}
stack.tmp = nil
diff --git a/etf/encode.go b/etf/encode.go
index e0c5f965..a5da75b5 100644
--- a/etf/encode.go
+++ b/etf/encode.go
@@ -21,26 +21,28 @@ var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
+// EncodeOptions defines the options for encoding
type EncodeOptions struct {
LinkAtomCache *AtomCache
WriterAtomCache map[Atom]CacheItem
EncodingAtomCache *ListAtomCache
- // FlagV4NC The node accepts a larger amount of data in pids
+ // FlagBigPidRef The node accepts a larger amount of data in pids
// and references (node container types version 4).
// In the pid case full 32-bit ID and Serial fields in NEW_PID_EXT
// and in the reference case up to 5 32-bit ID words are now
// accepted in NEWER_REFERENCE_EXT. Introduced in OTP 24.
- FlagV4NC bool
+ FlagBigPidRef bool
// FlagBigCreation The node understands big node creation tags NEW_PID_EXT,
// NEWER_REFERENCE_EXT.
FlagBigCreation bool
}
+// Encode encodes the given term into the buffer using the provided options.
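+//
+// A usage sketch (as in the package tests):
+//
+//	b := lib.TakeBuffer()
+//	defer lib.ReleaseBuffer(b)
+//	err := Encode(term, b, EncodeOptions{})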
func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
defer func() {
- // We should catch any panic happend during encoding Golang types.
+ // We should catch any panic happened during encoding Golang types.
if r := recover(); r != nil {
retErr = fmt.Errorf("%v", r)
}
@@ -62,6 +64,7 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
// 3. if found
// add options.EncodingAtomCache[i] = CacheItem, where i is just a counter
// within this encoding process.
// encode atom as ettCacheRef with value = i
for {
@@ -101,20 +104,20 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
buf := b.Extend(9)
- // ID a 32-bit big endian unsigned integer. If distribution
- // flag DFLAG_V4_NC is not set, only 15 bits may be used
+ // ID a 32-bit big endian unsigned integer.
+ // If FlagBigPidRef is not set, only 15 bits may be used
// and the rest must be 0.
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
binary.BigEndian.PutUint32(buf[:4], uint32(p.ID))
} else {
// 15 bits only 2**15 - 1 = 32767
binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)&32767)
}
- // Serial a 32-bit big endian unsigned integer. If distribution
- // flag DFLAG_V4_NC is not set, only 13 bits may be used
+ // Serial a 32-bit big endian unsigned integer.
+ // If distribution FlagBigPidRef is not set, only 13 bits may be used
// and the rest must be 0.
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
binary.BigEndian.PutUint32(buf[4:8], uint32(p.ID>>32))
} else {
// 13 bits only 2**13 - 1 = 8191
@@ -138,14 +141,14 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
buf := b.Extend(12)
// ID
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
binary.BigEndian.PutUint32(buf[:4], uint32(p.ID))
} else {
// 15 bits only 2**15 - 1 = 32767
binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)&32767)
}
// Serial
- if options.FlagV4NC {
+ if options.FlagBigPidRef {
binary.BigEndian.PutUint32(buf[4:8], uint32(p.ID>>32))
} else {
// 13 bits only 2**13 - 1 = 8191
@@ -165,9 +168,6 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
}
lenID := 3
- if options.FlagV4NC {
- lenID = 5
- }
buf := b.Extend(1 + lenID*4)
// Only one byte long and only two bits are significant, the rest must be 0.
buf[0] = byte(r.Creation & 3)
@@ -194,10 +194,10 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
break
}
- lenID := 3
- // FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097
+ // FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097
// uncomment once they fix it
- //if options.FlagV4NC {
+ lenID := 3
+ //if options.FlagBigPidRef {
// lenID = 5
//}
buf := b.Extend(4 + lenID*4)
@@ -609,11 +609,11 @@ func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) {
}
// LEN a 16-bit big endian unsigned integer not larger
- // than 5 when the DFLAG_V4_NC has been set; otherwise not larger than 3.
+ // than 5 when the FlagBigPidRef has been set; otherwise not larger than 3.
// FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097
// uncomment once they fix it
- //if options.FlagV4NC {
+ //if options.FlagBigPidRef {
// binary.BigEndian.PutUint16(buf[1:3], 5)
//} else {
binary.BigEndian.PutUint16(buf[1:3], 3)
diff --git a/etf/encode_test.go b/etf/encode_test.go
index 553f5807..9ce4e8f6 100644
--- a/etf/encode_test.go
+++ b/etf/encode_test.go
@@ -1,7 +1,6 @@
package etf
import (
- "context"
"fmt"
"math/big"
"reflect"
@@ -27,13 +26,11 @@ func TestEncodeBoolWithAtomCache(t *testing.T) {
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 499, Encoded: true, Name: "false"}
writerAtomCache["false"] = ci
@@ -75,71 +72,71 @@ func integerCases() []integerCase {
//
// unsigned integers
//
- integerCase{"uint8::255", uint8(255), []byte{ettSmallInteger, 255}},
- integerCase{"uint16::255", uint16(255), []byte{ettSmallInteger, 255}},
- integerCase{"uint32::255", uint32(255), []byte{ettSmallInteger, 255}},
- integerCase{"uint64::255", uint64(255), []byte{ettSmallInteger, 255}},
- integerCase{"uint::255", uint(255), []byte{ettSmallInteger, 255}},
+ {"uint8::255", uint8(255), []byte{ettSmallInteger, 255}},
+ {"uint16::255", uint16(255), []byte{ettSmallInteger, 255}},
+ {"uint32::255", uint32(255), []byte{ettSmallInteger, 255}},
+ {"uint64::255", uint64(255), []byte{ettSmallInteger, 255}},
+ {"uint::255", uint(255), []byte{ettSmallInteger, 255}},
- integerCase{"uint16::256", uint16(256), []byte{ettInteger, 0, 0, 1, 0}},
+ {"uint16::256", uint16(256), []byte{ettInteger, 0, 0, 1, 0}},
- integerCase{"uint16::65535", uint16(65535), []byte{ettInteger, 0, 0, 255, 255}},
- integerCase{"uint32::65535", uint32(65535), []byte{ettInteger, 0, 0, 255, 255}},
- integerCase{"uint64::65535", uint64(65535), []byte{ettInteger, 0, 0, 255, 255}},
+ {"uint16::65535", uint16(65535), []byte{ettInteger, 0, 0, 255, 255}},
+ {"uint32::65535", uint32(65535), []byte{ettInteger, 0, 0, 255, 255}},
+ {"uint64::65535", uint64(65535), []byte{ettInteger, 0, 0, 255, 255}},
- integerCase{"uint64::65536", uint64(65536), []byte{ettInteger, 0, 1, 0, 0}},
+ {"uint64::65536", uint64(65536), []byte{ettInteger, 0, 1, 0, 0}},
// treat as an int32
- integerCase{"uint32::2147483647", uint32(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
- integerCase{"uint64::2147483647", uint64(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
- integerCase{"uint64::2147483648", uint64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 0, 128}},
+ {"uint32::2147483647", uint32(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
+ {"uint64::2147483647", uint64(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
+ {"uint64::2147483648", uint64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 0, 128}},
- integerCase{"uint32::4294967295", uint32(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}},
- integerCase{"uint64::4294967295", uint64(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}},
- integerCase{"uint64::4294967296", uint64(4294967296), []byte{ettSmallBig, 5, 0, 0, 0, 0, 0, 1}},
+ {"uint32::4294967295", uint32(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}},
+ {"uint64::4294967295", uint64(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}},
+ {"uint64::4294967296", uint64(4294967296), []byte{ettSmallBig, 5, 0, 0, 0, 0, 0, 1}},
- integerCase{"uint64::18446744073709551615", uint64(18446744073709551615), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 255}},
+ {"uint64::18446744073709551615", uint64(18446744073709551615), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 255}},
//
// signed integers
//
// negative numbers within the range of int32 are always encoded as ettInteger
- integerCase{"int8::-127", int8(-127), []byte{ettInteger, 255, 255, 255, 129}},
- integerCase{"int16::-127", int16(-127), []byte{ettInteger, 255, 255, 255, 129}},
- integerCase{"int32::-127", int32(-127), []byte{ettInteger, 255, 255, 255, 129}},
- integerCase{"int64::-127", int64(-127), []byte{ettInteger, 255, 255, 255, 129}},
- integerCase{"int::-127", int(-127), []byte{ettInteger, 255, 255, 255, 129}},
+ {"int8::-127", int8(-127), []byte{ettInteger, 255, 255, 255, 129}},
+ {"int16::-127", int16(-127), []byte{ettInteger, 255, 255, 255, 129}},
+ {"int32::-127", int32(-127), []byte{ettInteger, 255, 255, 255, 129}},
+ {"int64::-127", int64(-127), []byte{ettInteger, 255, 255, 255, 129}},
+ {"int::-127", int(-127), []byte{ettInteger, 255, 255, 255, 129}},
// positive values within the range of int8 are encoded as ettSmallInteger
- integerCase{"int8::127", int8(127), []byte{ettSmallInteger, 127}},
- integerCase{"int16::127", int16(127), []byte{ettSmallInteger, 127}},
- integerCase{"int32::127", int32(127), []byte{ettSmallInteger, 127}},
- integerCase{"int64::127", int64(127), []byte{ettSmallInteger, 127}},
+ {"int8::127", int8(127), []byte{ettSmallInteger, 127}},
+ {"int16::127", int16(127), []byte{ettSmallInteger, 127}},
+ {"int32::127", int32(127), []byte{ettSmallInteger, 127}},
+ {"int64::127", int64(127), []byte{ettSmallInteger, 127}},
// a positive int[16,32,64] value within the range of uint8 is treated as an uint8
- integerCase{"int16::128", int16(128), []byte{ettSmallInteger, 128}},
- integerCase{"int32::128", int32(128), []byte{ettSmallInteger, 128}},
- integerCase{"int64::128", int64(128), []byte{ettSmallInteger, 128}},
- integerCase{"int::128", int(128), []byte{ettSmallInteger, 128}},
+ {"int16::128", int16(128), []byte{ettSmallInteger, 128}},
+ {"int32::128", int32(128), []byte{ettSmallInteger, 128}},
+ {"int64::128", int64(128), []byte{ettSmallInteger, 128}},
+ {"int::128", int(128), []byte{ettSmallInteger, 128}},
// whether positive or negative, a value within the range of int16 is treated as an int32
- integerCase{"int16::-32767", int16(-32767), []byte{ettInteger, 255, 255, 128, 1}},
- integerCase{"int16::32767", int16(32767), []byte{ettInteger, 0, 0, 127, 255}},
+ {"int16::-32767", int16(-32767), []byte{ettInteger, 255, 255, 128, 1}},
+ {"int16::32767", int16(32767), []byte{ettInteger, 0, 0, 127, 255}},
// treat as an int32
- integerCase{"int32::2147483647", int32(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
- integerCase{"int32::-2147483648", int32(-2147483648), []byte{ettInteger, 128, 0, 0, 0}},
- integerCase{"int64::2147483647", int64(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
- integerCase{"int64::-2147483648", int64(-2147483648), []byte{ettInteger, 128, 0, 0, 0}},
+ {"int32::2147483647", int32(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
+ {"int32::-2147483648", int32(-2147483648), []byte{ettInteger, 128, 0, 0, 0}},
+ {"int64::2147483647", int64(2147483647), []byte{ettInteger, 127, 255, 255, 255}},
+ {"int64::-2147483648", int64(-2147483648), []byte{ettInteger, 128, 0, 0, 0}},
- integerCase{"int64::2147483648", int64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 0, 128}},
+ {"int64::2147483648", int64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 0, 128}},
// int64 is treated as ettSmallBig whether it's positive or negative
- integerCase{"int64::9223372036854775807", int64(9223372036854775807), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 127}},
- integerCase{"int64::-9223372036854775808", int64(-9223372036854775808), []byte{ettSmallBig, 8, 1, 0, 0, 0, 0, 0, 0, 0, 128}},
+ {"int64::9223372036854775807", int64(9223372036854775807), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 127}},
+ {"int64::-9223372036854775808", int64(-9223372036854775808), []byte{ettSmallBig, 8, 1, 0, 0, 0, 0, 0, 0, 0, 128}},
- integerCase{"big.int::-9223372036854775807123456789", bigIntNegative, []byte{ettSmallBig, 12, 1, 21, 3, 193, 203, 255, 255, 255, 255, 255, 100, 205, 29}},
+ {"big.int::-9223372036854775807123456789", bigIntNegative, []byte{ettSmallBig, 12, 1, 21, 3, 193, 203, 255, 255, 255, 255, 255, 100, 205, 29}},
}
}
@@ -260,13 +257,12 @@ func TestEncodeAtomWithCache(t *testing.T) {
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "cached atom"}
writerAtomCache["cached atom"] = ci
@@ -641,7 +637,7 @@ func TestEncodeStructWithTags(t *testing.T) {
Key1: "Hello World!",
Key2: []*Charlist{&value2, &value3, &value4},
Key3: &nested,
- Key4: [][]*Charlist{[]*Charlist{&value2, &value3, &value4}, []*Charlist{&value2, &value3, &value4}},
+ Key4: [][]*Charlist{{&value2, &value3, &value4}, {&value2, &value3, &value4}},
}
err := Encode(term, b, EncodeOptions{})
if err != nil {
@@ -658,7 +654,7 @@ func TestEncodePid(t *testing.T) {
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
- // V4NC disabled. max value for ID (15 bits), serial 0
+ // FlagBigPidRef disabled. max value for ID (15 bits), serial 0
expected := []byte{ettPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50,
55, 46, 48, 46, 48, 46, 49, 0, 0, 127, 255, 0, 0, 0, 0, 2}
term := Pid{Node: "erl-demo@127.0.0.1", ID: 32767, Creation: 2}
@@ -674,7 +670,7 @@ func TestEncodePid(t *testing.T) {
t.Fatal("incorrect value")
}
- // V4NC disabled. overflowed 15 bit. ID 0, serial 1
+ // FlagBigPidRef disabled. overflowed 15 bit. ID 0, serial 1
b.Reset()
expected = []byte{ettPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50,
55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 0, 0, 0, 0, 1, 2}
@@ -691,7 +687,7 @@ func TestEncodePid(t *testing.T) {
t.Fatal("incorrect value")
}
- // BigCreation, V4NC enabled. max value for ID (32 bits), serial 0
+ // BigCreation, FlagBigPidRef enabled. max value for ID (32 bits), serial 0
b.Reset()
expected = []byte{ettNewPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50,
55, 46, 48, 46, 48, 46, 49, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 2}
@@ -699,7 +695,7 @@ func TestEncodePid(t *testing.T) {
options := EncodeOptions{
FlagBigCreation: true,
- FlagV4NC: true,
+ FlagBigPidRef: true,
}
err = Encode(term, b, options)
if err != nil {
@@ -712,7 +708,7 @@ func TestEncodePid(t *testing.T) {
t.Fatal("incorrect value")
}
- // BigCreation, V4NC enabled. max value for ID (32 bits), max value for Serial (32 bits)
+ // BigCreation, FlagBigPidRef enabled. max value for ID (32 bits), max value for Serial (32 bits)
b.Reset()
expected = []byte{ettNewPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50,
55, 46, 48, 46, 48, 46, 49, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 2}
@@ -720,7 +716,7 @@ func TestEncodePid(t *testing.T) {
options = EncodeOptions{
FlagBigCreation: true,
- FlagV4NC: true,
+ FlagBigPidRef: true,
}
err = Encode(term, b, options)
if err != nil {
@@ -741,13 +737,11 @@ func TestEncodePidWithAtomCache(t *testing.T) {
expected := []byte{103, 82, 0, 0, 0, 1, 56, 0, 0, 0, 0, 2}
term := Pid{Node: "erl-demo@127.0.0.1", ID: 312, Creation: 2}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"}
writerAtomCache["erl-demo@127.0.0.1"] = ci
@@ -777,7 +771,7 @@ func TestEncodeRef(t *testing.T) {
b := lib.TakeBuffer()
defer lib.ReleaseBuffer(b)
- // FlagBigCreation = false, FlagV4NC = false
+ // FlagBigCreation = false, FlagBigPidRef = false
expected := []byte{ettNewRef, 0, 3, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64,
49, 50, 55, 46, 48, 46, 48, 46, 49, 3, 0, 1, 30, 228, 183, 192, 0, 1, 141,
122, 203, 35}
@@ -801,7 +795,7 @@ func TestEncodeRef(t *testing.T) {
t.Fatal("incorrect value")
}
- // FlagBigCreation = true, FlagV4NC = false
+ // FlagBigCreation = true, FlagBigPidRef = false
b.Reset()
expected = []byte{ettNewerRef, 0, 3, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64,
49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 8, 0, 1, 30, 228, 183, 192, 0, 1, 141,
@@ -831,7 +825,7 @@ func TestEncodeRef(t *testing.T) {
// FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097
// uncomment once they fix it
//
- // FlagBigCreation = true, FlagV4NC = true
+ // FlagBigCreation = true, FlagBigPidRef = true
//b.Reset()
//expected = []byte{ettNewerRef, 0, 5, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64,
// 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 8, 0, 1, 30, 228, 183, 192, 0, 1, 141,
@@ -845,7 +839,7 @@ func TestEncodeRef(t *testing.T) {
//options = EncodeOptions{
// FlagBigCreation: true,
- // FlagV4NC: true,
+ // FlagBigPidRef: true,
//}
//err = Encode(term, b, options)
//if err != nil {
@@ -952,13 +946,11 @@ func BenchmarkEncodeBoolWithAtomCache(b *testing.B) {
buf := lib.TakeBuffer()
defer lib.ReleaseBuffer(buf)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
writerAtomCache["false"] = CacheItem{ID: 499, Encoded: true, Name: "false"}
@@ -1050,13 +1042,11 @@ func BenchmarkEncodeAtomWithCache(b *testing.B) {
buf := lib.TakeBuffer()
defer lib.ReleaseBuffer(buf)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "cached atom"}
writerAtomCache["cached atom"] = ci
@@ -1275,13 +1265,11 @@ func BenchmarkEncodePidWithAtomCache(b *testing.B) {
term := Pid{Node: "erl-demo@127.0.0.1", ID: 312, Creation: 2}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"}
writerAtomCache["erl-demo@127.0.0.1"] = ci
@@ -1332,13 +1320,11 @@ func BenchmarkEncodeRefWithAtomCache(b *testing.B) {
ID: [5]uint32{73444, 3082813441, 2373634851},
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"}
writerAtomCache["erl-demo@127.0.0.1"] = ci
@@ -1398,13 +1384,11 @@ func BenchmarkEncodeTupleRefPidWithAtomCache(b *testing.B) {
ID: 312,
Creation: 2}}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
writerAtomCache := make(map[Atom]CacheItem)
encodingAtomCache := TakeListAtomCache()
defer ReleaseListAtomCache(encodingAtomCache)
- linkAtomCache := NewAtomCache(ctx)
+ linkAtomCache := StartAtomCache()
+ defer linkAtomCache.Stop()
ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"}
writerAtomCache["erl-demo@127.0.0.1"] = ci
diff --git a/etf/etf.go b/etf/etf.go
index b94501a5..a7333e8d 100644
--- a/etf/etf.go
+++ b/etf/etf.go
@@ -7,15 +7,25 @@ import (
"strings"
)
+// Term
type Term interface{}
+
+// Tuple
type Tuple []Term
+
+// List
type List []Term
+
+// Alias
type Alias Ref
// ListImproper is a workaround for Erlang's improper list [a|b]. Intended to be used to interact with Erlang.
type ListImproper []Term
+// Atom
type Atom string
+
+// Map
type Map map[Term]Term
// String this type is intended to be used to interact with Erlang. String value encodes as a binary (Erlang type: <<...>>)
@@ -24,18 +34,21 @@ type String string
// Charlist this type is intended to be used to interact with Erlang. Charlist value encodes as a list of int32 numbers in order to support Erlang string with UTF-8 symbols on an Erlang side (Erlang type: [...])
type Charlist string
+// Pid
type Pid struct {
Node Atom
ID uint64
Creation uint32
}
+// Port
type Port struct {
Node Atom
ID uint32
Creation uint32
}
+// Ref
type Ref struct {
Node Atom
Creation uint32
@@ -72,6 +85,7 @@ type Unmarshaler interface {
UnmarshalETF([]byte) error
}
+// Function
type Function struct {
Arity byte
Unique [16]byte
@@ -88,6 +102,7 @@ var (
hasher32 = fnv.New32a()
)
+// Export
type Export struct {
Module Atom
Function Atom
@@ -140,18 +155,22 @@ const (
ettFloat = byte(99) // legacy
)
+// Element
func (m Map) Element(k Term) Term {
return m[k]
}
+// Element
func (l List) Element(i int) Term {
return l[i-1]
}
+// Element
func (t Tuple) Element(i int) Term {
return t[i-1]
}
+// String
func (p Pid) String() string {
empty := Pid{}
if p == empty {
@@ -167,6 +186,7 @@ func (p Pid) String() string {
return fmt.Sprintf("<%X.%d.%d>", n, int32(p.ID>>32), int32(p.ID))
}
+// String
func (r Ref) String() string {
n := uint32(0)
if r.Node != "" {
@@ -177,6 +197,7 @@ func (r Ref) String() string {
return fmt.Sprintf("Ref#<%X.%d.%d.%d>", n, r.ID[0], r.ID[1], r.ID[2])
}
+// String
func (a Alias) String() string {
n := uint32(0)
if a.Node != "" {
@@ -187,6 +208,7 @@ func (a Alias) String() string {
return fmt.Sprintf("Ref#<%X.%d.%d.%d>", n, a.ID[0], a.ID[1], a.ID[2])
}
+// ProplistElement
type ProplistElement struct {
Name Atom
Value Term
@@ -215,7 +237,7 @@ func TermToString(t Term) (s string, ok bool) {
return
}
-// ProplistIntoStruct transorms given term into the provided struct 'dest'.
+// TermProplistIntoStruct transforms the given term into the provided struct 'dest'.
// Proplist is the list of Tuple values with two items {Name, Value},
// where Name can be a string or Atom and Value must have the same type as
// the field with the equivalent name in the 'dest' struct. It's also
@@ -463,7 +485,7 @@ func setProplistField(list List, dest reflect.Value) error {
t := dest.Type()
numField := t.NumField()
fields := make([]reflect.StructField, numField)
- for i, _ := range fields {
+ for i := range fields {
fields[i] = t.Field(i)
}
@@ -496,7 +518,7 @@ func setProplistElementField(proplist []ProplistElement, dest reflect.Value) err
t := dest.Type()
numField := t.NumField()
fields := make([]reflect.StructField, numField)
- for i, _ := range fields {
+ for i := range fields {
fields[i] = t.Field(i)
}
@@ -554,7 +576,7 @@ func setMapStructField(term Map, dest reflect.Value) error {
t := dest.Type()
numField := t.NumField()
fields := make([]reflect.StructField, numField)
- for i, _ := range fields {
+ for i := range fields {
fields[i] = t.Field(i)
}
diff --git a/etf/etf_test.go b/etf/etf_test.go
index e78948c5..90ad888b 100644
--- a/etf/etf_test.go
+++ b/etf/etf_test.go
@@ -446,9 +446,7 @@ func TestEncodeDecodePid(t *testing.T) {
t.Fatal(err)
}
- decodeOptions := DecodeOptions{
- FlagBigCreation: true,
- }
+ decodeOptions := DecodeOptions{}
term, _, err = Decode(b.B, []Atom{}, decodeOptions)
pidOut, ok = term.(Pid)
if !ok {
@@ -461,10 +459,10 @@ func TestEncodeDecodePid(t *testing.T) {
t.Fatal("incorrect result")
}
- // enable V4NC
+ // enable FlagBigPidRef
b.Reset()
encodeOptions = EncodeOptions{
- FlagV4NC: true,
+ FlagBigPidRef: true,
}
err = Encode(pidIn, b, encodeOptions)
if err != nil {
@@ -472,7 +470,7 @@ func TestEncodeDecodePid(t *testing.T) {
}
decodeOptions = DecodeOptions{
- FlagV4NC: true,
+ FlagBigPidRef: true,
}
term, _, err = Decode(b.B, []Atom{}, decodeOptions)
pidOut, ok = term.(Pid)
@@ -486,10 +484,10 @@ func TestEncodeDecodePid(t *testing.T) {
t.Fatal("incorrect result")
}
- // enable BigCreation and V4NC
+ // enable BigCreation and FlagBigPidRef
b.Reset()
encodeOptions = EncodeOptions{
- FlagV4NC: true,
+ FlagBigPidRef: true,
FlagBigCreation: true,
}
err = Encode(pidIn, b, encodeOptions)
@@ -498,8 +496,7 @@ func TestEncodeDecodePid(t *testing.T) {
}
decodeOptions = DecodeOptions{
- FlagV4NC: true,
- FlagBigCreation: true,
+ FlagBigPidRef: true,
}
term, _, err = Decode(b.B, []Atom{}, decodeOptions)
pidOut, ok = term.(Pid)
diff --git a/examples/application/demoApplication.go b/examples/application/demoApplication.go
index dcf70ff6..8bcc1d08 100644
--- a/examples/application/demoApplication.go
+++ b/examples/application/demoApplication.go
@@ -31,7 +31,7 @@ func (da *demoApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
Name: "demoApp",
Description: "Demo Application",
Version: "v.1.0",
- Environment: map[string]interface{}{
+ Environment: map[gen.EnvKey]interface{}{
"envName1": 123,
"envName2": "Hello world",
},
@@ -116,21 +116,14 @@ func (dgs *demoGenServ) HandleCall(process *gen.ServerProcess, from gen.ServerFr
}
func init() {
- flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
- flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
- flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}
func main() {
flag.Parse()
- opts := node.Options{
- ListenRangeBegin: uint16(ListenRangeBegin),
- ListenRangeEnd: uint16(ListenRangeEnd),
- EPMDPort: uint16(ListenEPMD),
- }
+ opts := node.Options{}
// Initialize new node with given name and cookie
demoNode, _ := ergo.StartNode(NodeName, Cookie, opts)
diff --git a/examples/genserver/server.go b/examples/genserver/server.go
index 1b61ba9b..77af00e7 100644
--- a/examples/genserver/server.go
+++ b/examples/genserver/server.go
@@ -16,14 +16,12 @@ type demo struct {
}
var (
- ServerName string
- NodeName string
- Cookie string
- err error
- ListenRangeBegin int
- ListenRangeEnd int = 35000
- Listen string
- ListenEPMD int
+ ServerName string
+ NodeName string
+ Cookie string
+ err error
+ ListenBegin int
+ ListenEnd int = 35000
EnableRPC bool
)
@@ -61,11 +59,10 @@ func (dgs *demo) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, mes
}
func init() {
- flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
- flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
+ flag.IntVar(&ListenBegin, "listen_begin", 15151, "listen port range")
+ flag.IntVar(&ListenEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&ServerName, "gen_server_name", "example", "gen_server name")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
- flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}
@@ -73,9 +70,8 @@ func main() {
flag.Parse()
opts := node.Options{
- ListenRangeBegin: uint16(ListenRangeBegin),
- ListenRangeEnd: uint16(ListenRangeEnd),
- EPMDPort: uint16(ListenEPMD),
+ ListenBegin: uint16(ListenBegin),
+ ListenEnd: uint16(ListenEnd),
}
// Initialize new node with given name, cookie and listening port range
diff --git a/examples/genstage/consumer.go b/examples/genstage/consumer.go
index 8310b9ee..58c4e805 100644
--- a/examples/genstage/consumer.go
+++ b/examples/genstage/consumer.go
@@ -30,7 +30,7 @@ func (c *Consumer) InitStage(process *gen.StageProcess, args ...etf.Term) (gen.S
fmt.Println("Subscribe consumer", process.Name(), "[", process.Self(), "]",
"with min events =", opts.MinDemand,
"and max events", opts.MaxDemand)
- process.Subscribe(gen.ProcessID{"producer", "node_abc@localhost"}, opts)
+ process.Subscribe(gen.ProcessID{Name: "producer", Node: "node_abc@localhost"}, opts)
return gen.StageOptions{}, nil
}
func (c *Consumer) HandleEvents(process *gen.StageProcess, subscription gen.StageSubscription, events etf.List) gen.StageStatus {
diff --git a/examples/http/app.go b/examples/http/app.go
index 21f85401..94728017 100644
--- a/examples/http/app.go
+++ b/examples/http/app.go
@@ -20,7 +20,6 @@ func (a *App) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
Name: "WebApp",
Description: "Demo Web Application",
Version: "v.1.0",
- Environment: map[string]interface{}{},
Children: []gen.ApplicationChildSpec{
gen.ApplicationChildSpec{
Child: handler_sup,
diff --git a/examples/http/main.go b/examples/http/main.go
index 83b40978..dba1cca5 100644
--- a/examples/http/main.go
+++ b/examples/http/main.go
@@ -19,21 +19,14 @@ var (
)
func init() {
- flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
- flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&NodeName, "name", "web@127.0.0.1", "node name")
- flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}
func main() {
flag.Parse()
- opts := node.Options{
- ListenRangeBegin: uint16(ListenRangeBegin),
- ListenRangeEnd: uint16(ListenRangeEnd),
- EPMDPort: uint16(ListenEPMD),
- }
+ opts := node.Options{}
// Initialize new node with given name and cookie
nodeHTTP, _ := ergo.StartNode(NodeName, Cookie, opts)
diff --git a/examples/nodetls/tlsGenServer.go b/examples/nodetls/tlsGenServer.go
index 20efba1c..51fba33f 100644
--- a/examples/nodetls/tlsGenServer.go
+++ b/examples/nodetls/tlsGenServer.go
@@ -16,14 +16,10 @@ type demoGenServ struct {
}
var (
- GenServerName string
- NodeName string
- Cookie string
- err error
- ListenRangeBegin int
- ListenRangeEnd int = 35000
- Listen string
- ListenEPMD int
+ GenServerName string
+ NodeName string
+ Cookie string
+ err error
EnableRPC bool
)
@@ -33,15 +29,26 @@ func (dgs *demoGenServ) HandleCast(process *gen.ServerProcess, message etf.Term)
switch message {
case etf.Atom("stop"):
return gen.ServerStatusStopWithReason("stop they said")
+ case "test":
+ node := process.Env(node.EnvKeyNode).(node.Node)
+ n := node.Nodes()
+ fmt.Println("nodes: ", n)
+ if err := node.Disconnect(n[0]); err != nil {
+ fmt.Println("Can't disconnect", err)
+ }
+ if err := node.Connect(n[0]); err != nil {
+ fmt.Println("Can't connect", err)
+ }
}
return gen.ServerStatusOK
}
-func (dgs *demoGenServ) HandleCall(state *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
+func (dgs *demoGenServ) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
fmt.Printf("HandleCall: %#v, From: %#v\n", message, from)
switch message {
case etf.Atom("hello"):
+ process.Cast(process.Self(), "test")
return etf.Term("hi"), gen.ServerStatusOK
}
reply := etf.Tuple{etf.Atom("error"), etf.Atom("unknown_request")}
@@ -49,11 +56,8 @@ func (dgs *demoGenServ) HandleCall(state *gen.ServerProcess, from gen.ServerFrom
}
func init() {
- flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
- flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&GenServerName, "gen_server_name", "example", "gen_server name")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
- flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}
@@ -61,19 +65,8 @@ func main() {
flag.Parse()
opts := node.Options{
- ListenRangeBegin: uint16(ListenRangeBegin),
- ListenRangeEnd: uint16(ListenRangeEnd),
- EPMDPort: uint16(ListenEPMD),
-
// enables TLS encryption with self-signed certificate
- TLSMode: node.TLSModeAuto,
-
- // set TLSmode to TLSmodeStrict to use custom certificate
- // TLSmode: ergo.TLSmodeStrict,
- // TLScrtServer: "example.crt",
- // TLSkeyServer: "example.key",
- // TLScrtClient: "example.crt",
- // TLSkeyClient: "example.key",
+ TLS: node.TLS{Enabled: true},
}
// Initialize new node with given name, cookie and enabled TLS
diff --git a/examples/supervisor/demoSupervisor.go b/examples/supervisor/demoSupervisor.go
index 2de2e9ad..4a192116 100644
--- a/examples/supervisor/demoSupervisor.go
+++ b/examples/supervisor/demoSupervisor.go
@@ -11,13 +11,9 @@ import (
)
var (
- NodeName string
- Cookie string
- err error
- ListenRangeBegin int
- ListenRangeEnd int = 35000
- Listen string
- ListenEPMD int
+ NodeName string
+ Cookie string
+ err error
EnableRPC bool
)
@@ -90,24 +86,15 @@ func (dgs *demoGenServ) Terminate(process *gen.ServerProcess, reason string) {
}
func init() {
- flag.IntVar(&ListenRangeBegin, "listen_begin", 15151, "listen port range")
- flag.IntVar(&ListenRangeEnd, "listen_end", 25151, "listen port range")
flag.StringVar(&NodeName, "name", "demo@127.0.0.1", "node name")
- flag.IntVar(&ListenEPMD, "epmd", 4369, "EPMD port")
flag.StringVar(&Cookie, "cookie", "123", "cookie for interaction with erlang cluster")
}
func main() {
flag.Parse()
- opts := node.Options{
- ListenRangeBegin: uint16(ListenRangeBegin),
- ListenRangeEnd: uint16(ListenRangeEnd),
- EPMDPort: uint16(ListenEPMD),
- }
-
// Initialize new node with given name and cookie
- node, _ := ergo.StartNode(NodeName, Cookie, opts)
+ node, _ := ergo.StartNode(NodeName, Cookie, node.Options{})
// Spawn supervisor process
process, _ := node.Spawn("demo_sup", gen.ProcessOptions{}, &demoSup{})
diff --git a/gen/application.go b/gen/application.go
index 81375540..ee71ca69 100644
--- a/gen/application.go
+++ b/gen/application.go
@@ -29,6 +29,9 @@ const (
// is with any other reason than normal, all other applications and
// the runtime system (node) are also terminated.
ApplicationStartTransient = "transient"
+
+ // EnvKeySpec is the environment key that holds the application spec during startup
+ EnvKeySpec EnvKey = "ergo:AppSpec"
)
// ApplicationBehavior interface
@@ -38,6 +41,7 @@ type ApplicationBehavior interface {
Start(process Process, args ...etf.Term)
}
+// ApplicationSpec
type ApplicationSpec struct {
sync.Mutex
Name string
@@ -45,12 +49,13 @@ type ApplicationSpec struct {
Version string
Lifespan time.Duration
Applications []string
- Environment map[string]interface{}
+ Environment map[EnvKey]interface{}
Children []ApplicationChildSpec
Process Process
StartType ApplicationStartType
}
+// ApplicationChildSpec
type ApplicationChildSpec struct {
Child ProcessBehavior
Name string
@@ -61,6 +66,7 @@ type ApplicationChildSpec struct {
// Application is implementation of ProcessBehavior interface
type Application struct{}
+// ApplicationInfo
type ApplicationInfo struct {
Name string
Description string
@@ -68,13 +74,14 @@ type ApplicationInfo struct {
PID etf.Pid
}
+// ProcessInit
func (a *Application) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
- spec, ok := p.Env("spec").(*ApplicationSpec)
+ spec, ok := p.Env(EnvKeySpec).(*ApplicationSpec)
if !ok {
return ProcessState{}, fmt.Errorf("ProcessInit: not an ApplicationBehavior")
}
// remove variable from the env
- p.SetEnv("spec", nil)
+ p.SetEnv(EnvKeySpec, nil)
p.SetTrapExit(true)
@@ -102,6 +109,7 @@ func (a *Application) ProcessInit(p Process, args ...etf.Term) (ProcessState, er
}, nil
}
+// ProcessLoop
func (a *Application) ProcessLoop(ps ProcessState, started chan<- bool) string {
spec := ps.State.(*ApplicationSpec)
defer func() { spec.Process = nil }()
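Process environment keys are now the typed gen.EnvKey instead of bare strings. A small sketch of reading a typed value the same way ProcessInit does above (the helper name is hypothetical):

```go
package demo

import (
	"github.com/ergo-services/ergo/gen"
)

// appSpec reads the application spec from the process environment;
// ok is false if the key is unset or holds a value of another type
func appSpec(p gen.Process) (*gen.ApplicationSpec, bool) {
	spec, ok := p.Env(gen.EnvKeySpec).(*gen.ApplicationSpec)
	return spec, ok
}
```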
diff --git a/gen/saga.go b/gen/saga.go
index 747b9562..c67a22f9 100644
--- a/gen/saga.go
+++ b/gen/saga.go
@@ -80,6 +80,7 @@ const (
defaultLifespan = 60
)
+// SagaStatus
type SagaStatus error
var (
@@ -100,10 +101,12 @@ var (
ErrSagaNotAllowed = fmt.Errorf("Operation is not allowed")
)
+// Saga
type Saga struct {
Server
}
+// SagaTransactionOptions
type SagaTransactionOptions struct {
	// HopLimit defines the number of hops within the transaction. Default limit
// is 0 (no limit).
@@ -117,6 +120,7 @@ type SagaTransactionOptions struct {
TwoPhaseCommit bool
}
+// SagaOptions
type SagaOptions struct {
// MaxTransactions defines the limit for the number of active transactions. Default: 0 (unlimited)
MaxTransactions uint
@@ -124,6 +128,7 @@ type SagaOptions struct {
Worker SagaWorkerBehavior
}
+// SagaProcess
type SagaProcess struct {
ServerProcess
options SagaOptions
@@ -142,13 +147,16 @@ type SagaProcess struct {
mutexJobs sync.Mutex
}
+// SagaTransactionID
type SagaTransactionID etf.Ref
+// String
func (id SagaTransactionID) String() string {
r := etf.Ref(id)
return fmt.Sprintf("TX#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2])
}
+// SagaTransaction
type SagaTransaction struct {
sync.Mutex
id SagaTransactionID
@@ -164,13 +172,16 @@ type SagaTransaction struct {
cancelTimer context.CancelFunc
}
+// SagaNextID
type SagaNextID etf.Ref
+// String
func (id SagaNextID) String() string {
r := etf.Ref(id)
return fmt.Sprintf("Next#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2])
}
+// SagaNext
type SagaNext struct {
// Saga etf.Pid, string (for the locally registered process), gen.ProcessID{process, node} (for the remote process)
Saga interface{}
@@ -186,13 +197,16 @@ type SagaNext struct {
cancelTimer context.CancelFunc
}
+// SagaJobID
type SagaJobID etf.Ref
+// String
func (id SagaJobID) String() string {
r := etf.Ref(id)
return fmt.Sprintf("Job#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2])
}
+// SagaJob
type SagaJob struct {
ID SagaJobID
TransactionID SagaTransactionID
@@ -207,6 +221,7 @@ type SagaJob struct {
cancelTimer context.CancelFunc
}
+// SagaJobOptions
type SagaJobOptions struct {
Timeout uint
}
@@ -243,12 +258,14 @@ type messageSagaCommit struct {
Final interface{}
}
+// MessageSagaCancel
type MessageSagaCancel struct {
TransactionID SagaTransactionID
NextID SagaNextID
Reason string
}
+// MessageSagaError
type MessageSagaError struct {
TransactionID SagaTransactionID
NextID SagaNextID
@@ -280,6 +297,7 @@ func (gs *Saga) SetMaxTransactions(process Process, max uint) error {
// SagaProcess methods
//
+// StartTransaction
func (sp *SagaProcess) StartTransaction(options SagaTransactionOptions, value interface{}) SagaTransactionID {
id := sp.MakeRef()
@@ -310,6 +328,7 @@ func (sp *SagaProcess) StartTransaction(options SagaTransactionOptions, value in
return SagaTransactionID(id)
}
+// Next
func (sp *SagaProcess) Next(id SagaTransactionID, next SagaNext) (SagaNextID, error) {
sp.mutexTXS.Lock()
tx, ok := sp.txs[id]
@@ -379,6 +398,7 @@ func (sp *SagaProcess) Next(id SagaTransactionID, next SagaNext) (SagaNextID, er
return next_id, nil
}
+// StartJob
func (sp *SagaProcess) StartJob(id SagaTransactionID, options SagaJobOptions, value interface{}) (SagaJobID, error) {
if sp.options.Worker == nil {
@@ -441,6 +461,7 @@ func (sp *SagaProcess) StartJob(id SagaTransactionID, options SagaJobOptions, va
return job.ID, nil
}
+// SendResult
func (sp *SagaProcess) SendResult(id SagaTransactionID, result interface{}) error {
sp.mutexTXS.Lock()
tx, ok := sp.txs[id]
@@ -497,6 +518,7 @@ func (sp *SagaProcess) SendResult(id SagaTransactionID, result interface{}) erro
return nil
}
+// SendInterim
func (sp *SagaProcess) SendInterim(id SagaTransactionID, interim interface{}) error {
sp.mutexTXS.Lock()
tx, ok := sp.txs[id]
@@ -523,6 +545,7 @@ func (sp *SagaProcess) SendInterim(id SagaTransactionID, interim interface{}) er
return nil
}
+// CancelTransaction
func (sp *SagaProcess) CancelTransaction(id SagaTransactionID, reason string) error {
sp.mutexTXS.Lock()
tx, ok := sp.txs[id]
@@ -540,6 +563,7 @@ func (sp *SagaProcess) CancelTransaction(id SagaTransactionID, reason string) er
return nil
}
+// CancelJob
func (sp *SagaProcess) CancelJob(id SagaTransactionID, job SagaJobID, reason string) error {
sp.mutexTXS.Lock()
tx, ok := sp.txs[id]
@@ -1090,6 +1114,7 @@ func (sp *SagaProcess) handleSagaDown(down MessageDown) error {
// Server callbacks
//
+// Init
func (gs *Saga) Init(process *ServerProcess, args ...etf.Term) error {
var options SagaOptions
@@ -1124,11 +1149,13 @@ func (gs *Saga) Init(process *ServerProcess, args ...etf.Term) error {
return nil
}
+// HandleCall
func (gs *Saga) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
sp := process.State.(*SagaProcess)
return sp.behavior.HandleSagaCall(sp, from, message)
}
+// HandleDirect
func (gs *Saga) HandleDirect(process *ServerProcess, message interface{}) (interface{}, error) {
sp := process.State.(*SagaProcess)
switch m := message.(type) {
@@ -1140,6 +1167,7 @@ func (gs *Saga) HandleDirect(process *ServerProcess, message interface{}) (inter
}
}
+// HandleCast
func (gs *Saga) HandleCast(process *ServerProcess, message etf.Term) ServerStatus {
var status SagaStatus
@@ -1215,6 +1243,7 @@ func (gs *Saga) HandleCast(process *ServerProcess, message etf.Term) ServerStatu
}
}
+// HandleInfo
func (gs *Saga) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus {
var mSaga messageSaga
@@ -1258,42 +1287,59 @@ func (gs *Saga) HandleInfo(process *ServerProcess, message etf.Term) ServerStatu
// default Saga callbacks
//
+// HandleTxInterim
func (gs *Saga) HandleTxInterim(process *SagaProcess, id SagaTransactionID, from SagaNextID, interim interface{}) SagaStatus {
fmt.Printf("HandleTxInterim: [%v %v] unhandled message %#v\n", id, from, interim)
return ServerStatusOK
}
+
+// HandleTxCommit
func (gs *Saga) HandleTxCommit(process *SagaProcess, id SagaTransactionID, final interface{}) SagaStatus {
fmt.Printf("HandleTxCommit: [%v] unhandled message\n", id)
return ServerStatusOK
}
+
+// HandleTxDone
func (gs *Saga) HandleTxDone(process *SagaProcess, id SagaTransactionID, result interface{}) (interface{}, SagaStatus) {
- return nil, fmt.Errorf("Saga [%v:%v] has no implementaion of HandleTxDone method", process.Self(), process.Name())
+	return nil, fmt.Errorf("Saga [%v:%v] has no implementation of HandleTxDone method", process.Self(), process.Name())
}
+// HandleSagaCall
func (gs *Saga) HandleSagaCall(process *SagaProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
fmt.Printf("HandleSagaCall: unhandled message (from %#v) %#v\n", from, message)
return etf.Atom("ok"), ServerStatusOK
}
+
+// HandleSagaCast
func (gs *Saga) HandleSagaCast(process *SagaProcess, message etf.Term) ServerStatus {
fmt.Printf("HandleSagaCast: unhandled message %#v\n", message)
return ServerStatusOK
}
+
+// HandleSagaInfo
func (gs *Saga) HandleSagaInfo(process *SagaProcess, message etf.Term) ServerStatus {
fmt.Printf("HandleSagaInfo: unhandled message %#v\n", message)
return ServerStatusOK
}
+
+// HandleSagaDirect
func (gs *Saga) HandleSagaDirect(process *SagaProcess, message interface{}) (interface{}, error) {
return nil, ErrUnsupportedRequest
}
+// HandleJobResult
func (gs *Saga) HandleJobResult(process *SagaProcess, id SagaTransactionID, from SagaJobID, result interface{}) SagaStatus {
fmt.Printf("HandleJobResult: [%v %v] unhandled message %#v\n", id, from, result)
return SagaStatusOK
}
+
+// HandleJobInterim
func (gs *Saga) HandleJobInterim(process *SagaProcess, id SagaTransactionID, from SagaJobID, interim interface{}) SagaStatus {
fmt.Printf("HandleJobInterim: [%v %v] unhandled message %#v\n", id, from, interim)
return SagaStatusOK
}
+
+// HandleJobFailed
func (gs *Saga) HandleJobFailed(process *SagaProcess, id SagaTransactionID, from SagaJobID, reason string) SagaStatus {
fmt.Printf("HandleJobFailed: [%v %v] unhandled message. reason %q\n", id, from, reason)
return nil
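Because the default HandleTxDone returns an error, any saga that completes transactions has to override it. A minimal sketch under that assumption (the type name is hypothetical; all other callbacks fall back to the defaults above):

```go
package demo

import (
	"github.com/ergo-services/ergo/gen"
)

type mySaga struct {
	gen.Saga
}

// HandleTxDone accepts the transaction result and completes normally,
// instead of returning an error like the default callback above
func (s *mySaga) HandleTxDone(process *gen.SagaProcess, id gen.SagaTransactionID,
	result interface{}) (interface{}, gen.SagaStatus) {
	return result, gen.SagaStatusOK
}
```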
diff --git a/gen/saga_worker.go b/gen/saga_worker.go
index d99c1626..5c82c3c3 100644
--- a/gen/saga_worker.go
+++ b/gen/saga_worker.go
@@ -6,6 +6,7 @@ import (
"github.com/ergo-services/ergo/etf"
)
+// SagaWorkerBehavior
type SagaWorkerBehavior interface {
ServerBehavior
// Mandatory callbacks
@@ -39,10 +40,12 @@ type SagaWorkerBehavior interface {
HandleWorkerTerminate(process *SagaWorkerProcess, reason string)
}
+// SagaWorker
type SagaWorker struct {
Server
}
+// SagaWorkerProcess
type SagaWorkerProcess struct {
ServerProcess
@@ -123,6 +126,7 @@ func (wp *SagaWorkerProcess) SendInterim(interim interface{}) error {
// Server callbacks
+// Init
func (w *SagaWorker) Init(process *ServerProcess, args ...etf.Term) error {
behavior, ok := process.Behavior().(SagaWorkerBehavior)
if !ok {
@@ -136,6 +140,7 @@ func (w *SagaWorker) Init(process *ServerProcess, args ...etf.Term) error {
return nil
}
+// HandleCast
func (w *SagaWorker) HandleCast(process *ServerProcess, message etf.Term) ServerStatus {
wp := process.State.(*SagaWorkerProcess)
switch m := message.(type) {
@@ -166,20 +171,25 @@ func (w *SagaWorker) HandleCast(process *ServerProcess, message etf.Term) Server
}
}
+// HandleCall
func (w *SagaWorker) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
p := process.State.(*SagaWorkerProcess)
return p.behavior.HandleWorkerCall(p, from, message)
}
+// HandleDirect
func (w *SagaWorker) HandleDirect(process *ServerProcess, message interface{}) (interface{}, error) {
p := process.State.(*SagaWorkerProcess)
return p.behavior.HandleWorkerDirect(p, message)
}
+
+// HandleInfo
func (w *SagaWorker) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus {
p := process.State.(*SagaWorkerProcess)
return p.behavior.HandleWorkerInfo(p, message)
}
+// Terminate
func (w *SagaWorker) Terminate(process *ServerProcess, reason string) {
p := process.State.(*SagaWorkerProcess)
p.behavior.HandleWorkerTerminate(p, reason)
@@ -187,27 +197,38 @@ func (w *SagaWorker) Terminate(process *ServerProcess, reason string) {
}
// default callbacks
+
+// HandleJobCommit
func (w *SagaWorker) HandleJobCommit(process *SagaWorkerProcess, final interface{}) {
fmt.Printf("HandleJobCommit: unhandled message %#v\n", final)
return
}
+
+// HandleWorkerInfo
func (w *SagaWorker) HandleWorkerInfo(process *SagaWorkerProcess, message etf.Term) ServerStatus {
fmt.Printf("HandleWorkerInfo: unhandled message %#v\n", message)
return ServerStatusOK
}
+
+// HandleWorkerCast
func (w *SagaWorker) HandleWorkerCast(process *SagaWorkerProcess, message etf.Term) ServerStatus {
fmt.Printf("HandleWorkerCast: unhandled message %#v\n", message)
return ServerStatusOK
}
+
+// HandleWorkerCall
func (w *SagaWorker) HandleWorkerCall(process *SagaWorkerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
fmt.Printf("HandleWorkerCall: unhandled message (from %#v) %#v\n", from, message)
return etf.Atom("ok"), ServerStatusOK
}
+
+// HandleWorkerDirect
func (w *SagaWorker) HandleWorkerDirect(process *SagaWorkerProcess, message interface{}) (interface{}, error) {
fmt.Printf("HandleWorkerDirect: unhandled message %#v\n", message)
return nil, nil
}
+// HandleWorkerTerminate
func (w *SagaWorker) HandleWorkerTerminate(process *SagaWorkerProcess, reason string) {
return
}
diff --git a/gen/server.go b/gen/server.go
index 98e5d21b..3eb7eec4 100644
--- a/gen/server.go
+++ b/gen/server.go
@@ -40,6 +40,7 @@ type ServerBehavior interface {
Terminate(process *ServerProcess, reason string)
}
+// ServerStatus
type ServerStatus error
var (
@@ -48,6 +49,7 @@ var (
ServerStatusIgnore ServerStatus = fmt.Errorf("ignore")
)
+// ServerStatusStopWithReason
func ServerStatusStopWithReason(s string) ServerStatus {
return ServerStatus(fmt.Errorf(s))
}
@@ -119,7 +121,10 @@ func (sp *ServerProcess) CallWithTimeout(to interface{}, message etf.Term, timeo
ref := sp.MakeRef()
from := etf.Tuple{sp.Self(), ref}
msg := etf.Term(etf.Tuple{etf.Atom("$gen_call"), from, message})
- if err := sp.SendSyncRequest(ref, to, msg); err != nil {
+
+ sp.PutSyncRequest(ref)
+ if err := sp.Send(to, msg); err != nil {
+ sp.CancelSyncRequest(ref)
return nil, err
}
sp.callbackWaitReply <- &ref
@@ -187,6 +192,7 @@ func (sp *ServerProcess) SendReply(from ServerFrom, reply etf.Term) error {
return sp.Send(to, rep)
}
+// ProcessInit
func (gs *Server) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
behavior, ok := p.Behavior().(ServerBehavior)
if !ok {
@@ -214,6 +220,7 @@ func (gs *Server) ProcessInit(p Process, args ...etf.Term) (ProcessState, error)
return ps, nil
}
+// ProcessLoop
func (gs *Server) ProcessLoop(ps ProcessState, started chan<- bool) string {
gsp, ok := ps.State.(*ServerProcess)
if !ok {
@@ -551,29 +558,36 @@ func (gsp *ServerProcess) handleInfo(m handleInfoMessage) {
//
// default callbacks for Server interface
//
+
+// Init
func (gs *Server) Init(process *ServerProcess, args ...etf.Term) error {
return nil
}
+// HandleCast
func (gs *Server) HandleCast(process *ServerProcess, message etf.Term) ServerStatus {
fmt.Printf("Server [%s] HandleCast: unhandled message %#v \n", process.Name(), message)
return ServerStatusOK
}
+// HandleCall
func (gs *Server) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
fmt.Printf("Server [%s] HandleCall: unhandled message %#v from %#v \n", process.Name(), message, from)
return "ok", ServerStatusOK
}
+// HandleDirect
func (gs *Server) HandleDirect(process *ServerProcess, message interface{}) (interface{}, error) {
return nil, ErrUnsupportedRequest
}
+// HandleInfo
func (gs *Server) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus {
fmt.Printf("Server [%s] HandleInfo: unhandled message %#v \n", process.Name(), message)
return ServerStatusOK
}
+// Terminate
func (gs *Server) Terminate(process *ServerProcess, reason string) {
return
}
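For behavior implementors, the former SendSyncRequest call is split into PutSyncRequest/CancelSyncRequest plus a plain Send. A simplified, hypothetical helper mirroring the CallWithTimeout flow above (it omits the internal callbackWaitReply step shown in the hunk):

```go
package demo

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// callSync issues a synchronous $gen_call request using the new API split
func callSync(sp *gen.ServerProcess, to interface{}, message etf.Term, timeout int) (etf.Term, error) {
	ref := sp.MakeRef()
	from := etf.Tuple{sp.Self(), ref}
	msg := etf.Term(etf.Tuple{etf.Atom("$gen_call"), from, message})

	sp.PutSyncRequest(ref) // register the pending request before sending
	if err := sp.Send(to, msg); err != nil {
		sp.CancelSyncRequest(ref) // roll back the registration on send failure
		return nil, err
	}
	return sp.WaitSyncReply(ref, timeout) // block until reply or timeout
}
```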
diff --git a/gen/stage.go b/gen/stage.go
index b87ddb71..8a6bcd7f 100644
--- a/gen/stage.go
+++ b/gen/stage.go
@@ -524,55 +524,67 @@ func (gst *Stage) HandleInfo(process *ServerProcess, message etf.Term) ServerSta
// default callbacks
+// InitStage
func (gst *Stage) InitStage(process *StageProcess, args ...etf.Term) error {
return nil
}
+// HandleStageCall
func (gst *Stage) HandleStageCall(process *StageProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
// default callback if it wasn't implemented
fmt.Printf("HandleStageCall: unhandled message (from %#v) %#v\n", from, message)
return etf.Atom("ok"), ServerStatusOK
}
+// HandleStageDirect
func (gst *Stage) HandleStageDirect(process *StageProcess, message interface{}) (interface{}, error) {
// default callback if it wasn't implemented
return nil, ErrUnsupportedRequest
}
+// HandleStageCast
func (gst *Stage) HandleStageCast(process *StageProcess, message etf.Term) ServerStatus {
// default callback if it wasn't implemented
fmt.Printf("HandleStageCast: unhandled message %#v\n", message)
return ServerStatusOK
}
+
+// HandleStageInfo
func (gst *Stage) HandleStageInfo(process *StageProcess, message etf.Term) ServerStatus {
	// default callback if it wasn't implemented
fmt.Printf("HandleStageInfo: unhandled message %#v\n", message)
return ServerStatusOK
}
+// HandleSubscribe
func (gst *Stage) HandleSubscribe(process *StageProcess, subscription StageSubscription, options StageSubscribeOptions) StageStatus {
return StageStatusNotAProducer
}
+// HandleSubscribed
func (gst *Stage) HandleSubscribed(process *StageProcess, subscription StageSubscription, opts StageSubscribeOptions) (bool, StageStatus) {
return opts.ManualDemand, StageStatusOK
}
+// HandleCancel
func (gst *Stage) HandleCancel(process *StageProcess, subscription StageSubscription, reason string) StageStatus {
// default callback if it wasn't implemented
return StageStatusOK
}
+// HandleCanceled
func (gst *Stage) HandleCanceled(process *StageProcess, subscription StageSubscription, reason string) StageStatus {
// default callback if it wasn't implemented
return StageStatusOK
}
+// HandleEvents
func (gst *Stage) HandleEvents(process *StageProcess, subscription StageSubscription, events etf.List) StageStatus {
fmt.Printf("Stage HandleEvents: unhandled subscription (%#v) events %#v\n", subscription, events)
return StageStatusOK
}
+// HandleDemand
func (gst *Stage) HandleDemand(process *StageProcess, subscription StageSubscription, count uint) (etf.List, StageStatus) {
fmt.Printf("Stage HandleDemand: unhandled subscription (%#v) demand %#v\n", subscription, count)
return nil, StageStatusOK
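Since every Stage callback now has a default, a consumer only needs to override the ones it cares about. A hedged sketch of a consumer stage that just processes events, assuming the remaining callbacks keep their defaults:

```go
package demo

import (
	"fmt"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// myConsumer relies on the Stage defaults above and only handles events
type myConsumer struct {
	gen.Stage
}

func (c *myConsumer) HandleEvents(process *gen.StageProcess,
	subscription gen.StageSubscription, events etf.List) gen.StageStatus {
	for _, e := range events {
		fmt.Println("got event:", e)
	}
	return gen.StageStatusOK
}
```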
diff --git a/gen/stage_dispatcher.go b/gen/stage_dispatcher.go
index bf06a234..0d37d665 100644
--- a/gen/stage_dispatcher.go
+++ b/gen/stage_dispatcher.go
@@ -27,6 +27,7 @@ type StageDispatcherBehavior interface {
Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error
}
+// StageDispatcher
type StageDispatcher int
type dispatcherDemand struct{}
type dispatcherBroadcast struct{}
@@ -126,6 +127,7 @@ type broadcastState struct {
bufferKeepLast bool
}
+// Init
func (dd *dispatcherDemand) Init(opts StageOptions) interface{} {
state := &demandState{
demands: make(map[etf.Pid]*demand),
@@ -137,6 +139,7 @@ func (dd *dispatcherDemand) Init(opts StageOptions) interface{} {
return state
}
+// Ask
func (dd *dispatcherDemand) Ask(state interface{}, subscription StageSubscription, count uint) {
st := state.(*demandState)
demand, ok := st.demands[subscription.Pid]
@@ -147,6 +150,7 @@ func (dd *dispatcherDemand) Ask(state interface{}, subscription StageSubscriptio
return
}
+// Cancel
func (dd *dispatcherDemand) Cancel(state interface{}, subscription StageSubscription) {
st := state.(*demandState)
delete(st.demands, subscription.Pid)
@@ -161,6 +165,7 @@ func (dd *dispatcherDemand) Cancel(state interface{}, subscription StageSubscrip
return
}
+// Dispatch
func (dd *dispatcherDemand) Dispatch(state interface{}, events etf.List) []StageDispatchItem {
st := state.(*demandState)
// put events into the buffer before we start dispatching
@@ -227,6 +232,7 @@ func (dd *dispatcherDemand) Dispatch(state interface{}, events etf.List) []Stage
return dispatchItems
}
+// Subscribe
func (dd *dispatcherDemand) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error {
st := state.(*demandState)
newDemand := &demand{
@@ -243,6 +249,7 @@ func (dd *dispatcherDemand) Subscribe(state interface{}, subscription StageSubsc
// Dispatcher Broadcast implementation
//
+// Init
func (db *dispatcherBroadcast) Init(opts StageOptions) interface{} {
state := &broadcastState{
demands: make(map[etf.Pid]*demand),
@@ -253,6 +260,7 @@ func (db *dispatcherBroadcast) Init(opts StageOptions) interface{} {
return state
}
+// Ask
func (db *dispatcherBroadcast) Ask(state interface{}, subscription StageSubscription, count uint) {
st := state.(*broadcastState)
demand, ok := st.demands[subscription.Pid]
@@ -264,6 +272,7 @@ func (db *dispatcherBroadcast) Ask(state interface{}, subscription StageSubscrip
return
}
+// Cancel
func (db *dispatcherBroadcast) Cancel(state interface{}, subscription StageSubscription) {
st := state.(*broadcastState)
delete(st.demands, subscription.Pid)
@@ -271,6 +280,7 @@ func (db *dispatcherBroadcast) Cancel(state interface{}, subscription StageSubsc
return
}
+// Dispatch
func (db *dispatcherBroadcast) Dispatch(state interface{}, events etf.List) []StageDispatchItem {
st := state.(*broadcastState)
// put events into the buffer before we start dispatching
@@ -318,6 +328,7 @@ func (db *dispatcherBroadcast) Dispatch(state interface{}, events etf.List) []St
return dispatchItems
}
+// Subscribe
func (db *dispatcherBroadcast) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error {
st := state.(*broadcastState)
newDemand := &demand{
@@ -357,6 +368,8 @@ func (db *dispatcherBroadcast) Subscribe(state interface{}, subscription StageSu
//
// Dispatcher Partition implementation
//
+
+// Init
func (dp *dispatcherPartition) Init(opts StageOptions) interface{} {
state := &partitionState{
demands: make(map[etf.Pid]*demand),
@@ -372,6 +385,7 @@ func (dp *dispatcherPartition) Init(opts StageOptions) interface{} {
return state
}
+// Ask
func (dp *dispatcherPartition) Ask(state interface{}, subscription StageSubscription, count uint) {
st := state.(*partitionState)
demand, ok := st.demands[subscription.Pid]
@@ -382,6 +396,7 @@ func (dp *dispatcherPartition) Ask(state interface{}, subscription StageSubscrip
return
}
+// Cancel
func (dp *dispatcherPartition) Cancel(state interface{}, subscription StageSubscription) {
st := state.(*partitionState)
demand, ok := st.demands[subscription.Pid]
@@ -400,6 +415,7 @@ func (dp *dispatcherPartition) Cancel(state interface{}, subscription StageSubsc
return
}
+// Dispatch
func (dp *dispatcherPartition) Dispatch(state interface{}, events etf.List) []StageDispatchItem {
st := state.(*partitionState)
// put events into the buffer before we start dispatching
@@ -473,6 +489,7 @@ func (dp *dispatcherPartition) Dispatch(state interface{}, events etf.List) []St
return dispatchItems
}
+// Subscribe
func (dp *dispatcherPartition) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error {
st := state.(*partitionState)
if opts.Partition > dp.n-1 {
diff --git a/gen/supervisor.go b/gen/supervisor.go
index 47ed5225..bf3914aa 100644
--- a/gen/supervisor.go
+++ b/gen/supervisor.go
@@ -14,6 +14,7 @@ type SupervisorBehavior interface {
Init(args ...etf.Term) (SupervisorSpec, error)
}
+// SupervisorStrategy
type SupervisorStrategy struct {
Type SupervisorStrategyType
Intensity uint16
@@ -21,7 +22,10 @@ type SupervisorStrategy struct {
Restart SupervisorStrategyRestart
}
+// SupervisorStrategyType
type SupervisorStrategyType = string
+
+// SupervisorStrategyRestart
type SupervisorStrategyRestart = string
const (
@@ -76,6 +80,7 @@ const (
type supervisorChildState int
+// SupervisorSpec
type SupervisorSpec struct {
Name string
Children []SupervisorChildSpec
@@ -83,6 +88,7 @@ type SupervisorSpec struct {
restarts []int64
}
+// SupervisorChildSpec
type SupervisorChildSpec struct {
// Node to run child on remote node
Node string
@@ -101,6 +107,7 @@ type messageStartChild struct {
args []etf.Term
}
+// ProcessInit
func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
behavior, ok := p.Behavior().(SupervisorBehavior)
if !ok {
@@ -110,7 +117,7 @@ func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, er
if err != nil {
return ProcessState{}, err
}
- lib.Log("Supervisor spec %#v\n", spec)
+ lib.Log("[%s] SUPERVISOR %q with restart strategy: %s[%s] ", p.NodeName(), p.Name(), spec.Strategy.Type, spec.Strategy.Restart)
p.SetTrapExit(true)
return ProcessState{
@@ -119,6 +126,7 @@ func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, er
}, nil
}
+// ProcessLoop
func (sv *Supervisor) ProcessLoop(ps ProcessState, started chan<- bool) string {
spec := ps.State.(*SupervisorSpec)
if spec.Strategy.Type != SupervisorStrategySimpleOneForOne {
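The new log line prints the supervisor's restart strategy at start. For context, a hedged sketch of the spec that feeds it, reusing the demoSup/demoGenServ names from the example above; the Strategy constant names are assumed from the library and are not shown in this diff:

```go
package demo

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

type demoSup struct {
	gen.Supervisor
}

func (s *demoSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) {
	return gen.SupervisorSpec{
		Name: "demo_sup",
		Children: []gen.SupervisorChildSpec{
			{Name: "worker", Child: &demoGenServ{}},
		},
		Strategy: gen.SupervisorStrategy{
			Type:      gen.SupervisorStrategyOneForOne,        // assumed constant name
			Intensity: 5,
			Restart:   gen.SupervisorStrategyRestartTransient, // assumed constant name
		},
	}, nil
}
```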
diff --git a/gen/types.go b/gen/types.go
index 2da6c68a..6ab7756e 100644
--- a/gen/types.go
+++ b/gen/types.go
@@ -13,13 +13,22 @@ var (
ErrServerTerminated = fmt.Errorf("Server terminated")
)
+// EnvKey
+type EnvKey string
+
+// Process
type Process interface {
- Registrar
+ Core
	// Spawn creates a new process with parent
Spawn(name string, opts ProcessOptions, object ProcessBehavior, args ...etf.Term) (Process, error)
- // RemoteSpawn creates a new process at a remote node. The object name is a regitered behavior on a remote name using RegisterBehavior(...). Init callback of the started remote process will receive gen.RemoteSpawnRequest as an argument.
+
+	// RemoteSpawn creates a new process at a remote node. The object name is a behavior
+	// registered on the remote node using RegisterBehavior(...). The given options will be
+	// stored in the process environment using node.EnvKeyRemoteSpawn as a key
RemoteSpawn(node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error)
+ RemoteSpawnWithTimeout(timeout int, node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error)
+
// Name returns process name used on starting.
Name() string
@@ -29,6 +38,15 @@ type Process interface {
// UnregisterName unregister named process. Unregistering name is allowed to the owner only
UnregisterName(name string) error
+ // NodeName returns node name
+ NodeName() string
+
+ // NodeStop stops the node
+ NodeStop()
+
+	// NodeUptime returns node uptime in seconds
+ NodeUptime() int64
+
// Info returns process details
Info() ProcessInfo
@@ -55,7 +73,7 @@ type Process interface {
// Exit initiate a graceful stopping process
Exit(reason string) error
- // Kill immidiately stops process
+ // Kill immediately stops process
Kill()
// CreateAlias creates a new alias for the Process
@@ -65,15 +83,15 @@ type Process interface {
DeleteAlias(alias etf.Alias) error
// ListEnv returns a map of configured environment variables.
- // It also includes environment variables from the GroupLeader and Parent.
- // which are overlapped by priority: Process(Parent(GroupLeader))
- ListEnv() map[string]interface{}
+	// It also includes environment variables from the GroupLeader, Parent and Node,
+	// which are overridden by priority: Process(Parent(GroupLeader(Node)))
+ ListEnv() map[EnvKey]interface{}
// SetEnv set environment variable with given name. Use nil value to remove variable with given name.
- SetEnv(name string, value interface{})
+ SetEnv(name EnvKey, value interface{})
// Env returns value associated with given environment name.
- Env(name string) interface{}
+ Env(name EnvKey) interface{}
// Wait waits until process stopped
Wait()
@@ -86,7 +104,7 @@ type Process interface {
// Links are bidirectional and there can only be one link between two processes.
// Repeated calls to Process.Link(Pid) have no effect. If one of the participants
// of a link terminates, it will send an exit signal to the other participant and caused
- // termination of the last one (if this process hasn't set a trap using Process.SetTrapExit(true)).
+	// termination of the last one. If the process has set a trap using Process.SetTrapExit(true), the exit signal is transformed into a MessageExit and delivered as a regular message.
Link(with etf.Pid)
// Unlink removes the link, if there is one, between the calling process and
@@ -104,8 +122,14 @@ type Process interface {
// TrapExit returns whether the trap was enabled on this process
TrapExit() bool
+ // SetCompression enables/disables compression for the messages sent outside of this node
+ SetCompression(enabled bool)
+
+ // Compression returns true if compression is enabled for this process
+ Compression() bool
+
// MonitorNode creates monitor between the current process and node. If Node fails or does not exist,
- // the message {nodedown, Node} is delivered to the process.
+ // the message MessageNodeDown is delivered to the process.
MonitorNode(name string) etf.Ref
// DemonitorNode removes monitor. Returns false if the given reference wasn't found
@@ -144,12 +168,10 @@ type Process interface {
// Aliases returns list of aliases of this process.
Aliases() []etf.Alias
- // Methods below are intended to be used for the ProcessBehavior implementation
-
- SendSyncRequestRaw(ref etf.Ref, node etf.Atom, messages ...etf.Term) error
- PutSyncReply(ref etf.Ref, term etf.Term) error
- SendSyncRequest(ref etf.Ref, to interface{}, message etf.Term) error
+ PutSyncRequest(ref etf.Ref)
+ CancelSyncRequest(ref etf.Ref)
WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error)
+ PutSyncReply(ref etf.Ref, term etf.Term) error
ProcessChannels() ProcessChannels
}
@@ -169,66 +191,68 @@ type ProcessInfo struct {
TrapExit bool
GroupLeader etf.Pid
Reductions uint64
+ Compression bool
}
+// ProcessOptions
type ProcessOptions struct {
	// Context allows mixing the system context with a custom one, e.g. to limit
// the lifespan using context.WithTimeout
Context context.Context
- // MailboxSize defines the lenght of message queue for the process
+ // MailboxSize defines the length of message queue for the process
MailboxSize uint16
// GroupLeader
GroupLeader Process
// Env set the process environment variables
- Env map[string]interface{}
+ Env map[EnvKey]interface{}
+}
+
+// RemoteSpawnRequest
+type RemoteSpawnRequest struct {
+ From etf.Pid
+ Ref etf.Ref
+ Options RemoteSpawnOptions
}
// RemoteSpawnOptions defines options for RemoteSpawn method
type RemoteSpawnOptions struct {
- // RegisterName
- RegisterName string
+	// Name registers the associated name for the spawned process
+ Name string
// Monitor enables monitor on the spawned process using provided reference
Monitor etf.Ref
// Link enables link between the calling and spawned processes
Link bool
// Function in order to support {M,F,A} request to the Erlang node
Function string
- // Timeout
- Timeout int
-}
-
-// RemoteSpawnRequest stores in process environment ("ergo:RemoteSpawnRequest") if it was spawned by RemoteSpawn request
-type RemoteSpawnRequest struct {
- // Ref request id
- Ref etf.Ref
- // PID of the process made RemoteSpawn request
- From etf.Pid
- // Function provided via RemoteSpawnOptions.Function
- Function string
}
+// ProcessChannels
type ProcessChannels struct {
Mailbox <-chan ProcessMailboxMessage
Direct <-chan ProcessDirectMessage
GracefulExit <-chan ProcessGracefulExitRequest
}
+// ProcessMailboxMessage
type ProcessMailboxMessage struct {
From etf.Pid
Message interface{}
}
+// ProcessDirectMessage
type ProcessDirectMessage struct {
Message interface{}
Err error
Reply chan ProcessDirectMessage
}
+// ProcessGracefulExitRequest
type ProcessGracefulExitRequest struct {
From etf.Pid
Reason string
}
+// ProcessState
type ProcessState struct {
Process
State interface{}
@@ -239,42 +263,50 @@ type ProcessBehavior interface {
ProcessInit(Process, ...etf.Term) (ProcessState, error)
ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process
}
-type Registrar interface {
- Monitor
- NodeName() string
- NodeStop()
+// Core is the common set of methods provided by the Process and node.Node interfaces
+type Core interface {
- // ProcessByName returns Process struct for the given name.
- // Returns nil if it doesn't exist (not found)
+ // ProcessByName returns Process for the given name.
+ // Returns nil if it doesn't exist (not found) or terminated.
ProcessByName(name string) Process
- // ProcessByPid returns Process struct for the given Pid.
- // Returns nil if it doesn't exist (not found)
+
+ // ProcessByPid returns Process for the given Pid.
+ // Returns nil if it doesn't exist (not found) or terminated.
ProcessByPid(pid etf.Pid) Process
- // ProcessByAlias returns Process struct for the given alias.
- // Returns nil if it doesn't exist (not found)
+ // ProcessByAlias returns Process for the given alias.
+ // Returns nil if it doesn't exist (not found) or terminated
ProcessByAlias(alias etf.Alias) Process
// ProcessInfo returns the details about given Pid
ProcessInfo(pid etf.Pid) (ProcessInfo, error)
+
+ // ProcessList returns the list of running processes
ProcessList() []Process
- IsAlias(etf.Alias) bool
+
+	// MakeRef creates a unique reference within this node
MakeRef() etf.Ref
- // IsProcessAlive returns true if the process with given pid is alive
- IsProcessAlive(process Process) bool
+	// IsAlias checks whether the given alias belongs to an alive process on this node.
+	// If the process has died, all its aliases are cleaned up and this function returns
+	// false for the given alias. For an alias from a remote node it always returns false.
+ IsAlias(etf.Alias) bool
+ // IsMonitor returns true if the given references is a monitor
+ IsMonitor(ref etf.Ref) bool
+
+ // RegisterBehavior
RegisterBehavior(group, name string, behavior ProcessBehavior, data interface{}) error
+ // RegisteredBehavior
RegisteredBehavior(group, name string) (RegisteredBehavior, error)
+ // RegisteredBehaviorGroup
RegisteredBehaviorGroup(group string) []RegisteredBehavior
+ // UnregisterBehavior
UnregisterBehavior(group, name string) error
}
-type Monitor interface {
- IsMonitor(ref etf.Ref) bool
-}
-
+// RegisteredBehavior
type RegisteredBehavior struct {
Behavior ProcessBehavior
Data interface{}
@@ -286,6 +318,11 @@ type ProcessID struct {
Node string
}
+// String returns the string representation of a ProcessID value
+func (p ProcessID) String() string {
+ return fmt.Sprintf("<%s:%s>", p.Name, p.Node)
+}
+
// MessageDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorProcess.
// Reason values:
@@ -322,8 +359,12 @@ type MessageManageRPC struct {
Fun RPC
}
+// MessageDirectChildren is a type intended to be used in Process.Children, which returns []etf.Pid.
+// You can handle this type of message in your HandleDirect callback to enable Process.Children
+// support for your gen.Server actor.
type MessageDirectChildren struct{}
+// IsMessageDown
func IsMessageDown(message etf.Term) (MessageDown, bool) {
var md MessageDown
switch m := message.(type) {
@@ -333,6 +374,7 @@ func IsMessageDown(message etf.Term) (MessageDown, bool) {
return md, false
}
+// IsMessageExit
func IsMessageExit(message etf.Term) (MessageExit, bool) {
var me MessageExit
switch m := message.(type) {
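Putting the types.go changes together: typed environment keys plus the IsMessageExit helper. A hedged sketch of a HandleInfo callback that handles a trapped exit signal, reusing the demoGenServ name from the example above and assuming MessageExit carries Pid and Reason fields:

```go
package demo

import (
	"fmt"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

const keyRegion gen.EnvKey = "region" // hypothetical application-defined key

func (dgs *demoGenServ) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
	if exit, ok := gen.IsMessageExit(message); ok {
		fmt.Printf("linked process %v exited: %s\n", exit.Pid, exit.Reason)
		return gen.ServerStatusOK
	}
	process.SetEnv(keyRegion, "eu-west-1")
	fmt.Println("region:", process.Env(keyRegion))
	return gen.ServerStatusOK
}
```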
diff --git a/lib/osdep/bsd.go b/lib/osdep/bsd.go
index 89d4fecd..dc8c2278 100644
--- a/lib/osdep/bsd.go
+++ b/lib/osdep/bsd.go
@@ -1,3 +1,4 @@
+//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly
package osdep
@@ -6,6 +7,7 @@ import (
"syscall"
)
+// ResourceUsage
func ResourceUsage() (int64, int64) {
var usage syscall.Rusage
var utime, stime int64
diff --git a/lib/osdep/darwin.go b/lib/osdep/darwin.go
index 8b4a7dbf..30742c70 100644
--- a/lib/osdep/darwin.go
+++ b/lib/osdep/darwin.go
@@ -1,3 +1,4 @@
+//go:build darwin
// +build darwin
package osdep
@@ -6,6 +7,7 @@ import (
"syscall"
)
+// ResourceUsage
func ResourceUsage() (int64, int64) {
var usage syscall.Rusage
var utime, stime int64
diff --git a/lib/osdep/linux.go b/lib/osdep/linux.go
index 091968f1..1c2e1cdc 100644
--- a/lib/osdep/linux.go
+++ b/lib/osdep/linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package osdep
@@ -6,6 +7,7 @@ import (
"syscall"
)
+// ResourceUsage
func ResourceUsage() (int64, int64) {
var usage syscall.Rusage
var utime, stime int64
diff --git a/lib/osdep/windows.go b/lib/osdep/windows.go
index 337d7a80..37050775 100644
--- a/lib/osdep/windows.go
+++ b/lib/osdep/windows.go
@@ -1,7 +1,9 @@
+//go:build windows
// +build windows
package osdep
+// ResourceUsage
func ResourceUsage() (int64, int64) {
// FIXME Windows doesn't support syscall.Rusage. There should be another
// way to get this kind of data from the OS
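A short usage sketch of ResourceUsage; the units are platform-dependent, and per the FIXME above both values are currently zero on Windows:

```go
package main

import (
	"fmt"

	"github.com/ergo-services/ergo/lib/osdep"
)

func main() {
	// user and system CPU time consumed by this process
	utime, stime := osdep.ResourceUsage()
	fmt.Println("utime:", utime, "stime:", stime)
}
```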
diff --git a/lib/tools.go b/lib/tools.go
index 32d63258..a1e4bc73 100644
--- a/lib/tools.go
+++ b/lib/tools.go
@@ -11,6 +11,7 @@ import (
"time"
)
+// Buffer
type Buffer struct {
B []byte
original []byte
@@ -45,29 +46,35 @@ func init() {
flag.BoolVar(&ergoNoRecover, "ergo.norecover", false, "disable panic catching")
}
+// Log
func Log(f string, a ...interface{}) {
if ergoTrace {
log.Printf(f, a...)
}
}
+// CatchPanic
func CatchPanic() bool {
return ergoNoRecover == false
}
+// TakeTimer
func TakeTimer() *time.Timer {
return timers.Get().(*time.Timer)
}
+// ReleaseTimer
func ReleaseTimer(t *time.Timer) {
t.Stop()
timers.Put(t)
}
+// TakeBuffer
func TakeBuffer() *Buffer {
return buffers.Get().(*Buffer)
}
+// ReleaseBuffer
func ReleaseBuffer(b *Buffer) {
	// do not return it to the pool if it grew too big
if cap(b.B) > 65536 {
@@ -79,31 +86,38 @@ func ReleaseBuffer(b *Buffer) {
buffers.Put(b)
}
+// Reset
func (b *Buffer) Reset() {
// use the original start point of the slice
b.B = b.original[:0]
}
+// Set
func (b *Buffer) Set(v []byte) {
b.B = append(b.B[:0], v...)
}
+// AppendByte
func (b *Buffer) AppendByte(v byte) {
b.B = append(b.B, v)
}
+// Append
func (b *Buffer) Append(v []byte) {
b.B = append(b.B, v...)
}
+// String
func (b *Buffer) String() string {
return string(b.B)
}
+// Len
func (b *Buffer) Len() int {
return len(b.B)
}
+// WriteDataTo
func (b *Buffer) WriteDataTo(w io.Writer) error {
l := len(b.B)
if l == 0 {
@@ -128,6 +142,7 @@ func (b *Buffer) WriteDataTo(w io.Writer) error {
return nil
}
+// ReadDataFrom
func (b *Buffer) ReadDataFrom(r io.Reader, limit int) (int, error) {
capB := cap(b.B)
lenB := len(b.B)
@@ -156,6 +171,7 @@ func (b *Buffer) increase() {
b.B = b1
}
+// Allocate
func (b *Buffer) Allocate(n int) {
for {
if cap(b.B) < n {
@@ -167,6 +183,7 @@ func (b *Buffer) Allocate(n int) {
}
}
+// Extend
func (b *Buffer) Extend(n int) []byte {
l := len(b.B)
e := l + n
@@ -180,6 +197,7 @@ func (b *Buffer) Extend(n int) []byte {
}
}
+// RandomString
func RandomString(length int) string {
buff := make([]byte, length/2)
rand.Read(buff)
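A small usage sketch of the pooled Buffer; per ReleaseBuffer above, buffers grown past 64 KiB are dropped rather than returned to the pool:

```go
package main

import (
	"fmt"

	"github.com/ergo-services/ergo/lib"
)

func main() {
	b := lib.TakeBuffer()
	defer lib.ReleaseBuffer(b) // oversized buffers are discarded, not pooled
	b.Append([]byte("hello"))
	b.AppendByte('!')
	fmt.Println(b.String(), b.Len()) // hello! 6
}
```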
diff --git a/node/core.go b/node/core.go
new file mode 100644
index 00000000..39acb34b
--- /dev/null
+++ b/node/core.go
@@ -0,0 +1,866 @@
+package node
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ergo-services/ergo/etf"
+ "github.com/ergo-services/ergo/gen"
+ "github.com/ergo-services/ergo/lib"
+)
+
+const (
+ startPID = 1000
+)
+
+type core struct {
+ monitorInternal
+ networkInternal
+
+ ctx context.Context
+ stop context.CancelFunc
+
+ env map[gen.EnvKey]interface{}
+ mutexEnv sync.RWMutex
+
+ compression bool
+ tls TLS
+
+ nextPID uint64
+ uniqID uint64
+ nodename string
+ creation uint32
+
+ names map[string]etf.Pid
+ mutexNames sync.RWMutex
+ aliases map[etf.Alias]*process
+ mutexAliases sync.RWMutex
+ processes map[uint64]*process
+ mutexProcesses sync.RWMutex
+
+ behaviors map[string]map[string]gen.RegisteredBehavior
+ mutexBehaviors sync.Mutex
+}
+
+type coreInternal interface {
+ gen.Core
+ CoreRouter
+
+ // core environment
+ ListEnv() map[gen.EnvKey]interface{}
+ SetEnv(name gen.EnvKey, value interface{})
+ Env(name gen.EnvKey) interface{}
+
+ monitorInternal
+ networkInternal
+
+ spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error)
+
+ registerName(name string, pid etf.Pid) error
+ unregisterName(name string) error
+
+ newAlias(p *process) (etf.Alias, error)
+ deleteAlias(owner *process, alias etf.Alias) error
+
+ coreNodeName() string
+ coreStop()
+ coreUptime() int64
+ coreIsAlive() bool
+
+ coreWait()
+ coreWaitWithTimeout(d time.Duration) error
+}
+
+type coreRouterInternal interface {
+ CoreRouter
+ processByPid(pid etf.Pid) *process
+}
+
+func newCore(ctx context.Context, nodename string, options Options) (coreInternal, error) {
+ c := &core{
+ ctx: ctx,
+ env: options.Env,
+ nextPID: startPID,
+ uniqID: uint64(time.Now().UnixNano()),
+		// keep the node name so spawned processes can access the node's methods
+ nodename: nodename,
+ compression: options.Compression,
+ creation: options.Creation,
+ names: make(map[string]etf.Pid),
+ aliases: make(map[etf.Alias]*process),
+ processes: make(map[uint64]*process),
+ behaviors: make(map[string]map[string]gen.RegisteredBehavior),
+ }
+
+ corectx, corestop := context.WithCancel(ctx)
+ c.stop = corestop
+ c.ctx = corectx
+
+ c.monitorInternal = newMonitor(nodename, coreRouterInternal(c))
+ network, err := newNetwork(c.ctx, nodename, options, CoreRouter(c))
+ if err != nil {
+ corestop()
+ return nil, err
+ }
+ c.networkInternal = network
+ return c, nil
+}
+
+func (c *core) coreNodeName() string {
+ return c.nodename
+}
+
+func (c *core) coreStop() {
+ c.stop()
+ c.stopNetwork()
+}
+
+func (c *core) coreUptime() int64 {
+ return time.Now().Unix() - int64(c.creation)
+}
+
+func (c *core) coreWait() {
+ <-c.ctx.Done()
+}
+
+// coreWaitWithTimeout waits until the node is stopped. Returns ErrTimeout
+// if the given timeout is exceeded
+func (c *core) coreWaitWithTimeout(d time.Duration) error {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+
+ select {
+ case <-timer.C:
+ return ErrTimeout
+ case <-c.ctx.Done():
+ return nil
+ }
+}
+
+// coreIsAlive returns true if the node is running
+func (c *core) coreIsAlive() bool {
+ return c.ctx.Err() == nil
+}
+
+func (c *core) newPID() etf.Pid {
+ // http://erlang.org/doc/apps/erts/erl_ext_dist.html#pid_ext
+ // https://stackoverflow.com/questions/243363/can-someone-explain-the-structure-of-a-pid-in-erlang
+ i := atomic.AddUint64(&c.nextPID, 1)
+ return etf.Pid{
+ Node: etf.Atom(c.nodename),
+ ID: i,
+ Creation: c.creation,
+ }
+
+}
+
+// MakeRef returns atomic reference etf.Ref within this node
+func (c *core) MakeRef() (ref etf.Ref) {
+ ref.Node = etf.Atom(c.nodename)
+ ref.Creation = c.creation
+ nt := atomic.AddUint64(&c.uniqID, 1)
+ ref.ID[0] = uint32(uint64(nt) & ((2 << 17) - 1))
+ ref.ID[1] = uint32(uint64(nt) >> 46)
+ return
+}
+
+// IsAlias
+func (c *core) IsAlias(alias etf.Alias) bool {
+ c.mutexAliases.RLock()
+ _, ok := c.aliases[alias]
+ c.mutexAliases.RUnlock()
+ return ok
+}
+
+func (c *core) newAlias(p *process) (etf.Alias, error) {
+ var alias etf.Alias
+
+	// check if it's alive
+ c.mutexProcesses.RLock()
+ _, exist := c.processes[p.self.ID]
+ c.mutexProcesses.RUnlock()
+ if !exist {
+ return alias, ErrProcessUnknown
+ }
+
+ alias = etf.Alias(c.MakeRef())
+ lib.Log("[%s] CORE create process alias for %v: %s", c.nodename, p.self, alias)
+
+ c.mutexAliases.Lock()
+ c.aliases[alias] = p
+ c.mutexAliases.Unlock()
+
+ p.Lock()
+ p.aliases = append(p.aliases, alias)
+ p.Unlock()
+ return alias, nil
+}
+
+func (c *core) deleteAlias(owner *process, alias etf.Alias) error {
+ lib.Log("[%s] CORE delete process alias %v for %v", c.nodename, alias, owner.self)
+
+ c.mutexAliases.Lock()
+ p, alias_exist := c.aliases[alias]
+ c.mutexAliases.Unlock()
+
+ if alias_exist == false {
+ return ErrAliasUnknown
+ }
+
+ c.mutexProcesses.RLock()
+ _, process_exist := c.processes[owner.self.ID]
+ c.mutexProcesses.RUnlock()
+
+ if process_exist == false {
+ return ErrProcessUnknown
+ }
+ if p.self != owner.self {
+ return ErrAliasOwner
+ }
+
+ p.Lock()
+ for i := range p.aliases {
+ if alias != p.aliases[i] {
+ continue
+ }
+ // remove it from the global alias list
+ c.mutexAliases.Lock()
+ delete(c.aliases, alias)
+ c.mutexAliases.Unlock()
+ // remove it from the process alias list
+ p.aliases[i] = p.aliases[0]
+ p.aliases = p.aliases[1:]
+ p.Unlock()
+ return nil
+ }
+ p.Unlock()
+
+ // shouldn't reach this code. seems we got a bug
+ fmt.Println("Bug: Process lost its alias. Please, report this issue")
+ c.mutexAliases.Lock()
+ delete(c.aliases, alias)
+ c.mutexAliases.Unlock()
+
+ return ErrAliasUnknown
+}
+
+func (c *core) newProcess(name string, behavior gen.ProcessBehavior, opts processOptions) (*process, error) {
+
+ var processContext context.Context
+ var kill context.CancelFunc
+
+ mailboxSize := DefaultProcessMailboxSize
+ if opts.MailboxSize > 0 {
+ mailboxSize = int(opts.MailboxSize)
+ }
+
+ processContext, kill = context.WithCancel(c.ctx)
+ if opts.Context != nil {
+ processContext = context.WithValue(processContext, "context", processContext)
+ }
+
+ pid := c.newPID()
+
+ env := make(map[gen.EnvKey]interface{})
+	// inherit the node environment
+ c.mutexEnv.RLock()
+ for k, v := range c.env {
+ env[k] = v
+ }
+ c.mutexEnv.RUnlock()
+
+ // merge the custom ones
+ for k, v := range opts.Env {
+ env[k] = v
+ }
+
+ process := &process{
+ coreInternal: c,
+
+ self: pid,
+ name: name,
+ behavior: behavior,
+ env: env,
+ compression: c.compression,
+
+ parent: opts.parent,
+ groupLeader: opts.GroupLeader,
+
+ mailBox: make(chan gen.ProcessMailboxMessage, mailboxSize),
+ gracefulExit: make(chan gen.ProcessGracefulExitRequest, mailboxSize),
+ direct: make(chan gen.ProcessDirectMessage),
+
+ context: processContext,
+ kill: kill,
+
+ reply: make(map[etf.Ref]chan etf.Term),
+ }
+
+ process.exit = func(from etf.Pid, reason string) error {
+ lib.Log("[%s] EXIT from %s to %s with reason: %s", c.nodename, from, pid, reason)
+ if processContext.Err() != nil {
+			// process has already died
+ return ErrProcessUnknown
+ }
+
+ ex := gen.ProcessGracefulExitRequest{
+ From: from,
+ Reason: reason,
+ }
+
+		// use select in case this process hasn't been started yet
+		// or ProcessLoop has already exited (gracefulExit has been set to nil);
+		// otherwise it would cause an infinite lock
+ select {
+ case process.gracefulExit <- ex:
+ default:
+ return ErrProcessBusy
+ }
+
+		// let the process decide whether to stop itself, otherwise it's going to be killed
+ if !process.trapExit {
+ process.kill()
+ }
+ return nil
+ }
+
+ if name != "" {
+ lib.Log("[%s] CORE registering name (%s): %s", c.nodename, pid, name)
+ c.mutexNames.Lock()
+ if _, exist := c.names[name]; exist {
+ c.mutexNames.Unlock()
+ return nil, ErrTaken
+ }
+ c.names[name] = process.self
+ c.mutexNames.Unlock()
+ }
+
+ lib.Log("[%s] CORE registering process: %s", c.nodename, pid)
+ c.mutexProcesses.Lock()
+ c.processes[process.self.ID] = process
+ c.mutexProcesses.Unlock()
+
+ return process, nil
+}
+
+func (c *core) deleteProcess(pid etf.Pid) {
+ c.mutexProcesses.Lock()
+ p, exist := c.processes[pid.ID]
+ if !exist {
+ c.mutexProcesses.Unlock()
+ return
+ }
+ lib.Log("[%s] CORE unregistering process: %s", c.nodename, p.self)
+ delete(c.processes, pid.ID)
+ c.mutexProcesses.Unlock()
+
+ c.mutexNames.Lock()
+ if (p.name) != "" {
+ lib.Log("[%s] CORE unregistering name (%s): %s", c.nodename, p.self, p.name)
+ delete(c.names, p.name)
+ }
+
+ // delete names registered with this pid
+ for name, pid := range c.names {
+ if p.self == pid {
+ delete(c.names, name)
+ }
+ }
+ c.mutexNames.Unlock()
+
+ c.mutexAliases.Lock()
+ for alias := range c.aliases {
+ delete(c.aliases, alias)
+ }
+ c.mutexAliases.Unlock()
+
+ return
+}
+
+func (c *core) spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) {
+
+ process, err := c.newProcess(name, behavior, opts)
+ if err != nil {
+ return nil, err
+ }
+ lib.Log("[%s] CORE spawn a new process %s (registered name: %q)", c.nodename, process.self, name)
+
+ initProcess := func() (ps gen.ProcessState, err error) {
+ if lib.CatchPanic() {
+ defer func() {
+ if rcv := recover(); rcv != nil {
+ pc, fn, line, _ := runtime.Caller(2)
+ fmt.Printf("Warning: initialization process failed %s[%q] %#v at %s[%s:%d]\n",
+ process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line)
+ c.deleteProcess(process.self)
+ err = fmt.Errorf("panic")
+ }
+ }()
+ }
+
+ ps, err = behavior.ProcessInit(process, args...)
+ return
+ }
+
+ processState, err := initProcess()
+ if err != nil {
+ return nil, err
+ }
+
+ started := make(chan bool)
+ defer close(started)
+
+ cleanProcess := func(reason string) {
+ // set gracefulExit to nil before we start termination handling
+ process.gracefulExit = nil
+ c.deleteProcess(process.self)
+ // invoke cancel context to prevent memory leaks
+		// and propagate context cancelation
+ process.Kill()
+ // notify all the linked process and monitors
+ c.handleTerminated(process.self, name, reason)
+ // make the rest empty
+ process.Lock()
+ process.aliases = []etf.Alias{}
+
+		// Do not clean self and name. Sometimes it's good to know what pid
+ // (and what name) was used by the dead process. (gen.Applications is using it)
+ // process.name = ""
+ // process.self = etf.Pid{}
+
+ process.behavior = nil
+ process.parent = nil
+ process.groupLeader = nil
+ process.exit = nil
+ process.kill = nil
+ process.mailBox = nil
+ process.direct = nil
+ process.env = nil
+ process.reply = nil
+ process.Unlock()
+ }
+
+ go func(ps gen.ProcessState) {
+ if lib.CatchPanic() {
+ defer func() {
+ if rcv := recover(); rcv != nil {
+ pc, fn, line, _ := runtime.Caller(2)
+ fmt.Printf("Warning: process terminated %s[%q] %#v at %s[%s:%d]\n",
+ process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line)
+ cleanProcess("panic")
+ }
+ }()
+ }
+
+ // start process loop
+ reason := behavior.ProcessLoop(ps, started)
+ // process stopped
+ cleanProcess(reason)
+
+ }(processState)
+
+ // wait for the starting process loop
+ <-started
+ return process, nil
+}
+
+func (c *core) registerName(name string, pid etf.Pid) error {
+ lib.Log("[%s] CORE registering name %s", c.nodename, name)
+ c.mutexNames.Lock()
+ defer c.mutexNames.Unlock()
+ if _, ok := c.names[name]; ok {
+ // already registered
+ return ErrTaken
+ }
+ c.names[name] = pid
+ return nil
+}
+
+func (c *core) unregisterName(name string) error {
+ lib.Log("[%s] CORE unregistering name %s", c.nodename, name)
+ c.mutexNames.Lock()
+ defer c.mutexNames.Unlock()
+ if _, ok := c.names[name]; ok {
+ delete(c.names, name)
+ return nil
+ }
+ return ErrNameUnknown
+}
+
+// ListEnv
+func (c *core) ListEnv() map[gen.EnvKey]interface{} {
+ c.mutexEnv.RLock()
+ defer c.mutexEnv.RUnlock()
+
+ env := make(map[gen.EnvKey]interface{})
+ for key, value := range c.env {
+ env[key] = value
+ }
+
+ return env
+}
+
+// SetEnv
+func (c *core) SetEnv(name gen.EnvKey, value interface{}) {
+ c.mutexEnv.Lock()
+ defer c.mutexEnv.Unlock()
+ if strings.HasPrefix(string(name), "ergo:") {
+ return
+ }
+ c.env[name] = value
+}
+
+// Env
+func (c *core) Env(name gen.EnvKey) interface{} {
+ c.mutexEnv.RLock()
+ defer c.mutexEnv.RUnlock()
+ if value, ok := c.env[name]; ok {
+ return value
+ }
+ return nil
+}
+
+// RegisterBehavior
+func (c *core) RegisterBehavior(group, name string, behavior gen.ProcessBehavior, data interface{}) error {
+ lib.Log("[%s] CORE registering behavior %q in group %q ", c.nodename, name, group)
+ var groupBehaviors map[string]gen.RegisteredBehavior
+ var exist bool
+
+ c.mutexBehaviors.Lock()
+ defer c.mutexBehaviors.Unlock()
+
+ groupBehaviors, exist = c.behaviors[group]
+ if !exist {
+ groupBehaviors = make(map[string]gen.RegisteredBehavior)
+ c.behaviors[group] = groupBehaviors
+ }
+
+ _, exist = groupBehaviors[name]
+ if exist {
+ return ErrTaken
+ }
+
+ rb := gen.RegisteredBehavior{
+ Behavior: behavior,
+ Data: data,
+ }
+ groupBehaviors[name] = rb
+ return nil
+}
+
+// RegisteredBehavior
+func (c *core) RegisteredBehavior(group, name string) (gen.RegisteredBehavior, error) {
+ var groupBehaviors map[string]gen.RegisteredBehavior
+ var rb gen.RegisteredBehavior
+ var exist bool
+
+ c.mutexBehaviors.Lock()
+ defer c.mutexBehaviors.Unlock()
+
+ groupBehaviors, exist = c.behaviors[group]
+ if !exist {
+ return rb, ErrBehaviorGroupUnknown
+ }
+
+ rb, exist = groupBehaviors[name]
+ if !exist {
+ return rb, ErrBehaviorUnknown
+ }
+ return rb, nil
+}
+
+// RegisteredBehaviorGroup
+func (c *core) RegisteredBehaviorGroup(group string) []gen.RegisteredBehavior {
+ var groupBehaviors map[string]gen.RegisteredBehavior
+ var exist bool
+ var listrb []gen.RegisteredBehavior
+
+ c.mutexBehaviors.Lock()
+ defer c.mutexBehaviors.Unlock()
+
+ groupBehaviors, exist = c.behaviors[group]
+ if !exist {
+ return listrb
+ }
+
+ for _, v := range groupBehaviors {
+ listrb = append(listrb, v)
+ }
+ return listrb
+}
+
+// UnregisterBehavior
+func (c *core) UnregisterBehavior(group, name string) error {
+ lib.Log("[%s] CORE unregistering behavior %s in group %s ", c.nodename, name, group)
+ var groupBehaviors map[string]gen.RegisteredBehavior
+ var exist bool
+
+ c.mutexBehaviors.Lock()
+ defer c.mutexBehaviors.Unlock()
+
+ groupBehaviors, exist = c.behaviors[group]
+ if !exist {
+ return ErrBehaviorUnknown
+ }
+ delete(groupBehaviors, name)
+
+ // remove group if its empty
+ if len(groupBehaviors) == 0 {
+ delete(c.behaviors, group)
+ }
+ return nil
+}
+
+// ProcessInfo
+func (c *core) ProcessInfo(pid etf.Pid) (gen.ProcessInfo, error) {
+ p := c.ProcessByPid(pid)
+ if p == nil {
+ return gen.ProcessInfo{}, fmt.Errorf("undefined")
+ }
+
+ return p.Info(), nil
+}
+
+// ProcessByPid
+func (c *core) ProcessByPid(pid etf.Pid) gen.Process {
+ p := c.processByPid(pid)
+ if p == nil {
+ return nil
+ }
+ return p
+}
+
+// ProcessByAlias
+func (c *core) ProcessByAlias(alias etf.Alias) gen.Process {
+ c.mutexAliases.RLock()
+ defer c.mutexAliases.RUnlock()
+ if p, ok := c.aliases[alias]; ok && p.IsAlive() {
+ return p
+ }
+ // unknown process
+ return nil
+}
+
+// ProcessByName
+func (c *core) ProcessByName(name string) gen.Process {
+ var pid etf.Pid
+ if name != "" {
+ // requesting Process by name
+ c.mutexNames.RLock()
+
+ if p, ok := c.names[name]; ok {
+ pid = p
+ } else {
+ c.mutexNames.RUnlock()
+ return nil
+ }
+ c.mutexNames.RUnlock()
+ }
+
+ return c.ProcessByPid(pid)
+}
+
+// ProcessList
+func (c *core) ProcessList() []gen.Process {
+ list := []gen.Process{}
+ c.mutexProcesses.RLock()
+ for _, p := range c.processes {
+ list = append(list, p)
+ }
+ c.mutexProcesses.RUnlock()
+ return list
+}
+
+//
+// implementation of CoreRouter interface:
+// RouteSend
+// RouteSendReg
+// RouteSendAlias
+//
+
+// RouteSend implements RouteSend method of Router interface
+func (c *core) RouteSend(from etf.Pid, to etf.Pid, message etf.Term) error {
+ if string(to.Node) == c.nodename {
+ if to.Creation != c.creation {
+ // message is addressed to the previous incarnation of this PID
+ return ErrProcessIncarnation
+ }
+ // local route
+ c.mutexProcesses.RLock()
+ p, exist := c.processes[to.ID]
+ c.mutexProcesses.RUnlock()
+ if !exist {
+ lib.Log("[%s] CORE route message by pid (local) %s failed. Unknown process", c.nodename, to)
+ return ErrProcessUnknown
+ }
+ lib.Log("[%s] CORE route message by pid (local) %s", c.nodename, to)
+ select {
+ case p.mailBox <- gen.ProcessMailboxMessage{From: from, Message: message}:
+ default:
+ return fmt.Errorf("WARNING! mailbox of %s is full. dropped message from %s", p.Self(), from)
+ }
+ return nil
+ }
+
+	// do not allow sending from an alien node. Proxy request must be used.
+ if string(from.Node) != c.nodename {
+ return ErrSenderUnknown
+ }
+
+ // sending to remote node
+ c.mutexProcesses.RLock()
+ p_from, exist := c.processes[from.ID]
+ c.mutexProcesses.RUnlock()
+ if !exist {
+ lib.Log("[%s] CORE route message by pid (remote) %s failed. Unknown sender", c.nodename, to)
+ return ErrSenderUnknown
+ }
+ connection, err := c.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+
+ lib.Log("[%s] CORE route message by pid (remote) %s", c.nodename, to)
+ return connection.Send(p_from, to, message)
+}
+
+// RouteSendReg implements RouteSendReg method of Router interface
+func (c *core) RouteSendReg(from etf.Pid, to gen.ProcessID, message etf.Term) error {
+ if to.Node == c.nodename {
+ // local route
+ c.mutexNames.RLock()
+ pid, ok := c.names[to.Name]
+ c.mutexNames.RUnlock()
+ if !ok {
+ lib.Log("[%s] CORE route message by gen.ProcessID (local) %s failed. Unknown process", c.nodename, to)
+ return ErrProcessUnknown
+ }
+ lib.Log("[%s] CORE route message by gen.ProcessID (local) %s", c.nodename, to)
+ return c.RouteSend(from, pid, message)
+ }
+
+	// do not allow sending from an alien node. Proxy request must be used.
+ if string(from.Node) != c.nodename {
+ return ErrSenderUnknown
+ }
+
+ // send to remote node
+ c.mutexProcesses.RLock()
+ p_from, exist := c.processes[from.ID]
+ c.mutexProcesses.RUnlock()
+ if !exist {
+ lib.Log("[%s] CORE route message by gen.ProcessID (remote) %s failed. Unknown sender", c.nodename, to)
+ return ErrSenderUnknown
+ }
+ connection, err := c.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+
+ lib.Log("[%s] CORE route message by gen.ProcessID (remote) %s", c.nodename, to)
+ return connection.SendReg(p_from, to, message)
+}
+
+// RouteSendAlias implements RouteSendAlias method of Router interface
+func (c *core) RouteSendAlias(from etf.Pid, to etf.Alias, message etf.Term) error {
+
+ if string(to.Node) == c.nodename {
+ // local route by alias
+ c.mutexAliases.RLock()
+ process, ok := c.aliases[to]
+ c.mutexAliases.RUnlock()
+ if !ok {
+ lib.Log("[%s] CORE route message by alias (local) %s failed. Unknown process", c.nodename, to)
+ return ErrProcessUnknown
+ }
+ lib.Log("[%s] CORE route message by alias (local) %s", c.nodename, to)
+ return c.RouteSend(from, process.self, message)
+ }
+
+	// do not allow sending from an alien node. Proxy request must be used.
+ if string(from.Node) != c.nodename {
+ return ErrSenderUnknown
+ }
+
+ // send to remote node
+ c.mutexProcesses.RLock()
+ p_from, exist := c.processes[from.ID]
+ c.mutexProcesses.RUnlock()
+ if !exist {
+ lib.Log("[%s] CORE route message by alias (remote) %s failed. Unknown sender", c.nodename, to)
+ return ErrSenderUnknown
+ }
+ connection, err := c.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+
+ lib.Log("[%s] CORE route message by alias (remote) %s", c.nodename, to)
+ return connection.SendAlias(p_from, to, message)
+}
+
+// RouteProxy
+func (c *core) RouteProxy() error {
+ // FIXME
+ return nil
+}
+
+// RouteSpawnRequest
+func (c *core) RouteSpawnRequest(node string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error {
+ if node == c.nodename {
+ // get connection for reply
+ connection, err := c.GetConnection(string(request.From.Node))
+ if err != nil {
+ return err
+ }
+
+ // check if we have registered behavior with given name
+ b, err := c.RegisteredBehavior(remoteBehaviorGroup, behaviorName)
+ if err != nil {
+ return connection.SpawnReplyError(request.From, request.Ref, err)
+ }
+
+ // spawn new process
+ process_opts := processOptions{}
+ process_opts.Env = map[gen.EnvKey]interface{}{EnvKeyRemoteSpawn: request.Options}
+ process, err_spawn := c.spawn(request.Options.Name, process_opts, b.Behavior, args...)
+
+ // reply
+ if err_spawn != nil {
+ return connection.SpawnReplyError(request.From, request.Ref, err_spawn)
+ }
+ return connection.SpawnReply(request.From, request.Ref, process.Self())
+ }
+
+ connection, err := c.GetConnection(node)
+ if err != nil {
+ return err
+ }
+ return connection.SpawnRequest(behaviorName, request, args...)
+}
+
+// RouteSpawnReply
+func (c *core) RouteSpawnReply(to etf.Pid, ref etf.Ref, result etf.Term) error {
+ process := c.processByPid(to)
+ if process == nil {
+		// the process seems to have terminated
+ return ErrProcessTerminated
+ }
+ process.PutSyncReply(ref, result)
+ return nil
+}
+
+func (c *core) processByPid(pid etf.Pid) *process {
+ c.mutexProcesses.RLock()
+ defer c.mutexProcesses.RUnlock()
+ if p, ok := c.processes[pid.ID]; ok && p.IsAlive() {
+ return p
+ }
+ // unknown process
+ return nil
+}
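
Note on the delivery path above: `RouteSend` never blocks the router goroutine. Local delivery is a buffered-channel send that drops the message (returning an error) when the target mailbox is full. Below is a minimal, self-contained sketch of that pattern; `Message` and `trySend` are illustrative names, not part of the ergo API:

```go
package main

import "fmt"

// Message is an illustrative stand-in for gen.ProcessMailboxMessage.
type Message struct {
	From    string
	Payload interface{}
}

// trySend mirrors the non-blocking delivery used by RouteSend:
// enqueue into a bounded mailbox, or fail fast instead of blocking
// the router when the mailbox is full.
func trySend(mailbox chan Message, m Message) error {
	select {
	case mailbox <- m:
		return nil
	default:
		return fmt.Errorf("mailbox is full, dropped message from %s", m.From)
	}
}

func main() {
	mailbox := make(chan Message, 1) // capacity 1 to force the overflow path
	fmt.Println(trySend(mailbox, Message{From: "a"})) // <nil>
	fmt.Println(trySend(mailbox, Message{From: "b"})) // mailbox is full, ...
}
```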
diff --git a/node/dist/dist.go b/node/dist/dist.go
deleted file mode 100644
index 18896186..00000000
--- a/node/dist/dist.go
+++ /dev/null
@@ -1,1552 +0,0 @@
-package dist
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/md5"
- "encoding/binary"
- "fmt"
- "io"
- "math/rand"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ergo-services/ergo/etf"
- "github.com/ergo-services/ergo/lib"
-)
-
-var (
- ErrMissingInCache = fmt.Errorf("Missing in cache")
- ErrMalformed = fmt.Errorf("Malformed")
-)
-
-func init() {
- rand.Seed(time.Now().UTC().UnixNano())
-}
-
-type flagId uint64
-type nodeFlag flagId
-
-const (
- defaultLatency = 200 * time.Nanosecond // for linkFlusher
-
- defaultCleanTimeout = 5 * time.Second // for checkClean
- defaultCleanDeadline = 30 * time.Second // for checkClean
-
- // http://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution_header
- protoDist = 131
- protoDistCompressed = 80
- protoDistMessage = 68
- protoDistFragment1 = 69
- protoDistFragmentN = 70
-
- ProtoHandshake5 = 5
- ProtoHandshake6 = 6
-
- // distribution flags are defined here https://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-flags
- PUBLISHED flagId = 0x1
- ATOM_CACHE = 0x2
- EXTENDED_REFERENCES = 0x4
- DIST_MONITOR = 0x8
- FUN_TAGS = 0x10
- DIST_MONITOR_NAME = 0x20
- HIDDEN_ATOM_CACHE = 0x40
- NEW_FUN_TAGS = 0x80
- EXTENDED_PIDS_PORTS = 0x100
- EXPORT_PTR_TAG = 0x200
- BIT_BINARIES = 0x400
- NEW_FLOATS = 0x800
- UNICODE_IO = 0x1000
- DIST_HDR_ATOM_CACHE = 0x2000
- SMALL_ATOM_TAGS = 0x4000
- UTF8_ATOMS = 0x10000
- MAP_TAG = 0x20000
- BIG_CREATION = 0x40000
- SEND_SENDER = 0x80000 // since OTP.21 enable replacement for SEND (distProtoSEND by distProtoSEND_SENDER)
- BIG_SEQTRACE_LABELS = 0x100000
- EXIT_PAYLOAD = 0x400000 // since OTP.22 enable replacement for EXIT, EXIT2, MONITOR_P_EXIT
- FRAGMENTS = 0x800000
- HANDSHAKE23 = 0x1000000 // new connection setup handshake (version 6) introduced in OTP 23
- UNLINK_ID = 0x2000000
- // for 64bit flags
- SPAWN = 1 << 32
- NAME_ME = 1 << 33
- V4_NC = 1 << 34
- ALIAS = 1 << 35
-)
-
-type HandshakeOptions struct {
- Version int // 5 or 6
- Name string
- Cookie string
- TLS bool
- Hidden bool
- Creation uint32
-}
-
-func (nf nodeFlag) toUint32() uint32 {
- return uint32(nf)
-}
-
-func (nf nodeFlag) toUint64() uint64 {
- return uint64(nf)
-}
-
-func (nf nodeFlag) isSet(f flagId) bool {
- return (uint64(nf) & uint64(f)) != 0
-}
-
-func toNodeFlag(f ...flagId) nodeFlag {
- var flags uint64
- for _, v := range f {
- flags |= uint64(v)
- }
- return nodeFlag(flags)
-}
-
-type fragmentedPacket struct {
- buffer *lib.Buffer
- disordered *lib.Buffer
- disorderedSlices map[uint64][]byte
- fragmentID uint64
- lastUpdate time.Time
-}
-
-type Link struct {
- Name string
- Cookie string
- Hidden bool
- peer *Link
- conn net.Conn
- challenge uint32
- flags nodeFlag
- version uint16
- creation uint32
- digest []byte
-
- // writer
- flusher *linkFlusher
-
-	// atom cache for incoming messages
- cacheIn [2048]*etf.Atom
- cacheInMutex sync.Mutex
-
- // atom cache for outgoing messages
- cacheOut *etf.AtomCache
-
- // fragmentation sequence ID
- sequenceID int64
- fragments map[uint64]*fragmentedPacket
- fragmentsMutex sync.Mutex
-
- // check and clean lost fragments
- checkCleanPending bool
- checkCleanTimer *time.Timer
- checkCleanTimeout time.Duration // default is 5 seconds
- checkCleanDeadline time.Duration // how long we wait for the next fragment of the certain sequenceID. Default is 30 seconds
-}
-
-func (l *Link) GetPeerName() string {
- if l.peer == nil {
- return ""
- }
-
- return l.peer.Name
-}
-
-func newLinkFlusher(w io.Writer, latency time.Duration) *linkFlusher {
- return &linkFlusher{
- latency: latency,
- writer: bufio.NewWriter(w),
-		w:       w, // in case we skip buffering
- }
-}
-
-type linkFlusher struct {
- mutex sync.Mutex
- latency time.Duration
- writer *bufio.Writer
- w io.Writer
-
- timer *time.Timer
- pending bool
-}
-
-func (lf *linkFlusher) Write(b []byte) (int, error) {
- lf.mutex.Lock()
- defer lf.mutex.Unlock()
-
- l := len(b)
- lenB := l
-
-	// long data is written directly to the socket
- if l > 64000 {
- for {
- n, e := lf.w.Write(b[lenB-l:])
- if e != nil {
- return n, e
- }
- // check if something left
- l -= n
- if l > 0 {
- continue
- }
- return lenB, nil
- }
- }
-
- // write data to the buffer
- for {
- n, e := lf.writer.Write(b)
- if e != nil {
- return n, e
- }
- // check if something left
- l -= n
- if l > 0 {
- continue
- }
- break
- }
-
- if lf.pending {
- return lenB, nil
- }
-
- lf.pending = true
-
- if lf.timer != nil {
- lf.timer.Reset(lf.latency)
- return lenB, nil
- }
-
- lf.timer = time.AfterFunc(lf.latency, func() {
- // KeepAlive packet is just 4 bytes with zero value
- var keepAlivePacket = []byte{0, 0, 0, 0}
-
- lf.mutex.Lock()
- defer lf.mutex.Unlock()
-
- // if we have no pending data to send we should
- // send a KeepAlive packet
- if !lf.pending {
- lf.w.Write(keepAlivePacket)
- return
- }
-
- lf.writer.Flush()
- lf.pending = false
- })
-
- return lenB, nil
-
-}
-
-func Handshake(conn net.Conn, options HandshakeOptions) (*Link, error) {
-
- link := &Link{
- Name: options.Name,
- Cookie: options.Cookie,
- Hidden: options.Hidden,
-
- flags: toNodeFlag(PUBLISHED, UNICODE_IO, DIST_MONITOR, DIST_MONITOR_NAME,
- EXTENDED_PIDS_PORTS, EXTENDED_REFERENCES, ATOM_CACHE,
- DIST_HDR_ATOM_CACHE, HIDDEN_ATOM_CACHE, NEW_FUN_TAGS,
- SMALL_ATOM_TAGS, UTF8_ATOMS, MAP_TAG,
- FRAGMENTS, HANDSHAKE23, BIG_CREATION, SPAWN, V4_NC, ALIAS,
- ),
-
- conn: conn,
- sequenceID: time.Now().UnixNano(),
- version: uint16(options.Version),
- creation: options.Creation,
- }
-
- b := lib.TakeBuffer()
- defer lib.ReleaseBuffer(b)
-
- var await []byte
-
- if options.Version == ProtoHandshake5 {
- link.composeName(b, options.TLS)
- // the next message must be send_status 's' or send_challenge 'n' (for
- // handshake version 5) or 'N' (for handshake version 6)
- await = []byte{'s', 'n', 'N'}
- } else {
- link.composeNameVersion6(b, options.TLS)
- await = []byte{'s', 'N'}
- }
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-
- // define timeout for the handshaking
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
-
- asyncReadChannel := make(chan error, 2)
- asyncRead := func() {
- _, e := b.ReadDataFrom(conn, 512)
- asyncReadChannel <- e
- }
-
- // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
- // Every message in the handshake starts with a 16-bit big-endian integer,
- // which contains the message length (not counting the two initial bytes).
- // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
- // that after the handshake, the distribution switches to 4 byte packet headers.
- expectingBytes := 2
- if options.TLS {
- // TLS connection has 4 bytes packet length header
- expectingBytes = 4
- }
-
- for {
- go asyncRead()
-
- select {
- case <-timer.C:
- return nil, fmt.Errorf("handshake timeout")
-
- case e := <-asyncReadChannel:
- if e != nil {
- return nil, e
- }
-
- next:
- l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
- buffer := b.B[expectingBytes:]
-
- if len(buffer) < int(l) {
- return nil, fmt.Errorf("malformed handshake (wrong packet length)")
- }
-
-			// check that we received the expected message type according to the 'await' list
- if bytes.Count(await, buffer[0:1]) == 0 {
- return nil, fmt.Errorf("malformed handshake (wrong response)")
- }
-
- switch buffer[0] {
- case 'n':
- // 'n' + 2 (version) + 4 (flags) + 4 (challenge) + name...
- if len(b.B) < 12 {
- return nil, fmt.Errorf("malformed handshake ('n')")
- }
-
- challenge := link.readChallenge(b.B[1:])
- if challenge == 0 {
-					return nil, fmt.Errorf("malformed handshake (mismatched handshake version)")
- }
- b.Reset()
-
- link.composeChallengeReply(b, challenge, options.TLS)
-
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-				// add 's' status in case it arrives after the 'n' or 'N' message
- await = []byte{'s', 'a'}
-
- case 'N':
- // Peer support version 6.
-
- // The new challenge message format (version 6)
- // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
- if len(buffer) < 16 {
- return nil, fmt.Errorf("malformed handshake ('N' length)")
- }
- challenge := link.readChallengeVersion6(buffer[1:])
- b.Reset()
-
- if link.version == ProtoHandshake5 {
- // send complement message
- link.composeComplement(b, options.TLS)
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
- link.version = ProtoHandshake6
- }
-
- link.composeChallengeReply(b, challenge, options.TLS)
-
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-
-				// add 's' (send_status message) in case it arrives after the 'n' or 'N' message
- await = []byte{'s', 'a'}
-
- case 'a':
- // 'a' + 16 (digest)
- if len(buffer) != 17 {
- return nil, fmt.Errorf("malformed handshake ('a' length of digest)")
- }
-
- // 'a' + 16 (digest)
- digest := genDigest(link.peer.challenge, link.Cookie)
-				if !bytes.Equal(buffer[1:17], digest) {
- return nil, fmt.Errorf("malformed handshake ('a' digest)")
- }
-
- // handshaked
- link.flusher = newLinkFlusher(link.conn, defaultLatency)
- return link, nil
-
- case 's':
-				if !link.readStatus(buffer[1:]) {
- return nil, fmt.Errorf("handshake negotiation failed")
- }
-
- await = []byte{'n', 'N'}
- // "sok"
- if len(buffer) > 4 {
- b.B = b.B[expectingBytes+3:]
- goto next
- }
- b.Reset()
-
- default:
-				return nil, fmt.Errorf("malformed handshake (unexpected message '%c')", buffer[0])
- }
-
- }
-
- }
-
-}
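
As the comments in `Handshake` note, every handshake message is framed with a 16-bit big-endian length that does not count the two prefix bytes (Erlang's `{packet, 2}`). A hedged sketch of that framing; `writeFramed2` is a hypothetical helper, not part of this package:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFramed2 writes msg with the 2-byte big-endian length prefix used
// during the distribution handshake; the length excludes the prefix itself.
func writeFramed2(w io.Writer, msg []byte) error {
	var hdr [2]byte
	binary.BigEndian.PutUint16(hdr[:], uint16(len(msg)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(msg)
	return err
}

func main() {
	var buf bytes.Buffer
	_ = writeFramed2(&buf, []byte("sok")) // the send_status "ok" message
	fmt.Printf("%v\n", buf.Bytes())       // [0 3 115 111 107]
}
```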
-
-func HandshakeAccept(conn net.Conn, options HandshakeOptions) (*Link, error) {
- link := &Link{
- Name: options.Name,
- Cookie: options.Cookie,
- Hidden: options.Hidden,
-
- flags: toNodeFlag(PUBLISHED, UNICODE_IO, DIST_MONITOR, DIST_MONITOR_NAME,
- EXTENDED_PIDS_PORTS, EXTENDED_REFERENCES, ATOM_CACHE,
- DIST_HDR_ATOM_CACHE, HIDDEN_ATOM_CACHE, NEW_FUN_TAGS,
- SMALL_ATOM_TAGS, UTF8_ATOMS, MAP_TAG,
- FRAGMENTS, HANDSHAKE23, BIG_CREATION, SPAWN, V4_NC, ALIAS,
- ),
-
- conn: conn,
- sequenceID: time.Now().UnixNano(),
- challenge: rand.Uint32(),
- version: ProtoHandshake6,
- creation: options.Creation,
- }
-
- b := lib.TakeBuffer()
- defer lib.ReleaseBuffer(b)
-
- var await []byte
-
- // define timeout for the handshaking
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
-
- asyncReadChannel := make(chan error, 2)
- asyncRead := func() {
- _, e := b.ReadDataFrom(conn, 512)
- asyncReadChannel <- e
- }
-
- // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
- // Every message in the handshake starts with a 16-bit big-endian integer,
- // which contains the message length (not counting the two initial bytes).
- // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
- // that after the handshake, the distribution switches to 4 byte packet headers.
- expectingBytes := 2
- if options.TLS {
- // TLS connection has 4 bytes packet length header
- expectingBytes = 4
- }
-
-	// the incoming message must be 'send_name' ('n' or 'N') sent by the
-	// peer initiating the connection
- await = []byte{'n', 'N'}
-
- for {
- go asyncRead()
-
- select {
- case <-timer.C:
- return nil, fmt.Errorf("handshake accept timeout")
- case e := <-asyncReadChannel:
- if e != nil {
- return nil, e
- }
-
- if b.Len() < expectingBytes+1 {
- return nil, fmt.Errorf("malformed handshake (too short packet)")
- }
-
- next:
- l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
- buffer := b.B[expectingBytes:]
-
- if len(buffer) < int(l) {
- return nil, fmt.Errorf("malformed handshake (wrong packet length)")
- }
-
- if bytes.Count(await, buffer[0:1]) == 0 {
- return nil, fmt.Errorf("malformed handshake (wrong response %d)", buffer[0])
- }
-
- switch buffer[0] {
- case 'n':
- if len(buffer) < 8 {
- return nil, fmt.Errorf("malformed handshake ('n' length)")
- }
-
- link.peer = link.readName(buffer[1:])
- b.Reset()
- link.composeStatus(b, options.TLS)
- if e := b.WriteDataTo(conn); e != nil {
- return nil, fmt.Errorf("malformed handshake ('n' accept name)")
- }
-
- b.Reset()
- if link.peer.flags.isSet(HANDSHAKE23) {
- link.composeChallengeVersion6(b, options.TLS)
- await = []byte{'s', 'r', 'c'}
- } else {
- link.version = ProtoHandshake5
- link.composeChallenge(b, options.TLS)
- await = []byte{'s', 'r'}
- }
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-
- case 'N':
- // The new challenge message format (version 6)
- // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
- if len(buffer) < 16 {
- return nil, fmt.Errorf("malformed handshake ('N' length)")
- }
- link.peer = link.readNameVersion6(buffer[1:])
- b.Reset()
- link.composeStatus(b, options.TLS)
- if e := b.WriteDataTo(conn); e != nil {
- return nil, fmt.Errorf("malformed handshake ('N' accept name)")
- }
-
- b.Reset()
- link.composeChallengeVersion6(b, options.TLS)
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-
- await = []byte{'s', 'r'}
-
- case 'c':
- if len(buffer) < 9 {
- return nil, fmt.Errorf("malformed handshake ('c' length)")
- }
- link.readComplement(buffer[1:])
-
- await = []byte{'r'}
-
- if len(buffer) > 9 {
- b.B = b.B[expectingBytes+9:]
- goto next
- }
- b.Reset()
-
- case 'r':
- if len(buffer) < 19 {
- return nil, fmt.Errorf("malformed handshake ('r' length)")
- }
-
- if !link.validateChallengeReply(buffer[1:]) {
- return nil, fmt.Errorf("malformed handshake ('r' invalid reply)")
- }
- b.Reset()
-
- link.composeChallengeAck(b, options.TLS)
- if e := b.WriteDataTo(conn); e != nil {
- return nil, e
- }
-
- // handshaked
- link.flusher = newLinkFlusher(link.conn, defaultLatency)
-
- return link, nil
-
- case 's':
-				if !link.readStatus(buffer[1:]) {
- return nil, fmt.Errorf("link status !ok")
- }
-
- await = []byte{'c', 'r'}
- if len(buffer) > 4 {
- b.B = b.B[expectingBytes+3:]
- goto next
- }
- b.Reset()
-
- default:
-				return nil, fmt.Errorf("malformed handshake (unknown code %d)", buffer[0])
- }
-
- }
-
- }
-}
-
-func (l *Link) Close() {
- if l.conn != nil {
- l.conn.Close()
- }
-}
-
-func (l *Link) PeerName() string {
- if l.peer != nil {
- return l.peer.Name
- }
- return ""
-}
-
-func (l *Link) Read(b *lib.Buffer) (int, error) {
- // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#protocol-between-connected-nodes
- expectingBytes := 4
-
- for {
- if b.Len() < expectingBytes {
- n, e := b.ReadDataFrom(l.conn, 0)
- if n == 0 {
- // link was closed
- return 0, nil
- }
-
- if e != nil && e != io.EOF {
- // something went wrong
- return 0, e
- }
-
-			// check one more time whether we should read more data
- continue
- }
-
- packetLength := binary.BigEndian.Uint32(b.B[:4])
- if packetLength == 0 {
- // keepalive
- l.conn.Write(b.B[:4])
- b.Set(b.B[4:])
-
- expectingBytes = 4
- continue
- }
-
- if b.Len() < int(packetLength)+4 {
- expectingBytes = int(packetLength) + 4
- continue
- }
-
- return int(packetLength) + 4, nil
- }
-
-}
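
After the handshake, `Read` switches to 4-byte length headers, and a zero length is a keepalive that gets echoed back. A small sketch of just the framing decision, separate from the incremental buffering that `Read` performs; `parseFrame` is an illustrative helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseFrame inspects one packet of the connected-nodes protocol: a 4-byte
// big-endian length header followed by the payload; zero length = keepalive.
func parseFrame(b []byte) (payload []byte, keepalive bool, err error) {
	if len(b) < 4 {
		return nil, false, fmt.Errorf("need more data")
	}
	n := binary.BigEndian.Uint32(b[:4])
	if n == 0 {
		return nil, true, nil
	}
	if len(b) < int(n)+4 {
		return nil, false, fmt.Errorf("incomplete frame")
	}
	return b[4 : 4+n], false, nil
}

func main() {
	fmt.Println(parseFrame([]byte{0, 0, 0, 0}))          // keepalive tick
	fmt.Println(parseFrame([]byte{0, 0, 0, 2, 131, 68})) // 2-byte payload
}
```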
-
-type deferrMissing struct {
- b *lib.Buffer
- c int
-}
-
-func (l *Link) ReadHandlePacket(ctx context.Context, recv chan *lib.Buffer,
- handler func(string, etf.Term, etf.Term) error) {
- var b *lib.Buffer
- var missing deferrMissing
- var Timeout <-chan time.Time
-
- deferrChannel := make(chan deferrMissing, 100)
- defer close(deferrChannel)
-
- timer := lib.TakeTimer()
- defer lib.ReleaseTimer(timer)
-
- dChannel := deferrChannel
-
- for {
- select {
- case missing = <-dChannel:
- b = missing.b
- default:
- if len(deferrChannel) > 0 {
- timer.Reset(150 * time.Millisecond)
- Timeout = timer.C
- } else {
- Timeout = nil
- }
- select {
- case b = <-recv:
- if b == nil {
- // channel was closed
- return
- }
- case <-Timeout:
- dChannel = deferrChannel
- continue
- }
- }
-
- // read and decode received packet
- control, message, err := l.ReadPacket(b.B)
-
- if err == ErrMissingInCache {
- if b == missing.b && missing.c > 100 {
-				fmt.Println("Error: disordered data at the link with", l.PeerName(), "- closing connection")
- l.Close()
- lib.ReleaseBuffer(b)
- return
- }
-
- if b == missing.b {
- missing.c++
- } else {
- missing.b = b
- missing.c = 0
- }
-
- select {
- case deferrChannel <- missing:
- // read recv channel
- dChannel = nil
- continue
- default:
-				fmt.Println("Error: deferred messages overflow at the link with", l.PeerName(), "- closing connection")
- l.Close()
- lib.ReleaseBuffer(b)
- return
- }
- }
-
- dChannel = deferrChannel
-
- if err != nil {
- fmt.Println("Malformed Dist proto at the link with", l.PeerName(), err)
- l.Close()
- lib.ReleaseBuffer(b)
- return
- }
-
- if control == nil {
- // fragment
- continue
- }
-
- // handle message
- if err := handler(l.peer.Name, control, message); err != nil {
- fmt.Printf("Malformed Control packet at the link with %s: %#v\n", l.PeerName(), control)
- l.Close()
- lib.ReleaseBuffer(b)
- return
- }
-
- // we have to release this buffer
- lib.ReleaseBuffer(b)
-
- }
-}
-
-func (l *Link) ReadPacket(packet []byte) (etf.Term, etf.Term, error) {
- if len(packet) < 5 {
- return nil, nil, fmt.Errorf("malformed packet")
- }
-
- // [:3] length
- switch packet[4] {
- case protoDist:
- return l.ReadDist(packet[5:])
- default:
- // unknown proto
- return nil, nil, fmt.Errorf("unknown/unsupported proto")
- }
-
-}
-
-func (l *Link) ReadDist(packet []byte) (etf.Term, etf.Term, error) {
- switch packet[0] {
- case protoDistCompressed:
-		// do we need it?
-		// zip.NewReader(...)
-		// ...unzip into a new buffer b (lib.TakeBuffer)
-		// just in case: if b[0] == protoDistCompressed, return an error;
-		// otherwise it would cause a recursive call, and it is not clear that is safe
-		// return l.ReadDist(b)
-
- case protoDistMessage:
- var control, message etf.Term
- var cache []etf.Atom
- var err error
-
- cache, packet, err = l.decodeDistHeaderAtomCache(packet[1:])
-
- if err != nil {
- return nil, nil, err
- }
-
- decodeOptions := etf.DecodeOptions{
- FlagV4NC: l.peer.flags.isSet(V4_NC),
- FlagBigCreation: l.peer.flags.isSet(BIG_CREATION),
- }
-
- control, packet, err = etf.Decode(packet, cache, decodeOptions)
- if err != nil {
- return nil, nil, err
- }
-
- if len(packet) == 0 {
- return control, nil, nil
- }
-
- message, packet, err = etf.Decode(packet, cache, decodeOptions)
- if err != nil {
- return nil, nil, err
- }
-
- if len(packet) != 0 {
- return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet))
- }
-
- return control, message, nil
-
- case protoDistFragment1, protoDistFragmentN:
- first := packet[0] == protoDistFragment1
- if len(packet) < 18 {
- return nil, nil, fmt.Errorf("malformed fragment")
- }
-
-		// We should decode the first fragment in order to process the Atom Cache Header,
-		// to rule out the case when we get the first fragment of a packet
-		// and the next packet is not part of the fragmented packet, but uses
-		// the ids that were encoded in the first fragment
- if first {
- l.decodeDistHeaderAtomCache(packet[1:])
- }
-
- if assembled, err := l.decodeFragment(packet[1:], first); assembled != nil {
- if err != nil {
- return nil, nil, err
- }
- defer lib.ReleaseBuffer(assembled)
- return l.ReadDist(assembled.B)
- } else {
- if err != nil {
- return nil, nil, err
- }
- }
-
- return nil, nil, nil
- }
-
- return nil, nil, fmt.Errorf("unknown packet type %d", packet[0])
-}
-
-func (l *Link) decodeFragment(packet []byte, first bool) (*lib.Buffer, error) {
- l.fragmentsMutex.Lock()
- defer l.fragmentsMutex.Unlock()
-
- if l.fragments == nil {
- l.fragments = make(map[uint64]*fragmentedPacket)
- }
-
- sequenceID := binary.BigEndian.Uint64(packet)
- fragmentID := binary.BigEndian.Uint64(packet[8:])
- if fragmentID == 0 {
- return nil, fmt.Errorf("fragmentID can't be 0")
- }
-
- fragmented, ok := l.fragments[sequenceID]
- if !ok {
- fragmented = &fragmentedPacket{
- buffer: lib.TakeBuffer(),
- disordered: lib.TakeBuffer(),
- disorderedSlices: make(map[uint64][]byte),
- lastUpdate: time.Now(),
- }
- fragmented.buffer.AppendByte(protoDistMessage)
- l.fragments[sequenceID] = fragmented
- }
-
- // until we get the first item everything will be treated as disordered
- if first {
- fragmented.fragmentID = fragmentID + 1
- }
-
- if fragmented.fragmentID-fragmentID != 1 {
- // got the next fragment. disordered
- slice := fragmented.disordered.Extend(len(packet) - 16)
- copy(slice, packet[16:])
- fragmented.disorderedSlices[fragmentID] = slice
- } else {
- // order is correct. just append
- fragmented.buffer.Append(packet[16:])
- fragmented.fragmentID = fragmentID
- }
-
- // check whether we have disordered slices and try
-	// to append them if they fit
- if fragmented.fragmentID > 0 && len(fragmented.disorderedSlices) > 0 {
- for i := fragmented.fragmentID - 1; i > 0; i-- {
- if slice, ok := fragmented.disorderedSlices[i]; ok {
- fragmented.buffer.Append(slice)
- delete(fragmented.disorderedSlices, i)
- fragmented.fragmentID = i
- continue
- }
- break
- }
- }
-
- fragmented.lastUpdate = time.Now()
-
- if fragmented.fragmentID == 1 && len(fragmented.disorderedSlices) == 0 {
- // it was the last fragment
- delete(l.fragments, sequenceID)
- lib.ReleaseBuffer(fragmented.disordered)
- return fragmented.buffer, nil
- }
-
- if l.checkCleanPending {
- return nil, nil
- }
-
- if l.checkCleanTimer != nil {
- l.checkCleanTimer.Reset(l.checkCleanTimeout)
- return nil, nil
- }
-
- l.checkCleanTimer = time.AfterFunc(l.checkCleanTimeout, func() {
- l.fragmentsMutex.Lock()
- defer l.fragmentsMutex.Unlock()
-
- if l.checkCleanTimeout == 0 {
- l.checkCleanTimeout = defaultCleanTimeout
- }
- if l.checkCleanDeadline == 0 {
- l.checkCleanDeadline = defaultCleanDeadline
- }
-
- valid := time.Now().Add(-l.checkCleanDeadline)
- for sequenceID, fragmented := range l.fragments {
- if fragmented.lastUpdate.Before(valid) {
-				// dropping due to exceeded deadline
- delete(l.fragments, sequenceID)
- }
- }
- if len(l.fragments) == 0 {
- l.checkCleanPending = false
- return
- }
-
- l.checkCleanPending = true
- l.checkCleanTimer.Reset(l.checkCleanTimeout)
- })
-
- return nil, nil
-}
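
Each fragment parsed by `decodeFragment` carries a 16-byte header (sequenceID, then fragmentID) after the protocol tag, and fragmentID counts down to 1 on the last fragment — the exact condition used above to finish reassembly. A sketch of how such a header is laid out, assuming the tag constants defined earlier; `fragmentHeader` is illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// fragmentHeader builds the 17-byte header preceding each fragment payload:
// a tag (69 for the first fragment, 70 for the rest), then sequenceID and
// fragmentID as 8-byte big-endian integers.
func fragmentHeader(first bool, sequenceID, fragmentID uint64) []byte {
	h := make([]byte, 17)
	if first {
		h[0] = 69 // protoDistFragment1
	} else {
		h[0] = 70 // protoDistFragmentN
	}
	binary.BigEndian.PutUint64(h[1:9], sequenceID)
	binary.BigEndian.PutUint64(h[9:17], fragmentID)
	return h
}

func main() {
	// a message split into 3 fragments under sequence 42:
	// fragmentID runs 3, 2, 1 — and 1 marks the final fragment
	for id := uint64(3); id >= 1; id-- {
		fmt.Printf("% x\n", fragmentHeader(id == 3, 42, id))
	}
}
```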
-
-func (l *Link) decodeDistHeaderAtomCache(packet []byte) ([]etf.Atom, []byte, error) {
- // all the details are here https://erlang.org/doc/apps/erts/erl_ext_dist.html#normal-distribution-header
-
-	// number of atom references present in the packet
- references := int(packet[0])
- if references == 0 {
- return nil, packet[1:], nil
- }
-
- cache := make([]etf.Atom, references)
- flagsLen := references/2 + 1
- if len(packet) < 1+flagsLen {
- // malformed
- return nil, nil, ErrMalformed
- }
- flags := packet[1 : flagsLen+1]
-
- // The least significant bit in a half byte is flag LongAtoms.
- // If it is set, 2 bytes are used for atom lengths instead of 1 byte
- // in the distribution header.
- headerAtomLength := 1 // if 'LongAtom' is not set
-
-	// extract this bit. just increase headerAtomLength if this flag is set
- lastByte := flags[len(flags)-1]
- shift := uint((references & 0x01) * 4)
- headerAtomLength += int((lastByte >> shift) & 0x01)
-
- // 1 (number of references) + references/2+1 (length of flags)
- packet = packet[1+flagsLen:]
-
- for i := 0; i < references; i++ {
- if len(packet) < 1+headerAtomLength {
- // malformed
- return nil, nil, ErrMalformed
- }
- shift = uint((i & 0x01) * 4)
- flag := (flags[i/2] >> shift) & 0x0F
- isNewReference := flag&0x08 == 0x08
- idxReference := uint16(flag & 0x07)
- idxInternal := uint16(packet[0])
- idx := (idxReference << 8) | idxInternal
-
- if isNewReference {
- atomLen := uint16(packet[1])
- if headerAtomLength == 2 {
- atomLen = binary.BigEndian.Uint16(packet[1:3])
- }
- // extract atom
- packet = packet[1+headerAtomLength:]
- if len(packet) < int(atomLen) {
- // malformed
- return nil, nil, ErrMalformed
- }
- atom := etf.Atom(packet[:atomLen])
- // store in temporary cache for decoding
- cache[i] = atom
-
-			// store in the link's cache
- l.cacheInMutex.Lock()
- l.cacheIn[idx] = &atom
- l.cacheInMutex.Unlock()
- packet = packet[atomLen:]
- continue
- }
-
- l.cacheInMutex.Lock()
- c := l.cacheIn[idx]
- l.cacheInMutex.Unlock()
- if c == nil {
- return cache, packet, ErrMissingInCache
- }
- cache[i] = *c
- packet = packet[1:]
- }
-
- return cache, packet, nil
-}
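
The flags area decoded above packs one 4-bit entry per atom reference, two per byte, low nibble first; bit 3 is NewCacheEntryFlag and bits 0..2 are the segment index. A minimal sketch of the nibble extraction; `refFlag` is an illustrative helper:

```go
package main

import "fmt"

// refFlag extracts the 4-bit flag for atom reference i from the packed
// flags area of a distribution header, mirroring the shift/mask arithmetic
// in decodeDistHeaderAtomCache.
func refFlag(flags []byte, i int) (isNew bool, segIndex uint16) {
	shift := uint((i & 0x01) * 4)
	f := (flags[i/2] >> shift) & 0x0F
	return f&0x08 == 0x08, uint16(f & 0x07)
}

func main() {
	// one byte packs two references: low nibble 0x3 (ref 0),
	// high nibble 0xA (ref 1)
	flags := []byte{0xA3}
	fmt.Println(refFlag(flags, 0)) // false 3 (existing entry, segment 3)
	fmt.Println(refFlag(flags, 1)) // true 2  (new entry, segment 2)
}
```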
-
-func (l *Link) SetAtomCache(cache *etf.AtomCache) {
- l.cacheOut = cache
-}
-
-func (l *Link) encodeDistHeaderAtomCache(b *lib.Buffer,
- writerAtomCache map[etf.Atom]etf.CacheItem,
- encodingAtomCache *etf.ListAtomCache) {
-
- n := encodingAtomCache.Len()
- if n == 0 {
- b.AppendByte(0)
- return
- }
-
- b.AppendByte(byte(n)) // write NumberOfAtomCache
-
- lenFlags := n/2 + 1
- b.Extend(lenFlags)
-
- flags := b.B[1 : lenFlags+1]
- flags[lenFlags-1] = 0 // clear last byte to make sure we have valid LongAtom flag
-
- for i := 0; i < len(encodingAtomCache.L); i++ {
- shift := uint((i & 0x01) * 4)
- idxReference := byte(encodingAtomCache.L[i].ID >> 8) // SegmentIndex
- idxInternal := byte(encodingAtomCache.L[i].ID & 255) // InternalSegmentIndex
-
- cachedItem := writerAtomCache[encodingAtomCache.L[i].Name]
- if !cachedItem.Encoded {
- idxReference |= 8 // set NewCacheEntryFlag
- }
-
- // we have to clear before reuse
- if shift == 0 {
- flags[i/2] = 0
- }
- flags[i/2] |= idxReference << shift
-
- if cachedItem.Encoded {
- b.AppendByte(idxInternal)
- continue
- }
-
- if encodingAtomCache.HasLongAtom {
- // 1 (InternalSegmentIndex) + 2 (length) + name
- allocLen := 1 + 2 + len(encodingAtomCache.L[i].Name)
- buf := b.Extend(allocLen)
- buf[0] = idxInternal
- binary.BigEndian.PutUint16(buf[1:3], uint16(len(encodingAtomCache.L[i].Name)))
- copy(buf[3:], encodingAtomCache.L[i].Name)
- } else {
-
- // 1 (InternalSegmentIndex) + 1 (length) + name
- allocLen := 1 + 1 + len(encodingAtomCache.L[i].Name)
- buf := b.Extend(allocLen)
- buf[0] = idxInternal
- buf[1] = byte(len(encodingAtomCache.L[i].Name))
- copy(buf[2:], encodingAtomCache.L[i].Name)
- }
-
- cachedItem.Encoded = true
- writerAtomCache[encodingAtomCache.L[i].Name] = cachedItem
- }
-
- if encodingAtomCache.HasLongAtom {
- shift := uint((n & 0x01) * 4)
- flags[lenFlags-1] |= 1 << shift // set LongAtom = 1
- }
-}
-
-func (l *Link) Writer(send <-chan []etf.Term, fragmentationUnit int) {
- var terms []etf.Term
-
- var encodingAtomCache *etf.ListAtomCache
- var writerAtomCache map[etf.Atom]etf.CacheItem
- var linkAtomCache *etf.AtomCache
- var lastCacheID int16 = -1
-
- var lenControl, lenMessage, lenAtomCache, lenPacket, startDataPosition int
- var atomCacheBuffer, packetBuffer *lib.Buffer
- var err error
-
- cacheEnabled := l.peer.flags.isSet(DIST_HDR_ATOM_CACHE) && l.cacheOut != nil
- fragmentationEnabled := l.peer.flags.isSet(FRAGMENTS) && fragmentationUnit > 0
-
-	// The header atom cache is encoded right after the control/message encoding process
-	// but must be stored as the first item in the packet.
-	// That's why we reserve some space for it up front, to avoid
-	// reallocating the packetBuffer data
- reserveHeaderAtomCache := 8192
-
- if cacheEnabled {
- encodingAtomCache = etf.TakeListAtomCache()
- defer etf.ReleaseListAtomCache(encodingAtomCache)
- writerAtomCache = make(map[etf.Atom]etf.CacheItem)
- linkAtomCache = l.cacheOut
- }
-
- encodeOptions := etf.EncodeOptions{
- LinkAtomCache: linkAtomCache,
- WriterAtomCache: writerAtomCache,
- EncodingAtomCache: encodingAtomCache,
- FlagBigCreation: l.peer.flags.isSet(BIG_CREATION),
- FlagV4NC: l.peer.flags.isSet(V4_NC),
- }
-
- for {
-		terms = <-send
-
- if terms == nil {
- // channel was closed
- return
- }
-
- packetBuffer = lib.TakeBuffer()
- lenControl, lenMessage, lenAtomCache, lenPacket, startDataPosition = 0, 0, 0, 0, reserveHeaderAtomCache
-
-		// reserve 8K for the header, which should be enough
- packetBuffer.Allocate(reserveHeaderAtomCache)
-
- // clear encoding cache
- if cacheEnabled {
- encodingAtomCache.Reset()
- }
-
- // encode Control
- err = etf.Encode(terms[0], packetBuffer, encodeOptions)
- if err != nil {
- fmt.Println(err)
- lib.ReleaseBuffer(packetBuffer)
- continue
- }
- lenControl = packetBuffer.Len() - reserveHeaderAtomCache
-
- // encode Message if present
- if len(terms) == 2 {
- err = etf.Encode(terms[1], packetBuffer, encodeOptions)
- if err != nil {
- fmt.Println(err)
- lib.ReleaseBuffer(packetBuffer)
- continue
- }
-
- }
- lenMessage = packetBuffer.Len() - reserveHeaderAtomCache - lenControl
-
-		// encode the Header Atom Cache if it's enabled
- if cacheEnabled && encodingAtomCache.Len() > 0 {
- atomCacheBuffer = lib.TakeBuffer()
- l.encodeDistHeaderAtomCache(atomCacheBuffer, writerAtomCache, encodingAtomCache)
- lenAtomCache = atomCacheBuffer.Len()
-
- if lenAtomCache > reserveHeaderAtomCache-22 {
-				// the encoded atom cache header exceeded the reserved space
-				// FIXME: handle this case gracefully instead of panicking
-				panic("exceeded the atom cache header size limit; please report this issue")
- }
-
- startDataPosition -= lenAtomCache
- copy(packetBuffer.B[startDataPosition:], atomCacheBuffer.B)
- lib.ReleaseBuffer(atomCacheBuffer)
-
- } else {
- lenAtomCache = 1
- startDataPosition -= lenAtomCache
- packetBuffer.B[startDataPosition] = byte(0)
- }
-
- for {
-
- // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistMessage) + lenAtomCache
- lenPacket = 1 + 1 + lenAtomCache + lenControl + lenMessage
-
- if !fragmentationEnabled || lenPacket < fragmentationUnit {
- // send as a single packet
- startDataPosition -= 6
-
- binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
- packetBuffer.B[startDataPosition+4] = protoDist // 131
- packetBuffer.B[startDataPosition+5] = protoDistMessage // 68
- if _, err := l.flusher.Write(packetBuffer.B[startDataPosition:]); err != nil {
- return
- }
- break
- }
-
- // Message should be fragmented
-
- // https://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution-header-for-fragmented-messages
- // "The entire atom cache and control message has to be part of the starting fragment"
-
- sequenceID := uint64(atomic.AddInt64(&l.sequenceID, 1))
- numFragments := lenMessage/fragmentationUnit + 1
-
- // 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID) + ...
- lenPacket = 1 + 1 + 8 + 8 + lenAtomCache + lenControl + fragmentationUnit
-
- // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID)
- startDataPosition -= 22
-
- binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
- packetBuffer.B[startDataPosition+4] = protoDist // 131
- packetBuffer.B[startDataPosition+5] = protoDistFragment1 // 69
-
- binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
- binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
- if _, err := l.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]); err != nil {
- return
- }
-
- startDataPosition += 4 + lenPacket
- numFragments--
-
- nextFragment:
-
- if len(packetBuffer.B[startDataPosition:]) > fragmentationUnit {
- lenPacket = 1 + 1 + 8 + 8 + fragmentationUnit
- // reuse the previous 22 bytes for the next frame header
- startDataPosition -= 22
-
- } else {
- // the last one
- lenPacket = 1 + 1 + 8 + 8 + len(packetBuffer.B[startDataPosition:])
- startDataPosition -= 22
- }
-
- binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
- packetBuffer.B[startDataPosition+4] = protoDist // 131
- packetBuffer.B[startDataPosition+5] = protoDistFragmentN // 70
-
- binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
- binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
-
- if _, err := l.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]); err != nil {
- return
- }
-
- startDataPosition += 4 + lenPacket
- numFragments--
- if numFragments > 0 {
- goto nextFragment
- }
-
- // done
- break
- }
-
- lib.ReleaseBuffer(packetBuffer)
-
- if !cacheEnabled {
- continue
- }
-
-		// get updates from the link AtomCache and refresh the local one (map writerAtomCache)
- id := linkAtomCache.GetLastID()
- if lastCacheID < id {
- linkAtomCache.Lock()
- for _, a := range linkAtomCache.ListSince(lastCacheID + 1) {
- writerAtomCache[a] = etf.CacheItem{ID: lastCacheID + 1, Name: a, Encoded: false}
- lastCacheID++
- }
- linkAtomCache.Unlock()
- }
-
- }
-
-}
-
-func (l *Link) GetRemoteName() string {
- return l.peer.Name
-}
-
-func (l *Link) composeName(b *lib.Buffer, tls bool) {
- if tls {
- b.Allocate(11)
- dataLength := 7 + len(l.Name) // byte + uint16 + uint32 + len(l.Name)
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.B[4] = 'n'
- binary.BigEndian.PutUint16(b.B[5:7], l.version) // uint16
- binary.BigEndian.PutUint32(b.B[7:11], l.flags.toUint32()) // uint32
- b.Append([]byte(l.Name))
- return
- }
-
- b.Allocate(9)
- dataLength := 7 + len(l.Name) // byte + uint16 + uint32 + len(l.Name)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'n'
- binary.BigEndian.PutUint16(b.B[3:5], l.version) // uint16
- binary.BigEndian.PutUint32(b.B[5:9], l.flags.toUint32()) // uint32
- b.Append([]byte(l.Name))
-}
-
-func (l *Link) composeNameVersion6(b *lib.Buffer, tls bool) {
- if tls {
- b.Allocate(19)
- dataLength := 15 + len(l.Name) // 1 + 8 (flags) + 4 (creation) + 2 (len l.Name)
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.B[4] = 'N'
- binary.BigEndian.PutUint64(b.B[5:13], l.flags.toUint64()) // uint64
- binary.BigEndian.PutUint32(b.B[13:17], l.creation) //uint32
- binary.BigEndian.PutUint16(b.B[17:19], uint16(len(l.Name))) // uint16
- b.Append([]byte(l.Name))
- return
- }
-
- b.Allocate(17)
- dataLength := 15 + len(l.Name) // 1 + 8 (flags) + 4 (creation) + 2 (len l.Name)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'N'
- binary.BigEndian.PutUint64(b.B[3:11], l.flags.toUint64()) // uint64
- binary.BigEndian.PutUint32(b.B[11:15], l.creation) // uint32
- binary.BigEndian.PutUint16(b.B[15:17], uint16(len(l.Name))) // uint16
- b.Append([]byte(l.Name))
-}
-
-func (l *Link) readName(b []byte) *Link {
- peer := &Link{
- Name: string(b[6:]),
- version: binary.BigEndian.Uint16(b[0:2]),
- flags: nodeFlag(binary.BigEndian.Uint32(b[2:6])),
- }
- return peer
-}
-
-func (l *Link) readNameVersion6(b []byte) *Link {
- nameLen := int(binary.BigEndian.Uint16(b[12:14]))
- peer := &Link{
- flags: nodeFlag(binary.BigEndian.Uint64(b[0:8])),
- creation: binary.BigEndian.Uint32(b[8:12]),
- Name: string(b[14 : 14+nameLen]),
- version: ProtoHandshake6,
- }
- return peer
-}
-
-func (l *Link) composeStatus(b *lib.Buffer, tls bool) {
-	// FIXME: there are a few options for the status:
- // ok, ok_simultaneous, nok, not_allowed, alive
- // More details here: https://erlang.org/doc/apps/erts/erl_dist_protocol.html#the-handshake-in-detail
- if tls {
- b.Allocate(4)
- dataLength := 3 // 's' + "ok"
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.Append([]byte("sok"))
- return
- }
-
- b.Allocate(2)
- dataLength := 3 // 's' + "ok"
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.Append([]byte("sok"))
-
-}
-
-func (l *Link) readStatus(msg []byte) bool {
- if string(msg[:2]) == "ok" {
- return true
- }
-
- return false
-}
-
-func (l *Link) composeChallenge(b *lib.Buffer, tls bool) {
- if tls {
- b.Allocate(15)
- dataLength := uint32(11 + len(l.Name))
- binary.BigEndian.PutUint32(b.B[0:4], dataLength)
- b.B[4] = 'n'
- binary.BigEndian.PutUint16(b.B[5:7], l.version) // uint16
- binary.BigEndian.PutUint32(b.B[7:11], l.flags.toUint32()) // uint32
- binary.BigEndian.PutUint32(b.B[11:15], l.challenge) // uint32
- b.Append([]byte(l.Name))
- return
- }
-
- b.Allocate(13)
- dataLength := 11 + len(l.Name)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'n'
- binary.BigEndian.PutUint16(b.B[3:5], l.version) // uint16
- binary.BigEndian.PutUint32(b.B[5:9], l.flags.toUint32()) // uint32
- binary.BigEndian.PutUint32(b.B[9:13], l.challenge) // uint32
- b.Append([]byte(l.Name))
-}
-
-func (l *Link) composeChallengeVersion6(b *lib.Buffer, tls bool) {
- if tls {
-		// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(l.Name))
- b.Allocate(23)
- dataLength := 19 + len(l.Name)
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.B[4] = 'N'
- binary.BigEndian.PutUint64(b.B[5:13], uint64(l.flags)) // uint64
- binary.BigEndian.PutUint32(b.B[13:17], l.challenge) // uint32
- binary.BigEndian.PutUint32(b.B[17:21], l.creation) // uint32
- binary.BigEndian.PutUint16(b.B[21:23], uint16(len(l.Name))) // uint16
- b.Append([]byte(l.Name))
- return
- }
-
-	// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(l.Name))
- b.Allocate(21)
- dataLength := 19 + len(l.Name)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'N'
- binary.BigEndian.PutUint64(b.B[3:11], uint64(l.flags)) // uint64
- binary.BigEndian.PutUint32(b.B[11:15], l.challenge) // uint32
- binary.BigEndian.PutUint32(b.B[15:19], l.creation) // uint32
- binary.BigEndian.PutUint16(b.B[19:21], uint16(len(l.Name))) // uint16
- b.Append([]byte(l.Name))
-}
-
-func (l *Link) readChallenge(msg []byte) (challenge uint32) {
- version := binary.BigEndian.Uint16(msg[0:2])
- if version != ProtoHandshake5 {
- return 0
- }
-
- link := &Link{
- Name: string(msg[10:]),
- version: version,
- flags: nodeFlag(binary.BigEndian.Uint32(msg[2:6])),
- }
- l.peer = link
- return binary.BigEndian.Uint32(msg[6:10])
-}
-
-func (l *Link) readChallengeVersion6(msg []byte) (challenge uint32) {
- lenName := int(binary.BigEndian.Uint16(msg[16:18]))
- link := &Link{
- Name: string(msg[18 : 18+lenName]),
- version: ProtoHandshake6,
- flags: nodeFlag(binary.BigEndian.Uint64(msg[0:8])),
- creation: binary.BigEndian.Uint32(msg[12:16]),
- }
- l.peer = link
- return binary.BigEndian.Uint32(msg[8:12])
-}
-
-func (l *Link) readComplement(msg []byte) {
- flags := uint64(binary.BigEndian.Uint32(msg[0:4])) << 32
- l.peer.flags = nodeFlag(l.peer.flags.toUint64() | flags)
- l.peer.creation = binary.BigEndian.Uint32(msg[4:8])
- return
-}
-
-func (l *Link) validateChallengeReply(b []byte) bool {
- l.peer.challenge = binary.BigEndian.Uint32(b[:4])
- digestB := b[4:]
-
- digestA := genDigest(l.challenge, l.Cookie)
- return bytes.Equal(digestA[:], digestB)
-}
-
-func (l *Link) composeChallengeAck(b *lib.Buffer, tls bool) {
- if tls {
- b.Allocate(5)
- dataLength := uint32(17) // 'a' + 16 (digest)
- binary.BigEndian.PutUint32(b.B[0:4], dataLength)
- b.B[4] = 'a'
- digest := genDigest(l.peer.challenge, l.Cookie)
- b.Append(digest)
- return
- }
-
- b.Allocate(3)
- dataLength := uint16(17) // 'a' + 16 (digest)
- binary.BigEndian.PutUint16(b.B[0:2], dataLength)
- b.B[2] = 'a'
- digest := genDigest(l.peer.challenge, l.Cookie)
- b.Append(digest)
-}
-
-func (l *Link) composeChallengeReply(b *lib.Buffer, challenge uint32, tls bool) {
- if tls {
- l.digest = genDigest(challenge, l.Cookie)
- b.Allocate(9)
- dataLength := 5 + len(l.digest) // 1 (byte) + 4 (challenge) + 16 (digest)
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.B[4] = 'r'
- binary.BigEndian.PutUint32(b.B[5:9], l.challenge) // uint32
- b.Append(l.digest[:])
- return
- }
-
- b.Allocate(7)
- l.digest = genDigest(challenge, l.Cookie)
- dataLength := 5 + len(l.digest) // 1 (byte) + 4 (challenge) + 16 (digest)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'r'
- binary.BigEndian.PutUint32(b.B[3:7], l.challenge) // uint32
- b.Append(l.digest)
-}
-
-func (l *Link) composeComplement(b *lib.Buffer, tls bool) {
- flags := uint32(l.flags.toUint64() >> 32)
- if tls {
- b.Allocate(13)
- dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
- binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
- b.B[4] = 'c'
- binary.BigEndian.PutUint32(b.B[5:9], flags)
- binary.BigEndian.PutUint32(b.B[9:13], l.creation)
- return
- }
-
- dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
- b.Allocate(11)
- binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
- b.B[2] = 'c'
- binary.BigEndian.PutUint32(b.B[3:7], flags)
- binary.BigEndian.PutUint32(b.B[7:11], l.creation)
- return
-}
-
-func genDigest(challenge uint32, cookie string) []byte {
- s := fmt.Sprintf("%s%d", cookie, challenge)
- digest := md5.Sum([]byte(s))
- return digest[:]
-}
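
For a concrete sense of `genDigest`: the handshake digest is MD5 over the cookie concatenated with the challenge rendered as a decimal string. A tiny, self-contained example (the cookie and challenge values here are made up):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	cookie := "secret"
	challenge := uint32(12345)
	// the hashed input is "secret12345"
	digest := md5.Sum([]byte(fmt.Sprintf("%s%d", cookie, challenge)))
	fmt.Printf("%x\n", digest[:]) // the 16-byte digest sent in 'r'/'a' messages
}
```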
diff --git a/node/epmd.go b/node/epmd.go
deleted file mode 100644
index 65db6848..00000000
--- a/node/epmd.go
+++ /dev/null
@@ -1,473 +0,0 @@
-package node
-
-import (
- "context"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/ergo-services/ergo/lib"
-)
-
-const (
- EPMD_ALIVE2_REQ = 120
- EPMD_ALIVE2_RESP = 121
-
- EPMD_PORT_PLEASE2_REQ = 122
- EPMD_PORT2_RESP = 119
-
- EPMD_NAMES_REQ = 110 // $n
-
- EPMD_DUMP_REQ = 100 // $d
- EPMD_KILL_REQ = 107 // $k
- EPMD_STOP_REQ = 115 // $s
-)
-
-type epmd struct {
- Name string
- Domain string
-
- // Listening port for incoming connections
- NodePort uint16
-
- // EPMD port for the cluster
- Port uint16
- Type uint8
-
- Protocol uint8
- HighVsn uint16
- LowVsn uint16
- Extra []byte
- Creation uint16
-
- staticOnly bool
- staticRoutes map[string]NetworkRoute
- mtx sync.RWMutex
-
- response chan interface{}
-}
-
-func (e *epmd) Init(ctx context.Context, name string, port uint16, opts Options) error {
- ns := strings.Split(name, "@")
- if len(ns) != 2 {
-		return fmt.Errorf("(EPMD) FQDN for node name is required (example: node@hostname)")
- }
-
- e.Name = ns[0]
- e.Domain = ns[1]
- e.NodePort = port
- e.Port = opts.EPMDPort
-
- // http://erlang.org/doc/reference_manual/distributed.html (section 13.5)
-	// 77 — regular public node, 72 — hidden
- if opts.Hidden {
- e.Type = 72
- } else {
- e.Type = 77
- }
-
- e.Protocol = 0
- e.HighVsn = uint16(opts.HandshakeVersion)
- e.LowVsn = 5
- // FIXME overflows value opts.creation is uint32
- e.Creation = uint16(opts.creation)
-
- e.staticOnly = opts.DisableEPMD
- e.staticRoutes = make(map[string]NetworkRoute)
-
- ready := make(chan error)
-
- go func(e *epmd) {
- defer close(ready)
- for {
- if !opts.DisableEPMDServer {
- // trying to start embedded EPMD before we go further
- Server(ctx, e.Port)
- }
- dsn := net.JoinHostPort("", strconv.Itoa(int(e.Port)))
- conn, err := net.Dial("tcp", dsn)
- if err != nil {
- ready <- err
- return
- }
-
- conn.Write(compose_ALIVE2_REQ(e))
-
- for {
- buf := make([]byte, 1024)
- _, err := conn.Read(buf)
- if err != nil {
- lib.Log("EPMD: closing connection")
- conn.Close()
- break
- }
-
- if buf[0] == EPMD_ALIVE2_RESP {
- creation := read_ALIVE2_RESP(buf)
- switch creation {
- case false:
- ready <- fmt.Errorf("Duplicate name '%s'", e.Name)
- return
- default:
- e.Creation = creation.(uint16)
- }
- ready <- nil
- } else {
- lib.Log("Malformed EPMD reply")
- conn.Close()
- break
- }
- }
-
- }
- }(e)
-
- return <-ready
-}
-
-func (e *epmd) AddStaticRoute(name string, port uint16, cookie string, tls bool) error {
- ns := strings.Split(name, "@")
- if len(ns) == 1 {
- ns = append(ns, "localhost")
- }
- if len(ns) != 2 {
- return fmt.Errorf("wrong FQDN")
- }
- if _, err := net.LookupHost(ns[1]); err != nil {
- return err
- }
-
- if e.staticOnly && port == 0 {
-		return fmt.Errorf("EPMD is disabled. Port must be > 0")
- }
-
- e.mtx.Lock()
- defer e.mtx.Unlock()
- if _, ok := e.staticRoutes[name]; ok {
-		// already exists
-		return fmt.Errorf("already exists")
- }
- e.staticRoutes[name] = NetworkRoute{int(port), cookie, tls}
-
- return nil
-}
-
-func (e *epmd) RemoveStaticRoute(name string) {
- e.mtx.Lock()
- defer e.mtx.Unlock()
- delete(e.staticRoutes, name)
- return
-}
-
-func (e *epmd) resolve(name string) (NetworkRoute, error) {
-	// check static routes first
- e.mtx.RLock()
- defer e.mtx.RUnlock()
- nr, ok := e.staticRoutes[name]
- if ok && nr.Port > 0 {
- return nr, nil
- }
-
- if e.staticOnly {
- return nr, fmt.Errorf("Can't resolve %s", name)
- }
-
- // no static route for the given name. go the regular way
- port, err := e.resolvePort(name)
- if err != nil {
- return nr, err
- }
- return NetworkRoute{port, nr.Cookie, nr.TLS}, nil
-}
-
-func (e *epmd) resolvePort(name string) (int, error) {
- ns := strings.Split(name, "@")
- if len(ns) != 2 {
- return 0, fmt.Errorf("incorrect FQDN node name (example: node@localhost)")
- }
- conn, err := net.Dial("tcp", net.JoinHostPort(ns[1], fmt.Sprintf("%d", e.Port)))
- if err != nil {
- return 0, err
- }
-
- defer conn.Close()
-
- buf := compose_PORT_PLEASE2_REQ(ns[0])
- _, err = conn.Write(buf)
- if err != nil {
- return -1, fmt.Errorf("initiate connection - %s", err)
- }
-
- buf = make([]byte, 1024)
- _, err = conn.Read(buf)
- if err != nil && err != io.EOF {
- return -1, fmt.Errorf("reading from link - %s", err)
- }
-
- if buf[0] == EPMD_PORT2_RESP && buf[1] == 0 {
- p := binary.BigEndian.Uint16(buf[2:4])
-		// we don't use the extra info for now. FIXME (do we need it?)
- return int(p), nil
- } else if buf[1] > 0 {
- return -1, fmt.Errorf("desired node not found")
- } else {
- return -1, fmt.Errorf("malformed reply - %#v", buf)
- }
-}
-
-func compose_ALIVE2_REQ(e *epmd) (reply []byte) {
- reply = make([]byte, 2+14+len(e.Name)+len(e.Extra))
- binary.BigEndian.PutUint16(reply[0:2], uint16(len(reply)-2))
- reply[2] = byte(EPMD_ALIVE2_REQ)
- binary.BigEndian.PutUint16(reply[3:5], e.NodePort)
- reply[5] = e.Type
- reply[6] = e.Protocol
- binary.BigEndian.PutUint16(reply[7:9], e.HighVsn)
- binary.BigEndian.PutUint16(reply[9:11], e.LowVsn)
- nLen := len(e.Name)
- binary.BigEndian.PutUint16(reply[11:13], uint16(nLen))
- offset := (13 + nLen)
- copy(reply[13:offset], e.Name)
- nELen := len(e.Extra)
- binary.BigEndian.PutUint16(reply[offset:offset+2], uint16(nELen))
- copy(reply[offset+2:offset+2+nELen], e.Extra)
- return
-}
-
-func read_ALIVE2_RESP(reply []byte) interface{} {
- if reply[1] == 0 {
- return binary.BigEndian.Uint16(reply[2:4])
- }
- return false
-}
-
-func compose_PORT_PLEASE2_REQ(name string) (reply []byte) {
- replylen := uint16(2 + len(name) + 1)
- reply = make([]byte, replylen)
- binary.BigEndian.PutUint16(reply[0:2], uint16(len(reply)-2))
- reply[2] = byte(EPMD_PORT_PLEASE2_REQ)
- copy(reply[3:replylen], name)
- return
-}
-
-// EPMD server implementation
-
-type nodeinfo struct {
- Port uint16
- Hidden bool
- HiVersion uint16
- LoVersion uint16
- Extra []byte
-}
-
-type embeddedEPMDserver struct {
- portmap map[string]*nodeinfo
- mtx sync.RWMutex
-}
-
-func (e *embeddedEPMDserver) Join(name string, info *nodeinfo) bool {
-
- e.mtx.Lock()
- defer e.mtx.Unlock()
- if _, ok := e.portmap[name]; ok {
- // already registered
- return false
- }
- lib.Log("EPMD registering node: '%s' port:%d hidden:%t", name, info.Port, info.Hidden)
- e.portmap[name] = info
-
- return true
-}
-
-func (e *embeddedEPMDserver) Get(name string) *nodeinfo {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
- if info, ok := e.portmap[name]; ok {
- return info
- }
- return nil
-}
-
-func (e *embeddedEPMDserver) Leave(name string) {
- lib.Log("EPMD unregistering node: '%s'", name)
-
- e.mtx.Lock()
- delete(e.portmap, name)
- e.mtx.Unlock()
-}
-
-func (e *embeddedEPMDserver) ListAll() map[string]uint16 {
- e.mtx.Lock()
- lst := make(map[string]uint16)
- for k, v := range e.portmap {
- lst[k] = v.Port
- }
- e.mtx.Unlock()
- return lst
-}
-
-func Server(ctx context.Context, port uint16) error {
-
- lc := net.ListenConfig{}
- epmd, err := lc.Listen(ctx, "tcp", net.JoinHostPort("", strconv.Itoa(int(port))))
- if err != nil {
- lib.Log("Can't start embedded EPMD service: %s", err)
- return fmt.Errorf("Can't start embedded EPMD service: %s", err)
-
- }
-
- epmdServer := &embeddedEPMDserver{
- portmap: make(map[string]*nodeinfo),
- }
-
-	lib.Log("Started embedded EPMD service, listening on port: %d", port)
-
- go func() {
- for {
- c, err := epmd.Accept()
- if err != nil {
- lib.Log(err.Error())
- continue
- }
-
- lib.Log("EPMD accepted new connection from %s", c.RemoteAddr().String())
-
- //epmd connection handler loop
- go func(c net.Conn) {
- defer c.Close()
- buf := make([]byte, 1024)
- name := ""
- for {
- n, err := c.Read(buf)
- lib.Log("Request from EPMD client: %v", buf[:n])
- if err != nil {
- if name != "" {
- epmdServer.Leave(name)
- }
- return
- }
-					// buf[0:2] - length
- if uint16(n-2) != binary.BigEndian.Uint16(buf[0:2]) {
- continue
- }
-
- switch buf[2] {
- case EPMD_ALIVE2_REQ:
- reply, registered := epmdServer.compose_ALIVE2_RESP(buf[3:n])
- c.Write(reply)
- if registered == "" {
- return
- }
- name = registered
-						if tcp, ok := c.(*net.TCPConn); ok {
- tcp.SetKeepAlive(true)
- tcp.SetKeepAlivePeriod(15 * time.Second)
- tcp.SetNoDelay(true)
- }
- continue
- case EPMD_PORT_PLEASE2_REQ:
- c.Write(epmdServer.compose_EPMD_PORT2_RESP(buf[3:n]))
- return
- case EPMD_NAMES_REQ:
- c.Write(epmdServer.compose_EPMD_NAMES_RESP(port, buf[3:n]))
- return
- default:
- lib.Log("unknown EPMD request")
- return
- }
-
- }
- }(c)
-
- }
- }()
-
- return nil
-}
-
-func (e *embeddedEPMDserver) compose_ALIVE2_RESP(req []byte) ([]byte, string) {
-
-	hidden := false
- if req[2] == 72 {
- hidden = true
- }
-
- namelen := binary.BigEndian.Uint16(req[8:10])
- name := string(req[10 : 10+namelen])
-
- info := nodeinfo{
- Port: binary.BigEndian.Uint16(req[0:2]),
- Hidden: hidden,
- HiVersion: binary.BigEndian.Uint16(req[4:6]),
- LoVersion: binary.BigEndian.Uint16(req[6:8]),
- }
-
- reply := make([]byte, 4)
- reply[0] = EPMD_ALIVE2_RESP
-
- registered := ""
- if e.Join(name, &info) {
- reply[1] = 0
- registered = name
- } else {
- reply[1] = 1
- }
-
- binary.BigEndian.PutUint16(reply[2:], uint16(1))
- lib.Log("Made reply for ALIVE2_REQ: (%s) %#v", name, reply)
- return reply, registered
-}
-
-func (e *embeddedEPMDserver) compose_EPMD_PORT2_RESP(req []byte) []byte {
- name := string(req)
- info := e.Get(name)
-
- if info == nil {
- // not found
- lib.Log("EPMD: looking for '%s'. Not found", name)
- return []byte{EPMD_PORT2_RESP, 1}
- }
-
- reply := make([]byte, 12+len(name)+2+len(info.Extra))
- reply[0] = EPMD_PORT2_RESP
- reply[1] = 0
- binary.BigEndian.PutUint16(reply[2:4], uint16(info.Port))
- if info.Hidden {
- reply[4] = 72
- } else {
- reply[4] = 77
- }
- reply[5] = 0 // protocol tcp
- binary.BigEndian.PutUint16(reply[6:8], uint16(info.HiVersion))
- binary.BigEndian.PutUint16(reply[8:10], uint16(info.LoVersion))
- binary.BigEndian.PutUint16(reply[10:12], uint16(len(name)))
- offset := 12 + len(name)
- copy(reply[12:offset], name)
- nELen := len(info.Extra)
- binary.BigEndian.PutUint16(reply[offset:offset+2], uint16(nELen))
- copy(reply[offset+2:offset+2+nELen], info.Extra)
-
- lib.Log("Made reply for EPMD_PORT_PLEASE2_REQ: %#v", reply)
-
- return reply
-}
-
-func (e *embeddedEPMDserver) compose_EPMD_NAMES_RESP(port uint16, req []byte) []byte {
- // io:format("name ~ts at port ~p~n", [NodeName, Port]).
- var str strings.Builder
- var s string
- var portbuf [4]byte
- binary.BigEndian.PutUint32(portbuf[0:4], uint32(port))
- str.WriteString(string(portbuf[0:]))
- for h, p := range e.ListAll() {
- s = fmt.Sprintf("name %s at port %d\n", h, p)
- str.WriteString(s)
- }
-
- return []byte(str.String())
-}
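
For completeness, the client side of the EPMD resolver removed above boils down to a single request/response round trip. A hedged sketch of that exchange under the same wire format (the helper name and address are illustrative; it assumes an EPMD daemon is reachable):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

// queryNodePort asks an EPMD daemon for a node's listening port via
// PORT_PLEASE2_REQ / PORT2_RESP: a 2-byte length prefix, request byte 122
// and the bare node name; the reply starts with 119, a result byte
// (0 = found), then the 2-byte port.
func queryNodePort(epmdAddr, nodeName string) (uint16, error) {
	conn, err := net.Dial("tcp", epmdAddr)
	if err != nil {
		return 0, err
	}
	defer conn.Close()

	req := make([]byte, 3+len(nodeName))
	binary.BigEndian.PutUint16(req[0:2], uint16(1+len(nodeName)))
	req[2] = 122 // EPMD_PORT_PLEASE2_REQ
	copy(req[3:], nodeName)
	if _, err := conn.Write(req); err != nil {
		return 0, err
	}

	resp := make([]byte, 1024)
	n, err := conn.Read(resp)
	if err != nil && err != io.EOF {
		return 0, err
	}
	if n < 4 || resp[0] != 119 || resp[1] != 0 { // EPMD_PORT2_RESP, result 0
		return 0, fmt.Errorf("node not found or malformed reply")
	}
	return binary.BigEndian.Uint16(resp[2:4]), nil
}

func main() {
	port, err := queryNodePort("127.0.0.1:4369", "demo") // assumes a local EPMD
	fmt.Println(port, err)
}
```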
diff --git a/node/monitor.go b/node/monitor.go
index ba89c4c1..e779d3c7 100644
--- a/node/monitor.go
+++ b/node/monitor.go
@@ -3,8 +3,7 @@ package node
// http://erlang.org/doc/reference_manual/processes.html
import (
- "math"
- "strings"
+ "fmt"
"sync"
"github.com/ergo-services/ergo/etf"
@@ -17,22 +16,33 @@ type monitorItem struct {
ref etf.Ref
}
-type linkProcessRequest struct {
- pidA etf.Pid
- pidB etf.Pid
-}
-
type monitorInternal interface {
- monitorProcess(by etf.Pid, process interface{}, ref etf.Ref)
- demonitorProcess(ref etf.Ref) bool
- monitorNode(by etf.Pid, node string) etf.Ref
+ // RouteLink
+ RouteLink(pidA etf.Pid, pidB etf.Pid) error
+ // RouteUnlink
+ RouteUnlink(pidA etf.Pid, pidB etf.Pid) error
+ // RouteExit
+ RouteExit(to etf.Pid, terminated etf.Pid, reason string) error
+ // RouteMonitorReg
+ RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error
+ // RouteMonitor
+ RouteMonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error
+ // RouteDemonitor
+ RouteDemonitor(by etf.Pid, ref etf.Ref) error
+ // RouteMonitorExitReg
+ RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error
+ // RouteMonitorExit
+ RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error
+ // RouteNodeDown
+ RouteNodeDown(name string)
+
+ // IsMonitor
+ IsMonitor(ref etf.Ref) bool
+
+ monitorNode(by etf.Pid, node string, ref etf.Ref)
demonitorNode(ref etf.Ref) bool
- nodeDown(name string)
- processTerminated(terminated etf.Pid, name, reason string)
-
- link(pidA, pidB etf.Pid)
- unlink(pidA, pidB etf.Pid)
+ handleTerminated(terminated etf.Pid, name, reason string)
processLinks(process etf.Pid) []etf.Pid
processMonitors(process etf.Pid) []etf.Pid
@@ -41,297 +51,48 @@ type monitorInternal interface {
}
type monitor struct {
+ // monitors by pid
processes map[etf.Pid][]monitorItem
ref2pid map[etf.Ref]etf.Pid
- mutexProcesses sync.Mutex
- links map[etf.Pid][]etf.Pid
- mutexLinks sync.Mutex
- nodes map[string][]monitorItem
- ref2node map[etf.Ref]string
- mutexNodes sync.Mutex
-
- registrar registrarInternal
+ mutexProcesses sync.RWMutex
+ // monitors by name
+ names map[gen.ProcessID][]monitorItem
+ ref2name map[etf.Ref]gen.ProcessID
+ mutexNames sync.RWMutex
+
+ // links
+ links map[etf.Pid][]etf.Pid
+ mutexLinks sync.Mutex
+
+ // monitors of nodes
+ nodes map[string][]monitorItem
+ ref2node map[etf.Ref]string
+ mutexNodes sync.Mutex
+
+ nodename string
+ router coreRouterInternal
}
-func newMonitor(registrar registrarInternal) monitor {
- return monitor{
+func newMonitor(nodename string, router coreRouterInternal) monitorInternal {
+ return &monitor{
processes: make(map[etf.Pid][]monitorItem),
+ names: make(map[gen.ProcessID][]monitorItem),
links: make(map[etf.Pid][]etf.Pid),
nodes: make(map[string][]monitorItem),
ref2pid: make(map[etf.Ref]etf.Pid),
+ ref2name: make(map[etf.Ref]gen.ProcessID),
ref2node: make(map[etf.Ref]string),
- registrar: registrar,
- }
-}
-
-func (m *monitor) monitorProcess(by etf.Pid, process interface{}, ref etf.Ref) {
- if by.Node != ref.Node {
- lib.Log("[%s] Incorrect monitor request by Pid = %v and Ref = %v", m.registrar.NodeName(), by, ref)
- return
- }
-
-next:
- switch t := process.(type) {
- case etf.Pid:
- lib.Log("[%s] MONITOR process: %s => %s", m.registrar.NodeName(), by, t)
-
- // If 'process' belongs to this node we should make sure if its alive.
- // http://erlang.org/doc/reference_manual/processes.html#monitors
- // If Pid does not exist a gen.MessageDown must be
-		// sent immediately with Reason set to noproc.
- if p := m.registrar.ProcessByPid(t); string(t.Node) == m.registrar.NodeName() && p == nil {
- m.notifyProcessTerminated(ref, by, t, "noproc")
- return
- }
-
- m.mutexProcesses.Lock()
- l := m.processes[t]
- item := monitorItem{
- pid: by,
- ref: ref,
- }
- m.processes[t] = append(l, item)
- m.ref2pid[ref] = t
- m.mutexProcesses.Unlock()
-
- if isVirtualPid(t) {
- // this Pid was created as a virtual. we use virtual pids for the
- // monitoring process by the registered name.
- return
- }
-
- if string(t.Node) == m.registrar.NodeName() {
- // this is the local process so we have nothing to do
- return
- }
-
- // request monitoring the remote process
- message := etf.Tuple{distProtoMONITOR, by, t, ref}
- if err := m.registrar.routeRaw(t.Node, message); err != nil {
- m.notifyProcessTerminated(ref, by, t, "noconnection")
- m.mutexProcesses.Lock()
- delete(m.ref2pid, ref)
- m.mutexProcesses.Unlock()
- }
-
- case string:
- // requesting monitor of local process
- vPid := virtualPid(gen.ProcessID{t, m.registrar.NodeName()})
- // If Pid does not exist a gen.MessageDown must be
- // send immediately with Reason set to noproc.
- if p := m.registrar.ProcessByName(t); p == nil {
- m.notifyProcessTerminated(ref, by, vPid, "noproc")
- return
- }
- process = vPid
- goto next
-
- case etf.Atom:
- // the same as 'string'
- vPid := virtualPid(gen.ProcessID{string(t), m.registrar.NodeName()})
- if p := m.registrar.ProcessByName(string(t)); p == nil {
- m.notifyProcessTerminated(ref, by, vPid, "noproc")
- return
- }
- process = vPid
- goto next
-
- case gen.ProcessID:
- // requesting monitor of remote process by the local one using registered process name
- vPid := virtualPid(t)
- process = vPid
-
- if t.Node == m.registrar.NodeName() {
- // If Pid does not exist a gen.MessageDown must be
- // send immediately with Reason set to noproc.
- if p := m.registrar.ProcessByName(t.Name); p == nil {
- m.notifyProcessTerminated(ref, by, vPid, "noproc")
- return
- }
- goto next
- }
-
- message := etf.Tuple{distProtoMONITOR, by, etf.Atom(t.Name), ref}
- if err := m.registrar.routeRaw(etf.Atom(t.Node), message); err != nil {
- m.notifyProcessTerminated(ref, by, vPid, "noconnection")
- return
- }
-
- // in order to handle 'nodedown' event we create a local monitor on a virtual pid
- goto next
- }
-}
-
-func (m *monitor) demonitorProcess(ref etf.Ref) bool {
- var process interface{}
- var node etf.Atom
-
- m.mutexProcesses.Lock()
- defer m.mutexProcesses.Unlock()
-
- pid, knownRef := m.ref2pid[ref]
- if !knownRef {
- // unknown monitor reference
- return false
- }
-
- // cheching for monitorItem list
- items := m.processes[pid]
-
- // remove PID from monitoring processes list
- for i := range items {
- if items[i].ref != ref {
- continue
- }
- process = pid
- node = pid.Node
- if isVirtualPid(pid) {
- processID := virtualPidToProcessID(pid)
- process = etf.Atom(processID.Name)
- node = etf.Atom(processID.Node)
- }
-
- if string(node) != m.registrar.NodeName() {
- message := etf.Tuple{distProtoDEMONITOR, items[i].pid, process, ref}
- m.registrar.routeRaw(node, message)
- }
-
- items[i] = items[0]
- items = items[1:]
- delete(m.ref2pid, ref)
- break
-
- }
-
- if len(items) == 0 {
- delete(m.processes, pid)
- } else {
- m.processes[pid] = items
+ nodename: nodename,
+ router: router,
}
-
- return true
}
-func (m *monitor) link(pidA, pidB etf.Pid) {
- lib.Log("[%s] LINK process: %v => %v", m.registrar.NodeName(), pidA, pidB)
+func (m *monitor) monitorNode(by etf.Pid, node string, ref etf.Ref) {
+	lib.Log("[%s] MONITOR NODE: %v => %s", m.nodename, by, node)
- // http://erlang.org/doc/reference_manual/processes.html#links
- // Links are bidirectional and there can only be one link between
- // two processes. Repeated calls to link(Pid) have no effect.
-
- // If the link already exists or a process attempts to create
- // a link to itself, nothing is done.
- if pidA == pidB {
- return
- }
-
- m.mutexLinks.Lock()
- defer m.mutexLinks.Unlock()
-
- linksA := m.links[pidA]
- if pidA.Node == etf.Atom(m.registrar.NodeName()) {
- // check if these processes are linked already (source)
- for i := range linksA {
- if linksA[i] == pidB {
- return
- }
- }
-
- m.links[pidA] = append(linksA, pidB)
- }
-
- // check if these processes are linked already (destination)
- linksB := m.links[pidB]
- for i := range linksB {
- if linksB[i] == pidA {
- return
- }
- }
-
- if pidB.Node == etf.Atom(m.registrar.NodeName()) {
- // for the local process we should make sure if its alive
- // otherwise send 'EXIT' message with 'noproc' as a reason
- if p := m.registrar.ProcessByPid(pidB); p == nil {
- m.notifyProcessExit(pidA, pidB, "noproc")
- if len(linksA) > 0 {
- m.links[pidA] = linksA
- } else {
- delete(m.links, pidA)
- }
- return
- }
- } else {
- // linking with remote process
- message := etf.Tuple{distProtoLINK, pidA, pidB}
- if err := m.registrar.routeRaw(pidB.Node, message); err != nil {
- // seems we have no connection with this node. notify the sender
- // with 'EXIT' message and 'noconnection' as a reason
- m.notifyProcessExit(pidA, pidB, "noconnection")
- if len(linksA) > 0 {
- m.links[pidA] = linksA
- } else {
- delete(m.links, pidA)
- }
- return
- }
- }
-
- m.links[pidB] = append(linksB, pidA)
-}
-
-func (m *monitor) unlink(pidA, pidB etf.Pid) {
- m.mutexLinks.Lock()
- defer m.mutexLinks.Unlock()
-
- if pidB.Node != etf.Atom(m.registrar.NodeName()) {
- message := etf.Tuple{distProtoUNLINK, pidA, pidB}
- m.registrar.routeRaw(pidB.Node, message)
- }
-
- if pidA.Node == etf.Atom(m.registrar.NodeName()) {
- linksA := m.links[pidA]
- for i := range linksA {
- if linksA[i] != pidB {
- continue
- }
-
- linksA[i] = linksA[0]
- linksA = linksA[1:]
- if len(linksA) > 0 {
- m.links[pidA] = linksA
- } else {
- delete(m.links, pidA)
- }
- break
-
- }
- }
-
- linksB := m.links[pidB]
- for i := range linksB {
- if linksB[i] != pidA {
- continue
- }
- linksB[i] = linksB[0]
- linksB = linksB[1:]
- if len(linksB) > 0 {
- m.links[pidB] = linksB
- } else {
- delete(m.links, pidB)
- }
- break
-
- }
-}
-
-func (m *monitor) monitorNode(by etf.Pid, node string) etf.Ref {
- lib.Log("[%s] MONITOR NODE : %v => %s", m.registrar.NodeName(), by, node)
-
- ref := m.registrar.MakeRef()
m.mutexNodes.Lock()
- defer m.mutexNodes.Unlock()
l := m.nodes[node]
item := monitorItem{
@@ -340,8 +101,12 @@ func (m *monitor) monitorNode(by etf.Pid, node string) etf.Ref {
}
m.nodes[node] = append(l, item)
m.ref2node[ref] = node
+ m.mutexNodes.Unlock()
- return ref
+ _, err := m.router.GetConnection(node)
+ if err != nil {
+ m.RouteNodeDown(node)
+ }
}
func (m *monitor) demonitorNode(ref etf.Ref) bool {
@@ -365,47 +130,64 @@ func (m *monitor) demonitorNode(ref etf.Ref) bool {
l[i] = l[0]
l = l[1:]
- m.mutexProcesses.Lock()
- delete(m.ref2pid, ref)
- m.mutexProcesses.Unlock()
break
-
}
- m.nodes[name] = l
delete(m.ref2node, ref)
+
+ if len(l) == 0 {
+ delete(m.nodes, name)
+ } else {
+ m.nodes[name] = l
+ }
+
return true
}
-func (m *monitor) nodeDown(name string) {
- lib.Log("[%s] MONITOR NODE down: %v", m.registrar.NodeName(), name)
+func (m *monitor) RouteNodeDown(name string) {
+ lib.Log("[%s] MONITOR NODE down: %v", m.nodename, name)
+ // notify node monitors
m.mutexNodes.Lock()
if pids, ok := m.nodes[name]; ok {
for i := range pids {
- lib.Log("[%s] MONITOR node down: %v. send notify to: %s", m.registrar.NodeName(), name, pids[i].pid)
- m.notifyNodeDown(pids[i].pid, name)
- delete(m.nodes, name)
+ lib.Log("[%s] MONITOR node down: %v. send notify to: %s", m.nodename, name, pids[i].pid)
+ message := gen.MessageNodeDown{Name: name}
+ m.router.RouteSend(etf.Pid{}, pids[i].pid, message)
}
+ delete(m.nodes, name)
}
m.mutexNodes.Unlock()
- // notify process monitors
+	// notify processes that created monitors by pid
m.mutexProcesses.Lock()
for pid, ps := range m.processes {
- if isVirtualPid(pid) {
- processID := virtualPidToProcessID(pid)
- if processID.Node != name {
- continue
- }
+ if string(pid.Node) != name {
+ continue
}
for i := range ps {
- m.notifyProcessTerminated(ps[i].ref, ps[i].pid, pid, "noconnection")
+ // args: (to, terminated, reason, ref)
delete(m.ref2pid, ps[i].ref)
+ m.sendMonitorExit(ps[i].pid, pid, "noconnection", ps[i].ref)
}
delete(m.processes, pid)
}
m.mutexProcesses.Unlock()
+	// notify processes that created monitors by name
+ m.mutexNames.Lock()
+ for processID, ps := range m.names {
+ if processID.Node != name {
+ continue
+ }
+ for i := range ps {
+ // args: (to, terminated, reason, ref)
+ delete(m.ref2name, ps[i].ref)
+ m.sendMonitorExitReg(ps[i].pid, processID, "noconnection", ps[i].ref)
+ }
+ delete(m.names, processID)
+ }
+ m.mutexNames.Unlock()
+
// notify linked processes
m.mutexLinks.Lock()
for link, pids := range m.links {
@@ -414,7 +196,7 @@ func (m *monitor) nodeDown(name string) {
}
for i := range pids {
- m.notifyProcessExit(pids[i], link, "noconnection")
+ m.sendExit(pids[i], link, "noconnection")
p, ok := m.links[pids[i]]
if !ok {
@@ -444,39 +226,41 @@ func (m *monitor) nodeDown(name string) {
m.mutexLinks.Unlock()
}
-func (m *monitor) processTerminated(terminated etf.Pid, name, reason string) {
- lib.Log("[%s] MONITOR process terminated: %v", m.registrar.NodeName(), terminated)
-
- // just wrapper for the iterating through monitors list
- handleMonitors := func(terminatedPid etf.Pid, items []monitorItem) {
- for i := range items {
- lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.registrar.NodeName(), terminated, items[i].pid)
- m.notifyProcessTerminated(items[i].ref, items[i].pid, terminatedPid, reason)
- delete(m.ref2pid, items[i].ref)
- }
- delete(m.processes, terminatedPid)
- }
+func (m *monitor) handleTerminated(terminated etf.Pid, name string, reason string) {
+ lib.Log("[%s] MONITOR process terminated: %v", m.nodename, terminated)
- m.mutexProcesses.Lock()
	// if the terminated process had a name, make sure to clean up all of its monitors
+ m.mutexNames.Lock()
if name != "" {
- // monitor was created by the name so we should look up using virtual pid
- terminatedPid := virtualPid(gen.ProcessID{name, m.registrar.NodeName()})
- if items, ok := m.processes[terminatedPid]; ok {
- handleMonitors(terminatedPid, items)
+ terminatedProcessID := gen.ProcessID{Name: name, Node: m.nodename}
+ if items, ok := m.names[terminatedProcessID]; ok {
+ for i := range items {
+ lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminatedProcessID, items[i].pid)
+ m.sendMonitorExitReg(items[i].pid, terminatedProcessID, reason, items[i].ref)
+ delete(m.ref2name, items[i].ref)
+ }
+ delete(m.names, terminatedProcessID)
}
}
+ m.mutexNames.Unlock()
	// check whether we have a monitorItem on this process by Pid (terminated)
+ m.mutexProcesses.Lock()
if items, ok := m.processes[terminated]; ok {
- handleMonitors(terminated, items)
+
+ for i := range items {
+ lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)
+ m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref)
+ delete(m.ref2pid, items[i].ref)
+ }
+ delete(m.processes, terminated)
}
m.mutexProcesses.Unlock()
m.mutexLinks.Lock()
if pidLinks, ok := m.links[terminated]; ok {
for i := range pidLinks {
- lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.registrar.NodeName(), terminated, pidLinks[i])
- m.notifyProcessExit(pidLinks[i], terminated, reason)
+ lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.nodename, terminated, pidLinks[i])
+ m.sendExit(pidLinks[i], terminated, reason)
// remove A link
pids, ok := m.links[pidLinks[i]]
@@ -501,7 +285,7 @@ func (m *monitor) processTerminated(terminated etf.Pid, name, reason string) {
// remove link
delete(m.links, terminated)
}
- defer m.mutexLinks.Unlock()
+ m.mutexLinks.Unlock()
}
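
Note the change at the tail of `handleTerminated`: the old `defer m.mutexLinks.Unlock()` becomes an explicit unlock, and each phase (names, then pids, then links) now holds only its own mutex for as short a window as possible. A minimal self-contained illustration of that phased-locking shape (names here are illustrative, not from the codebase):

```go
package main

import (
	"fmt"
	"sync"
)

type state struct {
	mutexNames sync.Mutex
	names      map[string]int

	mutexLinks sync.Mutex
	links      map[string][]string
}

// cleanup touches each structure under its own lock and releases it as soon
// as that phase is done, instead of one defer pinning a lock to function exit.
func (s *state) cleanup(key string) {
	s.mutexNames.Lock()
	delete(s.names, key)
	s.mutexNames.Unlock() // phase 1 done; readers of names may proceed

	s.mutexLinks.Lock()
	delete(s.links, key)
	s.mutexLinks.Unlock() // phase 2 done
}

func main() {
	s := &state{
		names: map[string]int{"a": 1},
		links: map[string][]string{"a": {"b"}},
	}
	s.cleanup("a")
	fmt.Println(len(s.names), len(s.links)) // 0 0
}
```
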
@@ -521,9 +305,6 @@ func (m *monitor) processMonitors(process etf.Pid) []etf.Pid {
defer m.mutexProcesses.Unlock()
for p, by := range m.processes {
- if isVirtualPid(p) {
- continue
- }
for b := range by {
if by[b].pid == process {
monitors = append(monitors, p)
@@ -538,13 +319,9 @@ func (m *monitor) processMonitorsByName(process etf.Pid) []gen.ProcessID {
m.mutexProcesses.Lock()
defer m.mutexProcesses.Unlock()
- for p, by := range m.processes {
- if !isVirtualPid(p) {
- continue
- }
+ for processID, by := range m.names {
for b := range by {
if by[b].pid == process {
- processID := virtualPidToProcessID(p)
monitors = append(monitors, processID)
}
}
@@ -556,9 +333,7 @@ func (m *monitor) processMonitoredBy(process etf.Pid) []etf.Pid {
monitors := []etf.Pid{}
m.mutexProcesses.Lock()
defer m.mutexProcesses.Unlock()
-
if m, ok := m.processes[process]; ok {
- monitors := []etf.Pid{}
for i := range m {
monitors = append(monitors, m[i].pid)
}
@@ -573,89 +348,472 @@ func (m *monitor) IsMonitor(ref etf.Ref) bool {
if _, ok := m.ref2pid[ref]; ok {
return true
}
+ if _, ok := m.ref2name[ref]; ok {
+ return true
+ }
return false
}
-func (m *monitor) notifyNodeDown(to etf.Pid, node string) {
- message := gen.MessageNodeDown{node}
- m.registrar.route(etf.Pid{}, to, message)
+//
+// implementation of CoreRouter interface:
+//
+// RouteLink
+// RouteUnlink
+// RouteExit
+// RouteMonitor
+// RouteMonitorReg
+// RouteDemonitor
+// RouteMonitorExit
+// RouteMonitorExitReg
+//
+
+func (m *monitor) RouteLink(pidA etf.Pid, pidB etf.Pid) error {
+ lib.Log("[%s] LINK process: %v => %v", m.nodename, pidA, pidB)
+
+ // http://erlang.org/doc/reference_manual/processes.html#links
+ // Links are bidirectional and there can only be one link between
+ // two processes. Repeated calls to link(Pid) have no effect.
+
+	// Returns an error if the link already exists or if a process attempts
+	// to create a link to itself
+
+ if pidA == pidB {
+		return fmt.Errorf("cannot link to itself")
+ }
+
+ m.mutexLinks.Lock()
+ linksA := m.links[pidA]
+ m.mutexLinks.Unlock()
+
+ if pidA.Node == etf.Atom(m.nodename) {
+ // check if these processes are linked already (source)
+ for i := range linksA {
+ if linksA[i] == pidB {
+				return fmt.Errorf("already linked")
+ }
+ }
+
+ }
+
+ // check if these processes are linked already (destination)
+ m.mutexLinks.Lock()
+ linksB := m.links[pidB]
+ m.mutexLinks.Unlock()
+
+ for i := range linksB {
+ if linksB[i] == pidA {
+			return fmt.Errorf("already linked")
+ }
+ }
+
+ if pidB.Node == etf.Atom(m.nodename) {
+		// for the local process we should make sure it is alive,
+		// otherwise send an 'EXIT' message with 'noproc' as the reason
+ if p := m.router.processByPid(pidB); p == nil {
+ m.sendExit(pidA, pidB, "noproc")
+ return ErrProcessUnknown
+ }
+ m.mutexLinks.Lock()
+ m.links[pidA] = append(linksA, pidB)
+ m.links[pidB] = append(linksB, pidA)
+ m.mutexLinks.Unlock()
+ return nil
+ }
+
+ // linking with remote process
+ connection, err := m.router.GetConnection(string(pidB.Node))
+ if err != nil {
+ m.sendExit(pidA, pidB, "noconnection")
+ return err
+ }
+
+ if err := connection.Link(pidA, pidB); err != nil {
+ m.sendExit(pidA, pidB, err.Error())
+ return err
+ }
+
+ m.mutexLinks.Lock()
+ m.links[pidA] = append(linksA, pidB)
+ m.links[pidB] = append(linksB, pidA)
+ m.mutexLinks.Unlock()
+ return nil
+}
+
+func (m *monitor) RouteUnlink(pidA etf.Pid, pidB etf.Pid) error {
+ m.mutexLinks.Lock()
+ defer m.mutexLinks.Unlock()
+
+ if pidA.Node == etf.Atom(m.nodename) {
+ linksA := m.links[pidA]
+ for i := range linksA {
+ if linksA[i] != pidB {
+ continue
+ }
+
+ linksA[i] = linksA[0]
+ linksA = linksA[1:]
+ if len(linksA) > 0 {
+ m.links[pidA] = linksA
+ } else {
+ delete(m.links, pidA)
+ }
+ break
+ }
+ }
+
+ linksB := m.links[pidB]
+ for i := range linksB {
+ if linksB[i] != pidA {
+ continue
+ }
+ linksB[i] = linksB[0]
+ linksB = linksB[1:]
+ if len(linksB) > 0 {
+ m.links[pidB] = linksB
+ } else {
+ delete(m.links, pidB)
+ }
+ break
+
+ }
+
+ if pidB.Node != etf.Atom(m.nodename) {
+ connection, err := m.router.GetConnection(string(pidB.Node))
+ if err != nil {
+ m.sendExit(pidA, pidB, "noconnection")
+ return err
+ }
+ if err := connection.Unlink(pidA, pidB); err != nil {
+ m.sendExit(pidA, pidB, err.Error())
+ return err
+ }
+ }
+ return nil
}
-func (m *monitor) notifyProcessTerminated(ref etf.Ref, to etf.Pid, terminated etf.Pid, reason string) {
- // for remote {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
- localNode := etf.Atom(m.registrar.NodeName())
- if to.Node != localNode {
- // do nothing
- if reason == "noconnection" {
- return
+func (m *monitor) RouteExit(to etf.Pid, terminated etf.Pid, reason string) error {
+ m.mutexLinks.Lock()
+ defer m.mutexLinks.Unlock()
+
+ pidLinks, ok := m.links[terminated]
+ if !ok {
+ return nil
+ }
+ for i := range pidLinks {
+ lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.nodename, terminated, pidLinks[i])
+ m.sendExit(pidLinks[i], terminated, reason)
+
+ // remove A link
+ pids, ok := m.links[pidLinks[i]]
+ if !ok {
+ continue
+ }
+ for k := range pids {
+ if pids[k] != terminated {
+ continue
+ }
+ pids[k] = pids[0]
+ pids = pids[1:]
+ break
}
- if isVirtualPid(terminated) {
- // it was monitored by name and this Pid was created using virtualPid().
- processID := virtualPidToProcessID(terminated)
- message := etf.Tuple{distProtoMONITOR_EXIT, etf.Atom(processID.Name), to, ref, etf.Atom(reason)}
- m.registrar.routeRaw(to.Node, message)
- return
+
+ if len(pids) > 0 {
+ m.links[pidLinks[i]] = pids
+ } else {
+ delete(m.links, pidLinks[i])
}
- // terminated is a real Pid. send it as it is.
- message := etf.Tuple{distProtoMONITOR_EXIT, terminated, to, ref, etf.Atom(reason)}
- m.registrar.routeRaw(to.Node, message)
- return
}
+ // remove link
+ delete(m.links, terminated)
+ return nil
- if isVirtualPid(terminated) {
- // it was monitored by name
- down := gen.MessageDown{
- Ref: ref,
- ProcessID: virtualPidToProcessID(terminated),
- Reason: reason,
+}
+
+func (m *monitor) RouteMonitor(by etf.Pid, pid etf.Pid, ref etf.Ref) error {
+ lib.Log("[%s] MONITOR process: %s => %s", m.nodename, by, pid)
+
+	// If 'process' belongs to this node we should make sure it is alive.
+	// http://erlang.org/doc/reference_manual/processes.html#monitors
+	// If the Pid does not exist, a gen.MessageDown must be
+	// sent immediately with Reason set to noproc.
+ if p := m.router.processByPid(pid); string(pid.Node) == m.nodename && p == nil {
+ return m.sendMonitorExit(by, pid, "noproc", ref)
+ }
+
+ if string(pid.Node) != m.nodename {
+ connection, err := m.router.GetConnection(string(pid.Node))
+ if err != nil {
+ m.sendMonitorExit(by, pid, "noconnection", ref)
+ return err
+ }
+
+ if err := connection.Monitor(by, pid, ref); err != nil {
+ m.sendMonitorExit(by, pid, "noconnection", ref)
+ return err
}
- m.registrar.route(terminated, to, down)
- return
}
+
+ m.mutexProcesses.Lock()
+ l := m.processes[pid]
+ item := monitorItem{
+ pid: by,
+ ref: ref,
+ }
+ m.processes[pid] = append(l, item)
+ m.ref2pid[ref] = pid
+ m.mutexProcesses.Unlock()
+
+ return nil
+}
+
+func (m *monitor) RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error {
+	// If 'process' belongs to this node and does not exist, a gen.MessageDown
+	// must be sent immediately with Reason set to noproc.
+ if p := m.router.ProcessByName(process.Name); process.Node == m.nodename && p == nil {
+ return m.sendMonitorExitReg(by, process, "noproc", ref)
+ }
+ if process.Node != m.nodename {
+ connection, err := m.router.GetConnection(process.Node)
+ if err != nil {
+ m.sendMonitorExitReg(by, process, "noconnection", ref)
+ return err
+ }
+
+ if err := connection.MonitorReg(by, process, ref); err != nil {
+ m.sendMonitorExitReg(by, process, "noconnection", ref)
+ return err
+ }
+ }
+
+ m.mutexNames.Lock()
+ l := m.names[process]
+ item := monitorItem{
+ pid: by,
+ ref: ref,
+ }
+ m.names[process] = append(l, item)
+ m.ref2name[ref] = process
+ m.mutexNames.Unlock()
+
+ return nil
+}
+
+func (m *monitor) RouteDemonitor(by etf.Pid, ref etf.Ref) error {
+ m.mutexProcesses.RLock()
+ pid, knownRefByPid := m.ref2pid[ref]
+ m.mutexProcesses.RUnlock()
+
+	if !knownRefByPid {
+ // monitor was created by process name
+ m.mutexNames.Lock()
+ defer m.mutexNames.Unlock()
+ processID, knownRefByName := m.ref2name[ref]
+		if !knownRefByName {
+ // unknown monitor reference
+ return ErrMonitorUnknown
+ }
+ items := m.names[processID]
+
+ for i := range items {
+ if items[i].pid != by {
+ continue
+ }
+ if items[i].ref != ref {
+ continue
+ }
+
+ items[i] = items[0]
+ items = items[1:]
+
+ if len(items) == 0 {
+ delete(m.names, processID)
+ } else {
+ m.names[processID] = items
+ }
+ delete(m.ref2name, ref)
+
+ if processID.Node != m.nodename {
+ connection, err := m.router.GetConnection(processID.Node)
+ if err != nil {
+ return err
+ }
+ return connection.DemonitorReg(by, processID, ref)
+ }
+ return nil
+ }
+ return nil
+ }
+
+ // monitor was created by pid
+
+	// checking the monitorItem list
+ m.mutexProcesses.Lock()
+ defer m.mutexProcesses.Unlock()
+ items := m.processes[pid]
+
+ // remove PID from monitoring processes list
+ for i := range items {
+ if items[i].pid != by {
+ continue
+ }
+ if items[i].ref != ref {
+ continue
+ }
+
+ items[i] = items[0]
+ items = items[1:]
+
+ if len(items) == 0 {
+ delete(m.processes, pid)
+ } else {
+ m.processes[pid] = items
+ }
+ delete(m.ref2pid, ref)
+
+ if string(pid.Node) != m.nodename {
+ connection, err := m.router.GetConnection(string(pid.Node))
+ if err != nil {
+ return err
+ }
+ return connection.Demonitor(by, pid, ref)
+ }
+
+ return nil
+ }
+ return nil
+}
+
+func (m *monitor) RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error {
+ m.mutexProcesses.Lock()
+ defer m.mutexProcesses.Unlock()
+
+ items, ok := m.processes[terminated]
+ if !ok {
+ return nil
+ }
+
+ for i := range items {
+		if items[i].ref != ref {
+			continue
+		}
+		lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)
+
+ delete(m.ref2pid, items[i].ref)
+ m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref)
+
+ items[i] = items[0]
+ items = items[1:]
+ if len(items) == 0 {
+ delete(m.processes, terminated)
+ return nil
+ }
+ m.processes[terminated] = items
+ return nil
+ }
+
+ return nil
+}
+
+func (m *monitor) RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error {
+ m.mutexNames.Lock()
+ defer m.mutexNames.Unlock()
+
+ items, ok := m.names[terminated]
+ if !ok {
+ return nil
+ }
+
+ for i := range items {
+		if items[i].ref != ref {
+			continue
+		}
+		lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)
+
+ delete(m.ref2name, items[i].ref)
+ m.sendMonitorExitReg(items[i].pid, terminated, reason, items[i].ref)
+
+ items[i] = items[0]
+ items = items[1:]
+ if len(items) == 0 {
+ delete(m.names, terminated)
+ return nil
+ }
+ m.names[terminated] = items
+ return nil
+ }
+
+ return nil
+}
+
+func (m *monitor) sendMonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error {
+ if string(to.Node) != m.nodename {
+ // remote
+ if reason == "noconnection" {
+ // do nothing. it was a monitor created by the remote node we lost connection to.
+ return nil
+ }
+
+ connection, err := m.router.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+
+ return connection.MonitorExit(to, terminated, reason, ref)
+ }
+
+ // local
down := gen.MessageDown{
Ref: ref,
Pid: terminated,
Reason: reason,
}
- m.registrar.route(terminated, to, down)
+ from := to
+ return m.router.RouteSend(from, to, down)
}
-func (m *monitor) notifyProcessExit(to etf.Pid, terminated etf.Pid, reason string) {
- // for remote: {3, FromPid, ToPid, Reason}
- if to.Node != etf.Atom(m.registrar.NodeName()) {
+func (m *monitor) sendMonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error {
+ if string(to.Node) != m.nodename {
+ // remote
if reason == "noconnection" {
- return
+ // do nothing
+ return nil
}
- message := etf.Tuple{distProtoEXIT, terminated, to, etf.Atom(reason)}
- m.registrar.routeRaw(to.Node, message)
- return
- }
- // check if 'to' process is still alive. otherwise ignore this event
- if p := m.registrar.getProcessByPid(to); p != nil && p.IsAlive() {
- p.exit(terminated, reason)
+ connection, err := m.router.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+
+ return connection.MonitorExitReg(to, terminated, reason, ref)
}
-}
-func virtualPid(p gen.ProcessID) etf.Pid {
- pid := etf.Pid{}
- pid.Node = etf.Atom(p.Name + "|" + p.Node) // registered process name
- pid.ID = math.MaxUint64
- pid.Creation = math.MaxUint32
- return pid
+ // local
+ down := gen.MessageDown{
+ Ref: ref,
+ ProcessID: terminated,
+ Reason: reason,
+ }
+ from := to
+ return m.router.RouteSend(from, to, down)
}
-func virtualPidToProcessID(pid etf.Pid) gen.ProcessID {
- s := strings.Split(string(pid.Node), "|")
- if len(s) != 2 {
- return gen.ProcessID{}
+func (m *monitor) sendExit(to etf.Pid, terminated etf.Pid, reason string) error {
+ // for remote: {3, FromPid, ToPid, Reason}
+ if to.Node != etf.Atom(m.nodename) {
+ if reason == "noconnection" {
+ return nil
+ }
+ connection, err := m.router.GetConnection(string(to.Node))
+ if err != nil {
+ return err
+ }
+ return connection.LinkExit(to, terminated, reason)
}
- return gen.ProcessID{s[0], s[1]}
-}
-func isVirtualPid(pid etf.Pid) bool {
- if pid.ID == math.MaxUint64 && pid.Creation == math.MaxUint32 {
- return true
+ // check if 'to' process is still alive
+ if p := m.router.processByPid(to); p != nil {
+ p.exit(terminated, reason)
+ return nil
}
- return false
+ return ErrProcessUnknown
}
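
On the consumer side, everything above surfaces to processes as plain messages: `gen.MessageDown` (with `Pid` set for monitors created by pid, or `ProcessID` for monitors created by registered name, mirroring the `processes`/`names` split) and `gen.MessageNodeDown`. A sketch of handling them in a `gen.Server` callback, assuming the v2 callback signatures; `demo` is a hypothetical server:

```go
package main

import (
	"fmt"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

type demo struct {
	gen.Server
}

// HandleInfo receives all non-call/cast messages, including monitor events.
func (d *demo) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
	switch m := message.(type) {
	case gen.MessageDown:
		// created by pid  -> m.Pid is set
		// created by name -> m.ProcessID is set
		fmt.Println("down:", m.Ref, m.Pid, m.ProcessID, m.Reason)
	case gen.MessageNodeDown:
		fmt.Println("node down:", m.Name)
	}
	return gen.ServerStatusOK
}

func main() {
	_ = &demo{} // spawn it via node.Spawn in a real program
}
```
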
diff --git a/node/network.go b/node/network.go
index 06c0af23..40df3d50 100644
--- a/node/network.go
+++ b/node/network.go
@@ -3,559 +3,481 @@ package node
import (
"bytes"
"context"
+ "encoding/pem"
+ "math/big"
+ "sync"
+ "time"
+
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
- "sync"
-
- //"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
- "encoding/pem"
"fmt"
- "runtime"
"github.com/ergo-services/ergo/etf"
"github.com/ergo-services/ergo/gen"
"github.com/ergo-services/ergo/lib"
- "github.com/ergo-services/ergo/node/dist"
-
- "math/big"
"net"
- // "net/http"
"strconv"
"strings"
- "time"
-)
-
-const (
- remoteBehaviorGroup = "ergo:remote"
)
type networkInternal interface {
- Network
- connect(to string) error
+ AddStaticRoute(name string, port uint16, options RouteOptions) error
+ RemoveStaticRoute(name string) bool
+ StaticRoutes() []Route
+ Resolve(peername string) (Route, error)
+ Connect(peername string) error
+ Disconnect(peername string) error
+ Nodes() []string
+
+ GetConnection(peername string) (ConnectionInterface, error)
+
+ connect(to string) (ConnectionInterface, error)
+ stopNetwork()
+}
+
+type connectionInternal struct {
+ conn net.Conn
+ connection ConnectionInterface
}
type network struct {
- registrar registrarInternal
- name string
- opts Options
- ctx context.Context
- remoteSpawnMutex sync.Mutex
+ nodename string
+ ctx context.Context
+ listener net.Listener
+
+ resolver Resolver
+ staticOnly bool
+ staticRoutes map[string]Route
+ staticRoutesMutex sync.Mutex
+
+ connections map[string]connectionInternal
+ mutexConnections sync.RWMutex
+
remoteSpawn map[string]gen.ProcessBehavior
- epmd *epmd
- tlscertServer tls.Certificate
- tlscertClient tls.Certificate
+ remoteSpawnMutex sync.Mutex
+
+ tls TLS
+ proxy Proxy
+ version Version
+ creation uint32
+
+ router CoreRouter
+ handshake HandshakeInterface
+ proto ProtoInterface
}
-func newNetwork(ctx context.Context, name string, opts Options, r registrarInternal) (networkInternal, error) {
+func newNetwork(ctx context.Context, nodename string, options Options, router CoreRouter) (networkInternal, error) {
n := &network{
- name: name,
- opts: opts,
- ctx: ctx,
- registrar: r,
+ nodename: nodename,
+ ctx: ctx,
+ staticOnly: options.StaticRoutesOnly,
+ staticRoutes: make(map[string]Route),
+ connections: make(map[string]connectionInternal),
+ remoteSpawn: make(map[string]gen.ProcessBehavior),
+ resolver: options.Resolver,
+ handshake: options.Handshake,
+ proto: options.Proto,
+ router: router,
+ creation: options.Creation,
}
- ns := strings.Split(name, "@")
- if len(ns) != 2 {
+
+ nn := strings.Split(nodename, "@")
+ if len(nn) != 2 {
return nil, fmt.Errorf("(EMPD) FQDN for node name is required (example: node@hostname)")
}
- port, err := n.listen(ctx, ns[1])
+ n.version, _ = options.Env[EnvKeyVersion].(Version)
+
+ n.tls = options.TLS
+	selfSignedCert, err := generateSelfSignedCert(n.version)
+	if err != nil {
+		return nil, err
+	}
+ if n.tls.Server.Certificate == nil {
+ n.tls.Server = selfSignedCert
+ n.tls.SkipVerify = true
+ }
+ if n.tls.Client.Certificate == nil {
+ n.tls.Client = selfSignedCert
+ }
+
+ err = n.handshake.Init(n.nodename, n.creation, options.Flags)
if err != nil {
return nil, err
}
- n.epmd = &epmd{}
- if err := n.epmd.Init(ctx, name, port, opts); err != nil {
+
+ port, err := n.listen(ctx, nn[1], options.ListenBegin, options.ListenEnd)
+ if err != nil {
return nil, err
}
- return n, nil
-}
-// AddStaticRoute adds static route record into the EPMD client
-func (n *network) AddStaticRoute(name string, port uint16) error {
- tlsEnabled := n.opts.TLSMode != TLSModeDisabled
- return n.epmd.AddStaticRoute(name, port, n.opts.cookie, tlsEnabled)
-}
+ resolverOptions := ResolverOptions{
+ NodeVersion: n.version,
+ HandshakeVersion: n.handshake.Version(),
+ EnabledTLS: n.tls.Enabled,
+ EnabledProxy: n.proxy.Enabled,
+ }
+ if err := n.resolver.Register(nodename, port, resolverOptions); err != nil {
+ return nil, err
+ }
-func (n *network) AddStaticRouteExt(name string, port uint16, cookie string, tls bool) error {
- return n.epmd.AddStaticRoute(name, port, cookie, tls)
+ return n, nil
}
-// RemoveStaticRoute removes static route record from the EPMD client
-func (n *network) RemoveStaticRoute(name string) {
- n.epmd.RemoveStaticRoute(name)
+func (n *network) stopNetwork() {
+ if n.listener != nil {
+ n.listener.Close()
+ }
}
-func (n *network) listen(ctx context.Context, name string) (uint16, error) {
- var TLSenabled bool = true
- var version Version
- version, _ = ctx.Value("version").(Version)
-
- lc := net.ListenConfig{}
- for p := n.opts.ListenRangeBegin; p <= n.opts.ListenRangeEnd; p++ {
- l, err := lc.Listen(ctx, "tcp", net.JoinHostPort(name, strconv.Itoa(int(p))))
- if err != nil {
- continue
- }
+// AddStaticRoute adds a static route to the node with the given name
+func (n *network) AddStaticRoute(name string, port uint16, options RouteOptions) error {
+ ns := strings.Split(name, "@")
+ if len(ns) != 2 {
+ return fmt.Errorf("wrong FQDN")
+ }
+ if _, err := net.LookupHost(ns[1]); err != nil {
+ return err
+ }
- switch n.opts.TLSMode {
- case TLSModeAuto:
- cert, err := generateSelfSignedCert(version)
- if err != nil {
- return 0, fmt.Errorf("Can't generate certificate: %s\n", err)
- }
+ route := Route{
+ Name: name,
+ Host: ns[1],
+ Port: port,
+ RouteOptions: options,
+ }
- n.tlscertServer = cert
- n.tlscertClient = cert
+ n.staticRoutesMutex.Lock()
+ defer n.staticRoutesMutex.Unlock()
- TLSconfig := &tls.Config{
- Certificates: []tls.Certificate{cert},
- InsecureSkipVerify: true,
- }
- l = tls.NewListener(l, TLSconfig)
+ _, exist := n.staticRoutes[name]
+ if exist {
+ return ErrTaken
+ }
+ n.staticRoutes[name] = route
- case TLSModeStrict:
- certServer, err := tls.LoadX509KeyPair(n.opts.TLScrtServer, n.opts.TLSkeyServer)
- if err != nil {
- return 0, fmt.Errorf("Can't load server certificate: %s\n", err)
- }
- certClient, err := tls.LoadX509KeyPair(n.opts.TLScrtServer, n.opts.TLSkeyServer)
- if err != nil {
- return 0, fmt.Errorf("Can't load client certificate: %s\n", err)
- }
+ return nil
+}
- n.tlscertServer = certServer
- n.tlscertClient = certClient
+// RemoveStaticRoute removes static route record. Returns false if it doesn't exist.
+func (n *network) RemoveStaticRoute(name string) bool {
+ n.staticRoutesMutex.Lock()
+ defer n.staticRoutesMutex.Unlock()
+ _, exist := n.staticRoutes[name]
+ if exist {
+ delete(n.staticRoutes, name)
+ return true
+ }
+ return false
+}
- TLSconfig := &tls.Config{
- Certificates: []tls.Certificate{certServer},
- ServerName: "localhost",
- }
- l = tls.NewListener(l, TLSconfig)
+// StaticRoutes returns list of static routes added with AddStaticRoute
+func (n *network) StaticRoutes() []Route {
+ var routes []Route
- default:
- TLSenabled = false
- }
+ n.staticRoutesMutex.Lock()
+ defer n.staticRoutesMutex.Unlock()
+ for _, v := range n.staticRoutes {
+ routes = append(routes, v)
+ }
- go func() {
- for {
- c, err := l.Accept()
- lib.Log("[%s] Accepted new connection from %s", n.name, c.RemoteAddr().String())
+ return routes
+}
- if ctx.Err() != nil {
- // Context was canceled
- c.Close()
- return
- }
+// GetConnection
+func (n *network) GetConnection(peername string) (ConnectionInterface, error) {
+ if peername == n.nodename {
+ return nil, fmt.Errorf("can't connect to itself")
+ }
+ n.mutexConnections.RLock()
+ connectionInternal, ok := n.connections[peername]
+ n.mutexConnections.RUnlock()
+ if ok {
+ return connectionInternal.connection, nil
+ }
- if err != nil {
- lib.Log(err.Error())
- continue
- }
- handshakeOptions := dist.HandshakeOptions{
- Name: n.name,
- Cookie: n.opts.cookie,
- TLS: TLSenabled,
- Hidden: n.opts.Hidden,
- Creation: n.opts.creation,
- Version: n.opts.HandshakeVersion,
- }
+ connection, err := n.connect(peername)
+ if err != nil {
+ lib.Log("[%s] CORE no route to node %q: %s", n.nodename, peername, err)
+ return nil, ErrNoRoute
+ }
- link, e := dist.HandshakeAccept(c, handshakeOptions)
- if e != nil {
- lib.Log("[%s] Can't handshake with %s: %s", n.name, c.RemoteAddr().String(), e)
- c.Close()
- continue
- }
+ return connection, nil
+}
- // start serving this link
- if err := n.serve(ctx, link); err != nil {
- lib.Log("Can't serve connection link due to: %s", err)
- c.Close()
- }
+// Resolve
+func (n *network) Resolve(peername string) (Route, error) {
+ n.staticRoutesMutex.Lock()
+ defer n.staticRoutesMutex.Unlock()
- }
- }()
+ if r, ok := n.staticRoutes[peername]; ok {
+ return r, nil
+ }
- // return port number this node listenig on for the incoming connections
- return p, nil
+ if n.staticOnly {
+ return Route{}, ErrNoRoute
}
- // all the ports within a given range are taken
- return 0, fmt.Errorf("Can't start listener. Port range is taken")
+ return n.resolver.Resolve(peername)
}
-func (n *network) ProvideRemoteSpawn(name string, behavior gen.ProcessBehavior) error {
- return n.registrar.RegisterBehavior(remoteBehaviorGroup, name, behavior, nil)
+// Connect
+func (n *network) Connect(peername string) error {
+ _, err := n.GetConnection(peername)
+ return err
}
-func (n *network) RevokeRemoteSpawn(name string) error {
- return n.registrar.UnregisterBehavior(remoteBehaviorGroup, name)
-}
+// Disconnect
+func (n *network) Disconnect(peername string) error {
+ n.mutexConnections.RLock()
+ connectionInternal, ok := n.connections[peername]
+ n.mutexConnections.RUnlock()
+ if !ok {
+ return ErrNoRoute
+ }
-func (n *network) Resolve(name string) (NetworkRoute, error) {
- return n.epmd.resolve(name)
+ connectionInternal.conn.Close()
+ return nil
}
-func (n *network) serve(ctx context.Context, link *dist.Link) error {
- // define the total number of reader/writer goroutines
- numHandlers := runtime.GOMAXPROCS(n.opts.ConnectionHandlers)
-
- // do not use shared channels within intencive code parts, impacts on a performance
- receivers := struct {
- recv []chan *lib.Buffer
- n int
- i int
- }{
- recv: make([]chan *lib.Buffer, n.opts.RecvQueueLength),
- n: numHandlers,
- }
+// Nodes
+func (n *network) Nodes() []string {
+ list := []string{}
+ n.mutexConnections.RLock()
+ defer n.mutexConnections.RUnlock()
- p := &peer{
- name: link.GetRemoteName(),
- send: make([]chan []etf.Term, numHandlers),
- n: numHandlers,
+ for name := range n.connections {
+ list = append(list, name)
}
+ return list
+}
- if err := n.registrar.registerPeer(p); err != nil {
- // duplicate link?
- return err
- }
+func (n *network) listen(ctx context.Context, hostname string, begin uint16, end uint16) (uint16, error) {
- // run readers for incoming messages
- for i := 0; i < numHandlers; i++ {
- // run packet reader/handler routines (decoder)
- recv := make(chan *lib.Buffer, n.opts.RecvQueueLength)
- receivers.recv[i] = recv
- go link.ReadHandlePacket(ctx, recv, n.handleMessage)
+ lc := net.ListenConfig{
+ KeepAlive: defaultKeepAlivePeriod * time.Second,
}
-
- cacheIsReady := make(chan bool)
-
- // run link reader routine
- go func() {
- var err error
- var packetLength int
- var recv chan *lib.Buffer
-
- linkctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- go func() {
- select {
- case <-linkctx.Done():
- // if node's context is done
- link.Close()
+ for port := begin; port <= end; port++ {
+ hostPort := net.JoinHostPort(hostname, strconv.Itoa(int(port)))
+ listener, err := lc.Listen(ctx, "tcp", hostPort)
+ if err != nil {
+ continue
+ }
+ if n.tls.Enabled {
+ config := tls.Config{
+ Certificates: []tls.Certificate{n.tls.Server},
+ InsecureSkipVerify: n.tls.SkipVerify,
}
- }()
-
- // initializing atom cache if its enabled
- if !n.opts.DisableHeaderAtomCache {
- link.SetAtomCache(etf.NewAtomCache(linkctx))
+ listener = tls.NewListener(listener, &config)
}
- cacheIsReady <- true
+ n.listener = listener
- defer func() {
- link.Close()
- n.registrar.unregisterPeer(link.GetRemoteName())
-
- // close handlers channel
- p.mutex.Lock()
- for i := 0; i < numHandlers; i++ {
- if p.send[i] != nil {
- close(p.send[i])
- }
- if receivers.recv[i] != nil {
- close(receivers.recv[i])
+ go func() {
+ for {
+ c, err := listener.Accept()
+ if err != nil {
+ if ctx.Err() == nil {
+ continue
+ }
+ lib.Log(err.Error())
+ return
}
- }
- p.mutex.Unlock()
- }()
+ lib.Log("[%s] NETWORK accepted new connection from %s", n.nodename, c.RemoteAddr().String())
- b := lib.TakeBuffer()
- for {
- packetLength, err = link.Read(b)
- if err != nil || packetLength == 0 {
- // link was closed or got malformed data
+ peername, protoFlags, err := n.handshake.Accept(c, n.tls.Enabled)
if err != nil {
- fmt.Println("link was closed", link.GetPeerName(), "error:", err)
+ lib.Log("[%s] Can't handshake with %s: %s", n.nodename, c.RemoteAddr().String(), err)
+ c.Close()
+ continue
+ }
+ connection, err := n.proto.Init(n.ctx, c, peername, protoFlags)
+ if err != nil {
+ c.Close()
+ continue
}
- lib.ReleaseBuffer(b)
- return
- }
- // take new buffer for the next reading and append the tail (part of the next packet)
- b1 := lib.TakeBuffer()
- b1.Set(b.B[packetLength:])
- // cut the tail and send it further for handling.
- // buffer b has to be released by the reader of
- // recv channel (link.ReadHandlePacket)
- b.B = b.B[:packetLength]
- recv = receivers.recv[receivers.i]
+ cInternal := connectionInternal{
+ conn: c,
+ connection: connection,
+ }
- recv <- b
+ if _, err := n.registerConnection(peername, cInternal); err != nil {
+ // Race condition:
+					// another goroutine has already created and registered
+					// a connection to this node.
+					// Close this connection and use the already registered one
+ c.Close()
+ continue
+ }
- // set new buffer as a current for the next reading
- b = b1
+ // run serving connection
+ go func(ctx context.Context, ci connectionInternal) {
+ n.proto.Serve(ci.connection, n.router)
+ n.unregisterConnection(peername)
+ n.proto.Terminate(ci.connection)
+ ci.conn.Close()
+ }(ctx, cInternal)
- // round-robin switch to the next receiver
- receivers.i++
- if receivers.i < receivers.n {
- continue
}
- receivers.i = 0
-
- }
- }()
-
- // we should make sure if the cache is ready before we start writers
- <-cacheIsReady
+ }()
- // run readers/writers for incoming/outgoing messages
- for i := 0; i < numHandlers; i++ {
- // run writer routines (encoder)
- send := make(chan []etf.Term, n.opts.SendQueueLength)
- p.mutex.Lock()
- p.send[i] = send
- p.mutex.Unlock()
- go link.Writer(send, n.opts.FragmentationUnit)
+		// return the port number this node is listening on for incoming connections
+ return port, nil
}
- return nil
+	// all ports within the given range are taken
+	return 0, fmt.Errorf("can't start listener: port range is taken")
}
-func (n *network) handleMessage(fromNode string, control, message etf.Term) (err error) {
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("%s", r)
- }
- }()
-
- switch t := control.(type) {
- case etf.Tuple:
- switch act := t.Element(1).(type) {
- case int:
- switch act {
- case distProtoREG_SEND:
- // {6, FromPid, Unused, ToName}
- lib.Log("[%s] CONTROL REG_SEND [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.route(t.Element(2).(etf.Pid), t.Element(4), message)
-
- case distProtoSEND:
- // {2, Unused, ToPid}
- // SEND has no sender pid
- lib.Log("[%s] CONTROL SEND [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.route(etf.Pid{}, t.Element(3), message)
-
- case distProtoLINK:
- // {1, FromPid, ToPid}
- lib.Log("[%s] CONTROL LINK [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.link(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
-
- case distProtoUNLINK:
- // {4, FromPid, ToPid}
- lib.Log("[%s] CONTROL UNLINK [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.unlink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
-
- case distProtoNODE_LINK:
- lib.Log("[%s] CONTROL NODE_LINK [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
-
- case distProtoEXIT:
- // {3, FromPid, ToPid, Reason}
- lib.Log("[%s] CONTROL EXIT [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- terminated := t.Element(2).(etf.Pid)
- reason := fmt.Sprint(t.Element(4))
- n.registrar.processTerminated(terminated, "", string(reason))
-
- case distProtoEXIT2:
- lib.Log("[%s] CONTROL EXIT2 [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
-
- case distProtoMONITOR:
- // {19, FromPid, ToProc, Ref}, where FromPid = monitoring process
- // and ToProc = monitored process pid or name (atom)
- lib.Log("[%s] CONTROL MONITOR [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.monitorProcess(t.Element(2).(etf.Pid), t.Element(3), t.Element(4).(etf.Ref))
-
- case distProtoDEMONITOR:
- // {20, FromPid, ToProc, Ref}, where FromPid = monitoring process
- // and ToProc = monitored process pid or name (atom)
- lib.Log("[%s] CONTROL DEMONITOR [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- n.registrar.demonitorProcess(t.Element(4).(etf.Ref))
-
- case distProtoMONITOR_EXIT:
- // {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
- // pid or name (atom), ToPid = monitoring process, and Reason = exit reason for the monitored process
- lib.Log("[%s] CONTROL MONITOR_EXIT [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- reason := fmt.Sprint(t.Element(5))
- switch terminated := t.Element(2).(type) {
- case etf.Pid:
- n.registrar.processTerminated(terminated, "", string(reason))
- case etf.Atom:
- vpid := virtualPid(gen.ProcessID{string(terminated), fromNode})
- n.registrar.processTerminated(vpid, "", string(reason))
- }
+func (n *network) connect(peername string) (ConnectionInterface, error) {
+ var route Route
+ var c net.Conn
+ var err error
+ var enabledTLS bool
- // Not implemented yet, just stubs. TODO.
- case distProtoSEND_SENDER:
- lib.Log("[%s] CONTROL SEND_SENDER unsupported [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- case distProtoPAYLOAD_EXIT:
- lib.Log("[%s] CONTROL PAYLOAD_EXIT unsupported [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- case distProtoPAYLOAD_EXIT2:
- lib.Log("[%s] CONTROL PAYLOAD_EXIT2 unsupported [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- case distProtoPAYLOAD_MONITOR_P_EXIT:
- lib.Log("[%s] CONTROL PAYLOAD_MONITOR_P_EXIT unsupported [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
-
- // alias support
- case distProtoALIAS_SEND:
- // {33, FromPid, Alias}
- lib.Log("[%s] CONTROL ALIAS_SEND [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- alias := etf.Alias(t.Element(3).(etf.Ref))
- n.registrar.route(t.Element(2).(etf.Pid), alias, message)
-
- case distProtoSPAWN_REQUEST:
- // {29, ReqId, From, GroupLeader, {Module, Function, Arity}, OptList}
- lib.Log("[%s] CONTROL SPAWN_REQUEST [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
- registerName := ""
- for _, option := range t.Element(6).(etf.List) {
- name, ok := option.(etf.Tuple)
- if !ok {
- break
- }
- if name.Element(1).(etf.Atom) == etf.Atom("name") {
- registerName = string(name.Element(2).(etf.Atom))
- }
- }
+ // resolve the route
+	route, err = n.Resolve(peername)
+ if err != nil {
+ return nil, err
+ }
- from := t.Element(3).(etf.Pid)
- ref := t.Element(2).(etf.Ref)
-
- mfa := t.Element(5).(etf.Tuple)
- module := mfa.Element(1).(etf.Atom)
- function := mfa.Element(2).(etf.Atom)
- var args etf.List
- if str, ok := message.(string); !ok {
- args, _ = message.(etf.List)
- } else {
- // stupid Erlang's strings :). [1,2,3,4,5] sends as a string.
- // args can't be anything but etf.List.
- for i := range []byte(str) {
- args = append(args, str[i])
- }
- }
+ HostPort := net.JoinHostPort(route.Host, strconv.Itoa(int(route.Port)))
+ dialer := net.Dialer{
+ KeepAlive: defaultKeepAlivePeriod * time.Second,
+ }
- rb, err_behavior := n.registrar.RegisteredBehavior(remoteBehaviorGroup, string(module))
- if err_behavior != nil {
- message := etf.Tuple{distProtoSPAWN_REPLY, ref, from, 0, etf.Atom("not_provided")}
- n.registrar.routeRaw(from.Node, message)
- return
+	if route.IsErgo {
+ // rely on the route TLS settings if they were defined
+ if route.EnabledTLS {
+ if route.Cert.Certificate == nil {
+ // use the local TLS settings
+ config := tls.Config{
+ Certificates: []tls.Certificate{n.tls.Client},
+ InsecureSkipVerify: n.tls.SkipVerify,
}
- remote_request := gen.RemoteSpawnRequest{
- Ref: ref,
- From: from,
- Function: string(function),
+ tlsdialer := tls.Dialer{
+ NetDialer: &dialer,
+ Config: &config,
}
- process_opts := processOptions{}
- process_opts.Env = map[string]interface{}{"ergo:RemoteSpawnRequest": remote_request}
-
- process, err_spawn := n.registrar.spawn(registerName, process_opts, rb.Behavior, args...)
- if err_spawn != nil {
- message := etf.Tuple{distProtoSPAWN_REPLY, ref, from, 0, etf.Atom(err_spawn.Error())}
- n.registrar.routeRaw(from.Node, message)
- return
+ c, err = tlsdialer.DialContext(n.ctx, "tcp", HostPort)
+ } else {
+ // use the route TLS settings
+ config := tls.Config{
+ Certificates: []tls.Certificate{route.Cert},
}
- message := etf.Tuple{distProtoSPAWN_REPLY, ref, from, 0, process.Self()}
- n.registrar.routeRaw(from.Node, message)
-
- case distProtoSPAWN_REPLY:
- // {31, ReqId, To, Flags, Result}
- lib.Log("[%s] CONTROL SPAWN_REPLY [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
-
- to := t.Element(3).(etf.Pid)
- process := n.registrar.ProcessByPid(to)
- if process == nil {
- return
+ tlsdialer := tls.Dialer{
+ NetDialer: &dialer,
+ Config: &config,
}
- ref := t.Element(2).(etf.Ref)
- //flags := t.Element(4)
- process.PutSyncReply(ref, t.Element(5))
-
- default:
- lib.Log("[%s] CONTROL unknown command [from %s]: %#v", n.registrar.NodeName(), fromNode, control)
+ c, err = tlsdialer.DialContext(n.ctx, "tcp", HostPort)
}
- default:
- err = fmt.Errorf("unsupported message %#v", control)
+ enabledTLS = true
+
+ } else {
+ // TLS disabled on a remote node
+ c, err = dialer.DialContext(n.ctx, "tcp", HostPort)
}
- }
- return
-}
+ } else {
+ // rely on the local TLS settings
+ if n.tls.Enabled {
+ config := tls.Config{
+ Certificates: []tls.Certificate{n.tls.Client},
+ InsecureSkipVerify: n.tls.SkipVerify,
+ }
+ tlsdialer := tls.Dialer{
+ NetDialer: &dialer,
+ Config: &config,
+ }
+ c, err = tlsdialer.DialContext(n.ctx, "tcp", HostPort)
+ enabledTLS = true
-func (n *network) connect(to string) error {
- var nr NetworkRoute
- var err error
- var c net.Conn
- if nr, err = n.epmd.resolve(string(to)); err != nil {
- return fmt.Errorf("Can't resolve port for %s: %s", to, err)
+ } else {
+ c, err = dialer.DialContext(n.ctx, "tcp", HostPort)
+ }
}
- if nr.Cookie == "" {
- nr.Cookie = n.opts.cookie
+
+	// give up if we couldn't establish a connection with the node
+ if err != nil {
+ return nil, err
}
- ns := strings.Split(to, "@")
- TLSenabled := false
+ // handshake
+ handshake := route.Handshake
+ if handshake == nil {
+ // use default handshake
+ handshake = n.handshake
+ }
- switch n.opts.TLSMode {
- case TLSModeAuto:
- tlsdialer := tls.Dialer{
- Config: &tls.Config{
- Certificates: []tls.Certificate{n.tlscertClient},
- InsecureSkipVerify: true,
- },
- }
- c, err = tlsdialer.DialContext(n.ctx, "tcp", net.JoinHostPort(ns[1], strconv.Itoa(nr.Port)))
- TLSenabled = true
-
- case TLSModeStrict:
- tlsdialer := tls.Dialer{
- Config: &tls.Config{
- Certificates: []tls.Certificate{n.tlscertClient},
- },
- }
- c, err = tlsdialer.DialContext(n.ctx, "tcp", net.JoinHostPort(ns[1], strconv.Itoa(nr.Port)))
- TLSenabled = true
+	protoFlags, err := handshake.Start(c, enabledTLS)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
- default:
- dialer := net.Dialer{}
- c, err = dialer.DialContext(n.ctx, "tcp", net.JoinHostPort(ns[1], strconv.Itoa(nr.Port)))
+ // proto
+ proto := route.Proto
+ if proto == nil {
+ // use default proto
+ proto = n.proto
}
+	connection, err := proto.Init(n.ctx, c, peername, protoFlags)
if err != nil {
- lib.Log("Error calling net.Dialer.DialerContext : %s", err.Error())
- return err
+ c.Close()
+ return nil, err
+ }
+ cInternal := connectionInternal{
+ conn: c,
+ connection: connection,
}
- handshakeOptions := dist.HandshakeOptions{
- Name: n.name,
- Cookie: nr.Cookie,
- TLS: TLSenabled,
- Hidden: false,
- Creation: n.opts.creation,
- Version: n.opts.HandshakeVersion,
+ if registered, err := n.registerConnection(peername, cInternal); err != nil {
+ // Race condition:
+		// another goroutine has already created and registered
+		// a connection to this node.
+ // Close this connection and use the already registered one
+ c.Close()
+ return registered.connection, nil
}
- link, e := dist.Handshake(c, handshakeOptions)
- if e != nil {
- return e
+
+ // run serving connection
+ go func(ctx context.Context, ci connectionInternal) {
+ n.proto.Serve(ci.connection, n.router)
+ n.unregisterConnection(peername)
+ n.proto.Terminate(ci.connection)
+ ci.conn.Close()
+ }(n.ctx, cInternal)
+
+ return connection, nil
+}
+
+func (n *network) registerConnection(peername string, ci connectionInternal) (connectionInternal, error) {
+ lib.Log("[%s] NETWORK registering peer %#v", n.nodename, peername)
+ n.mutexConnections.Lock()
+ defer n.mutexConnections.Unlock()
+
+ if registered, exist := n.connections[peername]; exist {
+ // already registered
+ return registered, ErrTaken
}
+ n.connections[peername] = ci
+ return ci, nil
+}
- if err := n.serve(n.ctx, link); err != nil {
- c.Close()
- return err
+func (n *network) unregisterConnection(peername string) {
+ lib.Log("[%s] NETWORK unregistering peer %v", n.nodename, peername)
+ n.mutexConnections.Lock()
+ _, exist := n.connections[peername]
+ delete(n.connections, peername)
+ n.mutexConnections.Unlock()
+
+ if exist {
+ n.router.RouteNodeDown(peername)
}
- return nil
}
func generateSelfSignedCert(version Version) (tls.Certificate, error) {
@@ -609,26 +531,71 @@ func generateSelfSignedCert(version Version) (tls.Certificate, error) {
return tls.X509KeyPair(certPEM.Bytes(), certPrivKeyPEM.Bytes())
}
-type peer struct {
- name string
- send []chan []etf.Term
- i int
- n int
-
- mutex sync.Mutex
+//
+// Connection interface default callbacks
+//
+func (c *Connection) Send(from gen.Process, to etf.Pid, message etf.Term) error {
+ return ErrUnsupported
+}
+func (c *Connection) SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error {
+ return ErrUnsupported
+}
+func (c *Connection) SendAlias(from gen.Process, to etf.Alias, message etf.Term) error {
+ return ErrUnsupported
+}
+func (c *Connection) Link(local gen.Process, remote etf.Pid) error {
+ return ErrUnsupported
+}
+func (c *Connection) Unlink(local gen.Process, remote etf.Pid) error {
+ return ErrUnsupported
+}
+func (c *Connection) LinkExit(local etf.Pid, remote etf.Pid, reason string) error {
+ return ErrUnsupported
+}
+func (c *Connection) Monitor(local gen.Process, remote etf.Pid, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) MonitorReg(local gen.Process, remote gen.ProcessID, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) Demonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) DemonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) MonitorExitReg(process gen.Process, reason string, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error {
+ return ErrUnsupported
+}
+func (c *Connection) SpawnRequest(behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error {
+ return ErrUnsupported
+}
+func (c *Connection) SpawnReply(to etf.Pid, ref etf.Ref, pid etf.Pid) error {
+ return ErrUnsupported
+}
+func (c *Connection) SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error {
+ return ErrUnsupported
+}
+func (c *Connection) Proxy() error {
+ return ErrUnsupported
+}
+func (c *Connection) ProxyReg() error {
+ return ErrUnsupported
}
-func (p *peer) getChannel() chan []etf.Term {
- p.mutex.Lock()
- defer p.mutex.Unlock()
-
- c := p.send[p.i]
-
- p.i++
- if p.i < p.n {
- return c
- }
-
- p.i = 0
- return c
+//
+// Handshake interface default callbacks
+//
+func (h *Handshake) Start(c net.Conn) (Flags, error) {
+ return Flags{}, ErrUnsupported
+}
+func (h *Handshake) Accept(c net.Conn) (string, Flags, error) {
+ return "", Flags{}, ErrUnsupported
+}
+func (h *Handshake) Version() HandshakeVersion {
+ var v HandshakeVersion
+ return v
}
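
Both the accept path and `connect` funnel through `registerConnection`, so two nodes dialing each other at the same time collapse to a single connection: the loser receives `ErrTaken` together with the winner's entry and simply closes its own socket. The same register-or-reuse shape in a self-contained sketch (simplified types, illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errTaken = errors.New("taken")

type registry struct {
	mutex sync.Mutex
	conns map[string]string // peername -> connection (a string stands in for the real thing)
}

// register returns the already-registered connection along with errTaken if
// the peer is known; the caller then closes its duplicate and uses the winner.
func (r *registry) register(peer, conn string) (string, error) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if registered, exist := r.conns[peer]; exist {
		return registered, errTaken
	}
	r.conns[peer] = conn
	return conn, nil
}

func main() {
	r := &registry{conns: map[string]string{}}
	c1, err1 := r.register("node@host", "conn-A")
	c2, err2 := r.register("node@host", "conn-B") // loser closes conn-B
	fmt.Println(c1, err1)                         // conn-A <nil>
	fmt.Println(c2, err2)                         // conn-A taken
}
```
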
diff --git a/node/node.go b/node/node.go
index 4dfd4e28..6214a8c8 100644
--- a/node/node.go
+++ b/node/node.go
@@ -3,6 +3,7 @@ package node
import (
"context"
"fmt"
+ "runtime"
"strings"
"time"
@@ -12,109 +13,94 @@ import (
)
const (
- appBehaviorGroup = "ergo:applications"
+ appBehaviorGroup = "ergo:applications"
+ remoteBehaviorGroup = "ergo:remote"
)
-type nodeInternal interface {
- Node
- registrarInternal
-}
-
// node instance of created node using CreateNode
type node struct {
- registrarInternal
- networkInternal
+ coreInternal
name string
- cookie string
creation uint32
- opts Options
context context.Context
stop context.CancelFunc
version Version
}
// StartWithContext create new node with specified context, name and cookie string
-func StartWithContext(ctx context.Context, name string, cookie string, opts Options) (nodeInternal, error) {
-
- lib.Log("Start with name '%s' and cookie '%s'", name, cookie)
- nodectx, nodestop := context.WithCancel(ctx)
-
- // Creation must be > 0 so make 'or 0x1'
- creation := uint32(time.Now().Unix()) | 1
+func StartWithContext(ctx context.Context, name string, cookie string, opts Options) (Node, error) {
- node := &node{
- cookie: cookie,
- context: nodectx,
- stop: nodestop,
- creation: creation,
- }
+ lib.Log("Start node with name %q and cookie %q", name, cookie)
- if name == "" {
- return nil, fmt.Errorf("Node name must be defined")
- }
- // set defaults
- if opts.ListenRangeBegin == 0 {
- opts.ListenRangeBegin = defaultListenRangeBegin
+ if len(strings.Split(name, "@")) != 2 {
+ return nil, fmt.Errorf("incorrect FQDN node name (example: node@localhost)")
}
- if opts.ListenRangeEnd == 0 {
- opts.ListenRangeEnd = defaultListenRangeEnd
+ if opts.Creation == 0 {
+ opts.Creation = uint32(time.Now().Unix())
}
- lib.Log("Listening range: %d...%d", opts.ListenRangeBegin, opts.ListenRangeEnd)
- if opts.EPMDPort == 0 {
- opts.EPMDPort = defaultEPMDPort
- }
- if opts.EPMDPort != 4369 {
- lib.Log("Using custom EPMD port: %d", opts.EPMDPort)
+	if !opts.Flags.Enable {
+ opts.Flags = DefaultFlags()
}
- if opts.SendQueueLength == 0 {
- opts.SendQueueLength = defaultSendQueueLength
+ // set defaults listening port range
+ if opts.Listen > 0 {
+ opts.ListenBegin = opts.Listen
+ opts.ListenEnd = opts.Listen
+ lib.Log("Node listening port: %d", opts.Listen)
+ } else {
+ if opts.ListenBegin == 0 {
+ opts.ListenBegin = defaultListenBegin
+ }
+ if opts.ListenEnd == 0 {
+ opts.ListenEnd = defaultListenEnd
+ }
+ lib.Log("Node listening range: %d...%d", opts.ListenBegin, opts.ListenEnd)
}
- if opts.RecvQueueLength == 0 {
- opts.RecvQueueLength = defaultRecvQueueLength
+ if opts.Handshake == nil {
+ return nil, fmt.Errorf("Handshake must be defined")
}
-
- if opts.FragmentationUnit < 1500 {
- opts.FragmentationUnit = defaultFragmentationUnit
+ if opts.Proto == nil {
+ return nil, fmt.Errorf("Proto must be defined")
}
-
- // must be 5 or 6
- if opts.HandshakeVersion != 5 && opts.HandshakeVersion != 6 {
- opts.HandshakeVersion = defaultHandshakeVersion
+	if !opts.StaticRoutesOnly && opts.Resolver == nil {
+ return nil, fmt.Errorf("Resolver must be defined if StaticRoutesOnly == false")
}
- if opts.Hidden {
- lib.Log("Running as hidden node")
+ nodectx, nodestop := context.WithCancel(ctx)
+ node := &node{
+ name: name,
+ context: nodectx,
+ stop: nodestop,
+ creation: opts.Creation,
}
- if len(strings.Split(name, "@")) != 2 {
- return nil, fmt.Errorf("incorrect FQDN node name (example: node@localhost)")
+ // create a copy of envs
+ copyEnv := make(map[gen.EnvKey]interface{})
+ for k, v := range opts.Env {
+ copyEnv[k] = v
}
- opts.cookie = cookie
- opts.creation = creation
- node.opts = opts
- node.name = name
+ // set global variable 'ergo:Node'
+ copyEnv[EnvKeyNode] = Node(node)
+ opts.Env = copyEnv
- registrar := newRegistrar(nodectx, name, creation, node)
- network, err := newNetwork(nodectx, name, opts, registrar)
+ core, err := newCore(nodectx, name, opts)
if err != nil {
return nil, err
}
+ node.coreInternal = core
- node.registrarInternal = registrar
- node.networkInternal = network
-
- // load applications
for _, app := range opts.Applications {
+ // load applications
name, err := node.ApplicationLoad(app)
if err != nil {
nodestop()
return nil, err
}
+ // start applications
_, err = node.ApplicationStart(name)
if err != nil {
nodestop()
@@ -125,40 +111,12 @@ func StartWithContext(ctx context.Context, name string, cookie string, opts Opti
return node, nil
}
-// IsAlive returns true if node is running
-func (n *node) IsAlive() bool {
- return n.context.Err() == nil
-}
-
-// Wait waits until node stopped
-func (n *node) Wait() {
- <-n.context.Done()
-}
-
-// Uptime return uptime in seconds
-func (n *node) Uptime() int64 {
- return time.Now().Unix() - int64(n.creation)
-}
-
// Version returns version of the node
func (n *node) Version() Version {
return n.version
}
-// WaitWithTimeout waits until node stopped. Return ErrTimeout
-// if given timeout is exceeded
-func (n *node) WaitWithTimeout(d time.Duration) error {
-
- timer := time.NewTimer(d)
- defer timer.Stop()
-
- select {
- case <-timer.C:
- return ErrTimeout
- case <-n.context.Done():
- return nil
- }
-}
+// Spawn
func (n *node) Spawn(name string, opts gen.ProcessOptions, object gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) {
// process started by node has no parent
options := processOptions{
@@ -167,19 +125,44 @@ func (n *node) Spawn(name string, opts gen.ProcessOptions, object gen.ProcessBeh
return n.spawn(name, options, object, args...)
}
+// RegisterName
+func (n *node) RegisterName(name string, pid etf.Pid) error {
+ return n.registerName(name, pid)
+}
+
+// UnregisterName
+func (n *node) UnregisterName(name string) error {
+ return n.unregisterName(name)
+}
+
+// Stop
func (n *node) Stop() {
- n.stop()
+ n.coreStop()
}
+// Name
func (n *node) Name() string {
return n.name
}
-func (n *node) RegisterName(name string, pid etf.Pid) error {
- return n.registerName(name, pid)
+// IsAlive
+func (n *node) IsAlive() bool {
+ return n.coreIsAlive()
}
-func (n *node) UnregisterName(name string) error {
- return n.unregisterName(name)
+
+// Uptime
+func (n *node) Uptime() int64 {
+ return n.coreUptime()
+}
+
+// Wait
+func (n *node) Wait() {
+ n.coreWait()
+}
+
+// WaitWithTimeout
+func (n *node) WaitWithTimeout(d time.Duration) error {
+ return n.coreWaitWithTimeout(d)
}
// LoadedApplications returns a list of loaded applications (including running applications)
@@ -328,8 +311,8 @@ func (n *node) applicationStart(startType, appName string, args ...etf.Term) (ge
}
}
- env := map[string]interface{}{
- "spec": spec,
+ env := map[gen.EnvKey]interface{}{
+ gen.EnvKeySpec: spec,
}
options := gen.ProcessOptions{
Env: env,
@@ -369,15 +352,23 @@ func (n *node) ApplicationStop(name string) error {
}
return nil
}
+
+// Links
func (n *node) Links(process etf.Pid) []etf.Pid {
return n.processLinks(process)
}
+
+// Monitors
func (n *node) Monitors(process etf.Pid) []etf.Pid {
return n.processMonitors(process)
}
+
+// MonitorsByName
func (n *node) MonitorsByName(process etf.Pid) []gen.ProcessID {
return n.processMonitorsByName(process)
}
+
+// MonitoredBy
func (n *node) MonitoredBy(process etf.Pid) []etf.Pid {
return n.processMonitoredBy(process)
}
@@ -424,3 +415,37 @@ func (n *node) RevokeRPC(module, function string) error {
return nil
}
+
+// ProvideRemoteSpawn
+func (n *node) ProvideRemoteSpawn(name string, behavior gen.ProcessBehavior) error {
+ return n.RegisterBehavior(remoteBehaviorGroup, name, behavior, nil)
+}
+
+// RevokeRemoteSpawn
+func (n *node) RevokeRemoteSpawn(name string) error {
+ return n.UnregisterBehavior(remoteBehaviorGroup, name)
+}
+
+// DefaultFlags
+func DefaultFlags() Flags {
+ return Flags{
+ EnableHeaderAtomCache: true,
+ EnableBigCreation: true,
+ EnableBigPidRef: true,
+ EnableFragmentation: true,
+ EnableAlias: true,
+ EnableRemoteSpawn: true,
+ }
+}
+
+// DefaultProtoOptions
+func DefaultProtoOptions() ProtoOptions {
+ return ProtoOptions{
+ NumHandlers: runtime.NumCPU(),
+ MaxMessageSize: 0, // no limit
+ SendQueueLength: DefaultProtoSendQueueLength,
+ RecvQueueLength: DefaultProtoRecvQueueLength,
+ FragmentationUnit: DefaultProroFragmentationUnit,
+ }
+}
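
The reworked StartWithContext above refuses to boot unless Handshake and Proto are supplied, and requires a Resolver whenever StaticRoutesOnly is false. A minimal bootstrap sketch follows; dist.CreateHandshake and its HandshakeOptions appear later in this diff, while the argument passed to dist.CreateProto and the dist.CreateResolver constructor are assumptions used here for illustration only:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ergo-services/ergo/node"
	"github.com/ergo-services/ergo/proto/dist"
)

func main() {
	ctx := context.Background()
	opts := node.Options{
		Flags: node.DefaultFlags(),
		// CreateHandshake and HandshakeOptions are defined in proto/dist below
		Handshake: dist.CreateHandshake(dist.HandshakeOptions{Cookie: "secret123"}),
		// assumption: the default DIST proto constructor named in the Options
		// doc comment, fed with the default proto options
		Proto: dist.CreateProto(node.DefaultProtoOptions()),
		// assumption: an EPMD-based resolver constructor; a Resolver is
		// mandatory because StaticRoutesOnly is left false
		Resolver: dist.CreateResolver(ctx),
	}
	n, err := node.StartWithContext(ctx, "demo@localhost", "secret123", opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("started", n.Name())
	n.Wait()
}
```
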
diff --git a/node/process.go b/node/process.go
index 0c81a3a6..5d6d8867 100644
--- a/node/process.go
+++ b/node/process.go
@@ -12,17 +12,18 @@ import (
)
const (
+ // DefaultProcessMailboxSize
DefaultProcessMailboxSize = 100
)
type process struct {
- registrarInternal
+ coreInternal
sync.RWMutex
name string
self etf.Pid
behavior gen.ProcessBehavior
- env map[string]interface{}
+ env map[gen.EnvKey]interface{}
parent *process
groupLeader gen.Process
@@ -36,10 +37,11 @@ type process struct {
kill context.CancelFunc
exit processExitFunc
- replyMutex sync.Mutex
+ replyMutex sync.RWMutex
reply map[etf.Ref]chan etf.Term
- trapExit bool
+ trapExit bool
+ compression bool
}
type processOptions struct {
@@ -49,14 +51,17 @@ type processOptions struct {
type processExitFunc func(from etf.Pid, reason string) error
+// Self
func (p *process) Self() etf.Pid {
return p.self
}
+// Name
func (p *process) Name() string {
return p.name
}
+// RegisterName
func (p *process) RegisterName(name string) error {
if p.behavior == nil {
return ErrProcessTerminated
@@ -64,6 +69,7 @@ func (p *process) RegisterName(name string) error {
return p.registerName(name, p.self)
}
+// UnregisterName
func (p *process) UnregisterName(name string) error {
if p.behavior == nil {
return ErrProcessTerminated
@@ -78,6 +84,7 @@ func (p *process) UnregisterName(name string) error {
return p.unregisterName(name)
}
+// Kill
func (p *process) Kill() {
if p.behavior == nil {
return
@@ -85,6 +92,7 @@ func (p *process) Kill() {
p.kill()
}
+// Exit
func (p *process) Exit(reason string) error {
if p.behavior == nil {
return ErrProcessTerminated
@@ -92,10 +100,12 @@ func (p *process) Exit(reason string) error {
return p.exit(p.self, reason)
}
+// Context
func (p *process) Context() context.Context {
return p.context
}
+// Parent
func (p *process) Parent() gen.Process {
if p.parent == nil {
return nil
@@ -103,6 +113,7 @@ func (p *process) Parent() gen.Process {
return p.parent
}
+// GroupLeader
func (p *process) GroupLeader() gen.Process {
if p.groupLeader == nil {
return nil
@@ -110,22 +121,32 @@ func (p *process) GroupLeader() gen.Process {
return p.groupLeader
}
+// Links
func (p *process) Links() []etf.Pid {
return p.processLinks(p.self)
}
+
+// Monitors
func (p *process) Monitors() []etf.Pid {
return p.processMonitors(p.self)
}
+
+// MonitorsByName
func (p *process) MonitorsByName() []gen.ProcessID {
return p.processMonitorsByName(p.self)
}
+
+// MonitoredBy
func (p *process) MonitoredBy() []etf.Pid {
return p.processMonitoredBy(p.self)
}
+
+// Aliases
func (p *process) Aliases() []etf.Alias {
return p.aliases
}
+// Info
func (p *process) Info() gen.ProcessInfo {
if p.behavior == nil {
return gen.ProcessInfo{}
@@ -154,13 +175,27 @@ func (p *process) Info() gen.ProcessInfo {
}
}
+// Send
func (p *process) Send(to interface{}, message etf.Term) error {
if p.behavior == nil {
return ErrProcessTerminated
}
- return p.route(p.self, to, message)
+ switch receiver := to.(type) {
+ case etf.Pid:
+ return p.RouteSend(p.self, receiver, message)
+ case string:
+ return p.RouteSendReg(p.self, gen.ProcessID{Name: receiver, Node: string(p.self.Node)}, message)
+ case etf.Atom:
+ return p.RouteSendReg(p.self, gen.ProcessID{Name: string(receiver), Node: string(p.self.Node)}, message)
+ case gen.ProcessID:
+ return p.RouteSendReg(p.self, receiver, message)
+ case etf.Alias:
+ return p.RouteSendAlias(p.self, receiver, message)
+ }
+ return fmt.Errorf("Unknown receiver type")
}
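
Send now dispatches on the receiver type itself and picks the matching route method; a plain string or etf.Atom is treated as a name registered on the local node. A short usage sketch (the helper and the names in it are illustrative only):

```go
package example

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// sendVariants shows each receiver type (*process).Send dispatches on.
func sendVariants(p gen.Process, pid etf.Pid, alias etf.Alias) error {
	if err := p.Send(pid, "by pid"); err != nil { // RouteSend
		return err
	}
	if err := p.Send("worker", "by local name"); err != nil { // RouteSendReg on this node
		return err
	}
	if err := p.Send(gen.ProcessID{Name: "worker", Node: "b@localhost"}, "by name@node"); err != nil {
		return err
	}
	return p.Send(alias, "by alias") // RouteSendAlias
}
```
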
+// SendAfter
func (p *process) SendAfter(to interface{}, message etf.Term, after time.Duration) context.CancelFunc {
//TODO: should we control the number of timers/goroutines created this way?
ctx, cancel := context.WithCancel(p.context)
@@ -175,13 +210,14 @@ func (p *process) SendAfter(to interface{}, message etf.Term, after time.Duratio
return
case <-timer.C:
if p.IsAlive() {
- p.route(p.self, to, message)
+ p.Send(to, message)
}
}
}()
return cancel
}
+// CreateAlias
func (p *process) CreateAlias() (etf.Alias, error) {
if p.behavior == nil {
return etf.Alias{}, ErrProcessTerminated
@@ -189,6 +225,7 @@ func (p *process) CreateAlias() (etf.Alias, error) {
return p.newAlias(p)
}
+// DeleteAlias
func (p *process) DeleteAlias(alias etf.Alias) error {
if p.behavior == nil {
return ErrProcessTerminated
@@ -196,11 +233,12 @@ func (p *process) DeleteAlias(alias etf.Alias) error {
return p.deleteAlias(p, alias)
}
-func (p *process) ListEnv() map[string]interface{} {
+// ListEnv
+func (p *process) ListEnv() map[gen.EnvKey]interface{} {
p.RLock()
defer p.RUnlock()
- env := make(map[string]interface{})
+ env := make(map[gen.EnvKey]interface{})
if p.groupLeader != nil {
for key, value := range p.groupLeader.ListEnv() {
@@ -219,7 +257,8 @@ func (p *process) ListEnv() map[string]interface{} {
return env
}
-func (p *process) SetEnv(name string, value interface{}) {
+// SetEnv
+func (p *process) SetEnv(name gen.EnvKey, value interface{}) {
p.Lock()
defer p.Unlock()
if value == nil {
@@ -229,7 +268,8 @@ func (p *process) SetEnv(name string, value interface{}) {
p.env[name] = value
}
-func (p *process) Env(name string) interface{} {
+// Env
+func (p *process) Env(name gen.EnvKey) interface{} {
p.RLock()
defer p.RUnlock()
@@ -244,12 +284,14 @@ func (p *process) Env(name string) interface{} {
return nil
}
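
Process environments are now typed with gen.EnvKey rather than bare strings, and ListEnv merges the group leader's variables beneath the process's own. A small sketch (the key and values are illustrative):

```go
package example

import "github.com/ergo-services/ergo/gen"

// envKeyRetries is an illustrative application-defined key.
const envKeyRetries gen.EnvKey = "myapp:Retries"

func retries(p gen.Process) int {
	// setting a nil value would remove the variable instead
	p.SetEnv(envKeyRetries, 3)
	if v, ok := p.Env(envKeyRetries).(int); ok {
		return v
	}
	return 1 // default when the variable is unset or has another type
}
```
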
+// Wait
func (p *process) Wait() {
if p.IsAlive() {
<-p.context.Done()
}
}
+// WaitWithTimeout
func (p *process) WaitWithTimeout(d time.Duration) error {
if !p.IsAlive() {
return nil
@@ -266,47 +308,79 @@ func (p *process) WaitWithTimeout(d time.Duration) error {
}
}
+// Link
func (p *process) Link(with etf.Pid) {
if p.behavior == nil {
return
}
- p.link(p.self, with)
+ p.RouteLink(p.self, with)
}
+// Unlink
func (p *process) Unlink(with etf.Pid) {
- p.Lock()
- defer p.Unlock()
if p.behavior == nil {
return
}
- p.unlink(p.self, with)
+ p.RouteUnlink(p.self, with)
}
+// IsAlive
func (p *process) IsAlive() bool {
- p.Lock()
- defer p.Unlock()
if p.behavior == nil {
return false
}
return p.context.Err() == nil
}
+// NodeName
+func (p *process) NodeName() string {
+ return p.coreNodeName()
+}
+
+// NodeStop
+func (p *process) NodeStop() {
+ p.coreStop()
+}
+
+// NodeUptime
+func (p *process) NodeUptime() int64 {
+ return p.coreUptime()
+}
+
+// Children
func (p *process) Children() ([]etf.Pid, error) {
c, err := p.directRequest(gen.MessageDirectChildren{}, 5)
- if err == nil {
- return c.([]etf.Pid), nil
+ if err != nil {
+ return []etf.Pid{}, err
+ }
+ children, correct := c.([]etf.Pid)
+ if correct == false {
+ return []etf.Pid{}, fmt.Errorf("unexpected reply type %#v", c)
}
- return []etf.Pid{}, err
+ return children, nil
}
+// SetTrapExit
func (p *process) SetTrapExit(trap bool) {
p.trapExit = trap
}
+// TrapExit
func (p *process) TrapExit() bool {
return p.trapExit
}
+// SetCompression
+func (p *process) SetCompression(enable bool) {
+ p.compression = enable
+}
+
+// Compression
+func (p *process) Compression() bool {
+ return p.compression
+}
+
+// Behavior
func (p *process) Behavior() gen.ProcessBehavior {
p.Lock()
defer p.Unlock()
@@ -316,10 +390,12 @@ func (p *process) Behavior() gen.ProcessBehavior {
return p.behavior
}
+// Direct
func (p *process) Direct(request interface{}) (interface{}, error) {
return p.directRequest(request, gen.DefaultCallTimeout)
}
+// DirectWithTimeout
func (p *process) DirectWithTimeout(request interface{}, timeout int) (interface{}, error) {
if timeout < 1 {
timeout = 5
@@ -327,41 +403,69 @@ func (p *process) DirectWithTimeout(request interface{}, timeout int) (interface
return p.directRequest(request, timeout)
}
+// MonitorNode
func (p *process) MonitorNode(name string) etf.Ref {
- return p.monitorNode(p.self, name)
+ ref := p.MakeRef()
+ p.monitorNode(p.self, name, ref)
+ return ref
}
+// DemonitorNode
func (p *process) DemonitorNode(ref etf.Ref) bool {
return p.demonitorNode(ref)
}
+// MonitorProcess
func (p *process) MonitorProcess(process interface{}) etf.Ref {
ref := p.MakeRef()
- p.monitorProcess(p.self, process, ref)
+ switch mp := process.(type) {
+ case etf.Pid:
+ p.RouteMonitor(p.self, mp, ref)
+ return ref
+ case gen.ProcessID:
+ p.RouteMonitorReg(p.self, mp, ref)
+ return ref
+ case string:
+ p.RouteMonitorReg(p.self, gen.ProcessID{Name: mp, Node: string(p.self.Node)}, ref)
+ return ref
+ case etf.Atom:
+ p.RouteMonitorReg(p.self, gen.ProcessID{Name: string(mp), Node: string(p.self.Node)}, ref)
+ return ref
+ }
+
+ // create a fake gen.ProcessID; the monitor will send MessageDown with "noproc" as the reason
+ p.RouteMonitorReg(p.self, gen.ProcessID{Node: string(p.self.Node)}, ref)
return ref
}
+// DemonitorProcess
func (p *process) DemonitorProcess(ref etf.Ref) bool {
- return p.demonitorProcess(ref)
+ if err := p.RouteDemonitor(p.self, ref); err != nil {
+ return false
+ }
+ return true
}
+// RemoteSpawn makes request to spawn new process on a remote node
func (p *process) RemoteSpawn(node string, object string, opts gen.RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) {
- ref := p.MakeRef()
- optlist := etf.List{}
- if opts.RegisterName != "" {
- optlist = append(optlist, etf.Tuple{etf.Atom("name"), etf.Atom(opts.RegisterName)})
+ return p.RemoteSpawnWithTimeout(gen.DefaultCallTimeout, node, object, opts, args...)
+}
+// RemoteSpawnWithTimeout makes request to spawn new process on a remote node with given timeout
+func (p *process) RemoteSpawnWithTimeout(timeout int, node string, object string, opts gen.RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) {
+ ref := p.MakeRef()
+ p.PutSyncRequest(ref)
+ request := gen.RemoteSpawnRequest{
+ From: p.self,
+ Ref: ref,
+ Options: opts,
+ }
+ if err := p.RouteSpawnRequest(node, object, request, args...); err != nil {
+ p.CancelSyncRequest(ref)
+ return etf.Pid{}, err
}
- if opts.Timeout == 0 {
- opts.Timeout = gen.DefaultCallTimeout
- }
- control := etf.Tuple{distProtoSPAWN_REQUEST, ref, p.self, p.self,
- // {M,F,A}
- etf.Tuple{etf.Atom(object), etf.Atom(opts.Function), len(args)},
- optlist,
- }
- p.SendSyncRequestRaw(ref, etf.Atom(node), append([]etf.Term{control}, args)...)
- reply, err := p.WaitSyncReply(ref, opts.Timeout)
+
+ reply, err := p.WaitSyncReply(ref, timeout)
if err != nil {
return etf.Pid{}, err
}
@@ -374,17 +478,18 @@ func (p *process) RemoteSpawn(node string, object string, opts gen.RemoteSpawnOp
case etf.Pid:
m := etf.Ref{} // empty reference
if opts.Monitor != m {
- p.monitorProcess(p.self, r, opts.Monitor)
+ p.RouteMonitor(p.self, r, opts.Monitor)
}
if opts.Link {
- p.Link(r)
+ p.RouteLink(p.self, r)
}
return r, nil
case etf.Atom:
switch string(r) {
case ErrTaken.Error():
return etf.Pid{}, ErrTaken
-
+ case ErrBehaviorUnknown.Error():
+ return etf.Pid{}, ErrBehaviorUnknown
}
return etf.Pid{}, fmt.Errorf(string(r))
}
@@ -392,6 +497,7 @@ func (p *process) RemoteSpawn(node string, object string, opts gen.RemoteSpawnOp
return etf.Pid{}, fmt.Errorf("unknown result: %#v", reply)
}
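
RemoteSpawn is now a thin wrapper around the sync-request machinery (PutSyncRequest, RouteSpawnRequest, WaitSyncReply), with RemoteSpawnWithTimeout exposing the deadline. For the request to succeed, the peer must have made the behavior available via ProvideRemoteSpawn. A usage sketch with illustrative names:

```go
package example

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// spawnOnPeer asks "b@localhost" to start the behavior it registered under
// "worker" (via node.ProvideRemoteSpawn on that side).
func spawnOnPeer(p gen.Process) (etf.Pid, error) {
	opts := gen.RemoteSpawnOptions{
		RegisterName: "remote_worker", // name to register on the remote node
	}
	// 10-second deadline instead of gen.DefaultCallTimeout
	return p.RemoteSpawnWithTimeout(10, "b@localhost", "worker", opts, "arg1", 42)
}
```
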
+// Spawn
func (p *process) Spawn(name string, opts gen.ProcessOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) {
options := processOptions{
ProcessOptions: opts,
@@ -434,38 +540,29 @@ func (p *process) directRequest(request interface{}, timeout int) (interface{},
}
}
-func (p *process) SendSyncRequestRaw(ref etf.Ref, node etf.Atom, messages ...etf.Term) error {
+// PutSyncRequest
+func (p *process) PutSyncRequest(ref etf.Ref) {
if p.reply == nil {
- return ErrProcessTerminated
+ return
}
reply := make(chan etf.Term, 2)
p.replyMutex.Lock()
- defer p.replyMutex.Unlock()
- p.reply[ref] = reply
- return p.routeRaw(node, messages...)
-}
-func (p *process) SendSyncRequest(ref etf.Ref, to interface{}, message etf.Term) error {
- if p.reply == nil {
- return ErrProcessTerminated
- }
- p.replyMutex.Lock()
- defer p.replyMutex.Unlock()
-
- reply := make(chan etf.Term, 2)
p.reply[ref] = reply
-
- return p.Send(to, message)
+ p.replyMutex.Unlock()
}
+// PutSyncReply
func (p *process) PutSyncReply(ref etf.Ref, reply etf.Term) error {
if p.reply == nil {
return ErrProcessTerminated
}
- p.replyMutex.Lock()
+
+ p.replyMutex.RLock()
rep, ok := p.reply[ref]
- p.replyMutex.Unlock()
+ p.replyMutex.RUnlock()
+
if !ok {
- // ignored, no process waiting for the reply
+ // ignore this reply, no process waiting for it
return nil
}
select {
@@ -475,10 +572,22 @@ func (p *process) PutSyncReply(ref etf.Ref, reply etf.Term) error {
return nil
}
-func (p *process) WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error) {
+// CancelSyncRequest
+func (p *process) CancelSyncRequest(ref etf.Ref) {
p.replyMutex.Lock()
- reply, wait_for_reply := p.reply[ref]
+ delete(p.reply, ref)
p.replyMutex.Unlock()
+}
+
+// WaitSyncReply
+func (p *process) WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error) {
+ if p.reply == nil {
+ return nil, ErrProcessTerminated
+ }
+
+ p.replyMutex.RLock()
+ reply, wait_for_reply := p.reply[ref]
+ p.replyMutex.RUnlock()
if !wait_for_reply {
return nil, fmt.Errorf("Unknown request")
@@ -507,6 +616,7 @@ func (p *process) WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error) {
}
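
Together PutSyncRequest, PutSyncReply, CancelSyncRequest and WaitSyncReply replace the old SendSyncRequest/SendSyncRequestRaw pair: the caller registers the ref before routing anything, cancels it if routing fails, and otherwise blocks in WaitSyncReply. A sketch of that lifecycle, assuming these helpers are exposed on gen.Process as they are on *process here (sendRequest stands in for whatever call carries the ref to the peer):

```go
package example

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// callWithRef shows the request/reply lifecycle RemoteSpawnWithTimeout uses.
func callWithRef(p gen.Process, sendRequest func(etf.Ref) error) (etf.Term, error) {
	ref := p.MakeRef()
	// register the pending request first so an early reply cannot be lost
	p.PutSyncRequest(ref)
	if err := sendRequest(ref); err != nil {
		// routing failed: drop the pending entry
		p.CancelSyncRequest(ref)
		return nil, err
	}
	// blocks until PutSyncReply delivers the answer or the timeout fires
	return p.WaitSyncReply(ref, gen.DefaultCallTimeout)
}
```
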
+// ProcessChannels
func (p *process) ProcessChannels() gen.ProcessChannels {
return gen.ProcessChannels{
Mailbox: p.mailBox,
diff --git a/node/registrar.go b/node/registrar.go
deleted file mode 100644
index 6bb64746..00000000
--- a/node/registrar.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package node
-
-import (
- "context"
- "fmt"
- "runtime"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ergo-services/ergo/etf"
- "github.com/ergo-services/ergo/gen"
- "github.com/ergo-services/ergo/lib"
-)
-
-const (
- startPID = 1000
-)
-
-type registrar struct {
- monitor
- ctx context.Context
-
- nextPID uint64
- uniqID uint64
- nodename string
- creation uint32
-
- net networkInternal
- node nodeInternal
-
- names map[string]etf.Pid
- mutexNames sync.Mutex
- aliases map[etf.Alias]*process
- mutexAliases sync.Mutex
- processes map[uint64]*process
- mutexProcesses sync.Mutex
- peers map[string]*peer
- mutexPeers sync.Mutex
-
- behaviors map[string]map[string]gen.RegisteredBehavior
- mutexBehaviors sync.Mutex
-}
-
-type registrarInternal interface {
- gen.Registrar
- monitorInternal
-
- spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error)
- registerName(name string, pid etf.Pid) error
- unregisterName(name string) error
- registerPeer(peer *peer) error
- unregisterPeer(name string)
- newAlias(p *process) (etf.Alias, error)
- deleteAlias(owner *process, alias etf.Alias) error
- getProcessByPid(etf.Pid) *process
-
- route(from etf.Pid, to etf.Term, message etf.Term) error
- routeRaw(nodename etf.Atom, messages ...etf.Term) error
-}
-
-func newRegistrar(ctx context.Context, nodename string, creation uint32, node nodeInternal) registrarInternal {
- r := ®istrar{
- ctx: ctx,
- nextPID: startPID,
- uniqID: uint64(time.Now().UnixNano()),
- net: node.(networkInternal),
- node: node.(nodeInternal),
- nodename: nodename,
- creation: creation,
- names: make(map[string]etf.Pid),
- aliases: make(map[etf.Alias]*process),
- processes: make(map[uint64]*process),
- peers: make(map[string]*peer),
- behaviors: make(map[string]map[string]gen.RegisteredBehavior),
- }
- r.monitor = newMonitor(r)
- return r
-}
-
-func (r *registrar) NodeName() string {
- return r.node.Name()
-}
-
-func (r *registrar) NodeStop() {
- r.node.Stop()
-}
-
-func (r *registrar) newPID() etf.Pid {
- // http://erlang.org/doc/apps/erts/erl_ext_dist.html#pid_ext
- // https://stackoverflow.com/questions/243363/can-someone-explain-the-structure-of-a-pid-in-erlang
- i := atomic.AddUint64(&r.nextPID, 1)
- return etf.Pid{
- Node: etf.Atom(r.nodename),
- ID: i,
- Creation: r.creation,
- }
-
-}
-
-// MakeRef returns atomic reference etf.Ref within this node
-func (r *registrar) MakeRef() (ref etf.Ref) {
- ref.Node = etf.Atom(r.nodename)
- ref.Creation = r.creation
- nt := atomic.AddUint64(&r.uniqID, 1)
- ref.ID[0] = uint32(uint64(nt) & ((2 << 17) - 1))
- ref.ID[1] = uint32(uint64(nt) >> 46)
-
- return
-}
-
-func (r *registrar) IsAlias(alias etf.Alias) bool {
- r.mutexAliases.Lock()
- _, ok := r.aliases[alias]
- r.mutexAliases.Unlock()
- return ok
-}
-
-func (r *registrar) newAlias(p *process) (etf.Alias, error) {
- var alias etf.Alias
-
- // chech if its alive
- r.mutexProcesses.Lock()
- _, exist := r.processes[p.self.ID]
- r.mutexProcesses.Unlock()
- if !exist {
- return alias, ErrProcessUnknown
- }
-
- alias = etf.Alias(r.MakeRef())
- lib.Log("[%s] REGISTRAR create process alias for %v: %s", r.nodename, p.self, alias)
-
- r.mutexAliases.Lock()
- r.aliases[alias] = p
- r.mutexAliases.Unlock()
-
- p.Lock()
- p.aliases = append(p.aliases, alias)
- p.Unlock()
- return alias, nil
-}
-
-func (r *registrar) deleteAlias(owner *process, alias etf.Alias) error {
- lib.Log("[%s] REGISTRAR delete process alias %v for %v", r.nodename, alias, owner.self)
-
- r.mutexAliases.Lock()
- p, alias_exist := r.aliases[alias]
- r.mutexAliases.Unlock()
-
- if !alias_exist {
- return ErrAliasUnknown
- }
-
- r.mutexProcesses.Lock()
- _, process_exist := r.processes[owner.self.ID]
- r.mutexProcesses.Unlock()
-
- if !process_exist {
- return ErrProcessUnknown
- }
- if p.self != owner.self {
- return ErrAliasOwner
- }
-
- p.Lock()
- for i := range p.aliases {
- if alias != p.aliases[i] {
- continue
- }
- delete(r.aliases, alias)
- p.aliases[i] = p.aliases[0]
- p.aliases = p.aliases[1:]
- p.Unlock()
- return nil
- }
-
- p.Unlock()
- fmt.Println("Bug: Process lost its alias. Please, report this issue")
- r.mutexAliases.Lock()
- delete(r.aliases, alias)
- r.mutexAliases.Unlock()
-
- return ErrAliasUnknown
-}
-
-func (r *registrar) newProcess(name string, behavior gen.ProcessBehavior, opts processOptions) (*process, error) {
-
- var parentContext context.Context
-
- mailboxSize := DefaultProcessMailboxSize
- if opts.MailboxSize > 0 {
- mailboxSize = int(opts.MailboxSize)
- }
-
- parentContext = r.ctx
-
- processContext, kill := context.WithCancel(parentContext)
- if opts.Context != nil {
- processContext, _ = context.WithCancel(opts.Context)
- }
-
- pid := r.newPID()
-
- // set global variable 'node'
- if opts.Env == nil {
- opts.Env = make(map[string]interface{})
- }
- opts.Env["ergo:Node"] = r.node.(Node)
-
- process := &process{
- registrarInternal: r,
-
- self: pid,
- name: name,
- behavior: behavior,
- env: opts.Env,
-
- parent: opts.parent,
- groupLeader: opts.GroupLeader,
-
- mailBox: make(chan gen.ProcessMailboxMessage, mailboxSize),
- gracefulExit: make(chan gen.ProcessGracefulExitRequest, mailboxSize),
- direct: make(chan gen.ProcessDirectMessage),
-
- context: processContext,
- kill: kill,
-
- reply: make(map[etf.Ref]chan etf.Term),
- }
-
- process.exit = func(from etf.Pid, reason string) error {
- lib.Log("[%s] EXIT from %s to %s with reason: %s", r.nodename, from, pid, reason)
- if processContext.Err() != nil {
- // process is already died
- return ErrProcessUnknown
- }
-
- ex := gen.ProcessGracefulExitRequest{
- From: from,
- Reason: reason,
- }
-
- // use select just in case if this process isn't been started yet
- // or ProcessLoop is already exited (has been set to nil)
- // otherwise it cause infinity lock
- select {
- case process.gracefulExit <- ex:
- default:
- return ErrProcessBusy
- }
-
- // let the process decide whether to stop itself, otherwise its going to be killed
- if !process.trapExit {
- process.kill()
- }
- return nil
- }
-
- if name != "" {
- lib.Log("[%s] REGISTRAR registering name (%s): %s", r.nodename, pid, name)
- r.mutexNames.Lock()
- if _, exist := r.names[name]; exist {
- r.mutexNames.Unlock()
- return nil, ErrTaken
- }
- r.names[name] = process.self
- r.mutexNames.Unlock()
- }
-
- lib.Log("[%s] REGISTRAR registering process: %s", r.nodename, pid)
- r.mutexProcesses.Lock()
- r.processes[process.self.ID] = process
- r.mutexProcesses.Unlock()
-
- return process, nil
-}
-
-func (r *registrar) deleteProcess(pid etf.Pid) {
- r.mutexProcesses.Lock()
- p, exist := r.processes[pid.ID]
- if !exist {
- r.mutexProcesses.Unlock()
- return
- }
- lib.Log("[%s] REGISTRAR unregistering process: %s", r.nodename, p.self)
- delete(r.processes, pid.ID)
- r.mutexProcesses.Unlock()
-
- r.mutexNames.Lock()
- if (p.name) != "" {
- lib.Log("[%s] REGISTRAR unregistering name (%s): %s", r.nodename, p.self, p.name)
- delete(r.names, p.name)
- }
-
- // delete names registered with this pid
- for name, pid := range r.names {
- if p.self == pid {
- delete(r.names, name)
- }
- }
- r.mutexNames.Unlock()
-
- r.mutexAliases.Lock()
- for alias := range r.aliases {
- delete(r.aliases, alias)
- }
- r.mutexAliases.Unlock()
-
- return
-}
-
-func (r *registrar) spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) {
-
- process, err := r.newProcess(name, behavior, opts)
- if err != nil {
- return nil, err
- }
-
- initProcess := func() (ps gen.ProcessState, err error) {
- if lib.CatchPanic() {
- defer func() {
- if rcv := recover(); rcv != nil {
- pc, fn, line, _ := runtime.Caller(2)
- fmt.Printf("Warning: initialization process failed %s[%q] %#v at %s[%s:%d]\n",
- process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line)
- r.deleteProcess(process.self)
- err = fmt.Errorf("panic")
- }
- }()
- }
-
- ps, err = behavior.ProcessInit(process, args...)
- return
- }
-
- processState, err := initProcess()
- if err != nil {
- return nil, err
- }
-
- started := make(chan bool)
- defer close(started)
-
- cleanProcess := func(reason string) {
- // set gracefulExit to nil before we start termination handling
- process.gracefulExit = nil
- r.deleteProcess(process.self)
- // invoke cancel context to prevent memory leaks
- // and propagate context canelation
- process.Kill()
- // notify all the linked process and monitors
- r.processTerminated(process.self, name, reason)
- // make the rest empty
- process.Lock()
- process.aliases = []etf.Alias{}
-
- // Do not clean self and name. Sometimes its good to know what pid
- // (and what name) was used by the dead process. (gen.Applications is using it)
- // process.name = ""
- // process.self = etf.Pid{}
-
- process.behavior = nil
- process.parent = nil
- process.groupLeader = nil
- process.exit = nil
- process.kill = nil
- process.mailBox = nil
- process.direct = nil
- process.env = nil
- process.reply = nil
- process.Unlock()
- }
-
- go func(ps gen.ProcessState) {
- if lib.CatchPanic() {
- defer func() {
- if rcv := recover(); rcv != nil {
- pc, fn, line, _ := runtime.Caller(2)
- fmt.Printf("Warning: process terminated %s[%q] %#v at %s[%s:%d]\n",
- process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line)
- cleanProcess("panic")
- }
- }()
- }
-
- // start process loop
- reason := behavior.ProcessLoop(ps, started)
- // process stopped
- cleanProcess(reason)
-
- }(processState)
-
- // wait for the starting process loop
- <-started
- return process, nil
-}
-
-func (r *registrar) registerName(name string, pid etf.Pid) error {
- lib.Log("[%s] REGISTRAR registering name %s", r.nodename, name)
- r.mutexNames.Lock()
- defer r.mutexNames.Unlock()
- if _, ok := r.names[name]; ok {
- // already registered
- return ErrTaken
- }
- r.names[name] = pid
- return nil
-}
-
-func (r *registrar) unregisterName(name string) error {
- lib.Log("[%s] REGISTRAR unregistering name %s", r.nodename, name)
- r.mutexNames.Lock()
- defer r.mutexNames.Unlock()
- if _, ok := r.names[name]; ok {
- delete(r.names, name)
- return nil
- }
- return ErrNameUnknown
-}
-
-func (r *registrar) registerPeer(peer *peer) error {
- lib.Log("[%s] REGISTRAR registering peer %#v", r.nodename, peer.name)
- r.mutexPeers.Lock()
- defer r.mutexPeers.Unlock()
-
- if _, ok := r.peers[peer.name]; ok {
- // already registered
- return ErrTaken
- }
- r.peers[peer.name] = peer
- return nil
-}
-
-func (r *registrar) unregisterPeer(name string) {
- lib.Log("[%s] REGISTRAR unregistering peer %v", r.nodename, name)
- r.mutexPeers.Lock()
- if _, ok := r.peers[name]; ok {
- delete(r.peers, name)
- // mutex must be unlocked before we call nodeDown
- r.mutexPeers.Unlock()
- r.nodeDown(name)
- return
- }
- r.mutexPeers.Unlock()
-}
-
-func (r *registrar) RegisterBehavior(group, name string, behavior gen.ProcessBehavior, data interface{}) error {
- lib.Log("[%s] REGISTRAR registering behavior %q in group %q ", r.nodename, name, group)
- var groupBehaviors map[string]gen.RegisteredBehavior
- var exist bool
-
- r.mutexBehaviors.Lock()
- defer r.mutexBehaviors.Unlock()
-
- groupBehaviors, exist = r.behaviors[group]
- if !exist {
- groupBehaviors = make(map[string]gen.RegisteredBehavior)
- r.behaviors[group] = groupBehaviors
- }
-
- _, exist = groupBehaviors[name]
- if exist {
- return ErrTaken
- }
-
- rb := gen.RegisteredBehavior{
- Behavior: behavior,
- Data: data,
- }
- groupBehaviors[name] = rb
- return nil
-}
-
-func (r *registrar) RegisteredBehavior(group, name string) (gen.RegisteredBehavior, error) {
- var groupBehaviors map[string]gen.RegisteredBehavior
- var rb gen.RegisteredBehavior
- var exist bool
-
- r.mutexBehaviors.Lock()
- defer r.mutexBehaviors.Unlock()
-
- groupBehaviors, exist = r.behaviors[group]
- if !exist {
- return rb, ErrBehaviorGroupUnknown
- }
-
- rb, exist = groupBehaviors[name]
- if !exist {
- return rb, ErrBehaviorUnknown
- }
- return rb, nil
-}
-
-func (r *registrar) RegisteredBehaviorGroup(group string) []gen.RegisteredBehavior {
- var groupBehaviors map[string]gen.RegisteredBehavior
- var exist bool
- var listrb []gen.RegisteredBehavior
-
- r.mutexBehaviors.Lock()
- defer r.mutexBehaviors.Unlock()
-
- groupBehaviors, exist = r.behaviors[group]
- if !exist {
- return listrb
- }
-
- for _, v := range groupBehaviors {
- listrb = append(listrb, v)
- }
- return listrb
-}
-
-func (r *registrar) UnregisterBehavior(group, name string) error {
- lib.Log("[%s] REGISTRAR unregistering behavior %s in group %s ", r.nodename, name, group)
- var groupBehaviors map[string]gen.RegisteredBehavior
- var exist bool
-
- r.mutexBehaviors.Lock()
- defer r.mutexBehaviors.Unlock()
-
- groupBehaviors, exist = r.behaviors[group]
- if !exist {
- return ErrBehaviorUnknown
- }
- delete(groupBehaviors, name)
-
- // remove group if its empty
- if len(groupBehaviors) == 0 {
- delete(r.behaviors, group)
- }
- return nil
-}
-
-func (r *registrar) IsProcessAlive(process gen.Process) bool {
- pid := process.Self()
- p := r.ProcessByPid(pid)
- if p == nil {
- return false
- }
-
- return p.IsAlive()
-}
-
-func (r *registrar) ProcessInfo(pid etf.Pid) (gen.ProcessInfo, error) {
- p := r.ProcessByPid(pid)
- if p == nil {
- return gen.ProcessInfo{}, fmt.Errorf("undefined")
- }
-
- return p.Info(), nil
-}
-
-func (r *registrar) ProcessByPid(pid etf.Pid) gen.Process {
- if p := r.getProcessByPid(pid); p != nil {
- return p
- }
- // we must return nil explicitly, otherwise returning value is not nil
- // even for the nil(*process) due to the nature of interface type
- return nil
-}
-
-func (r *registrar) getProcessByPid(pid etf.Pid) *process {
- r.mutexProcesses.Lock()
- defer r.mutexProcesses.Unlock()
- if p, ok := r.processes[pid.ID]; ok {
- return p
- }
- // unknown process
- return nil
-}
-
-func (r *registrar) ProcessByAlias(alias etf.Alias) gen.Process {
- r.mutexAliases.Lock()
- defer r.mutexAliases.Unlock()
- if p, ok := r.aliases[alias]; ok {
- return p
- }
- // unknown process
- return nil
-}
-
-func (r *registrar) ProcessByName(name string) gen.Process {
- var pid etf.Pid
- if name != "" {
- // requesting Process by name
- r.mutexNames.Lock()
-
- if p, ok := r.names[name]; ok {
- pid = p
- } else {
- r.mutexNames.Unlock()
- return nil
- }
- r.mutexNames.Unlock()
- }
-
- return r.ProcessByPid(pid)
-}
-
-func (r *registrar) ProcessList() []gen.Process {
- list := []gen.Process{}
- r.mutexProcesses.Lock()
- for _, p := range r.processes {
- list = append(list, p)
- }
- r.mutexProcesses.Unlock()
- return list
-}
-
-func (r *registrar) PeerList() []string {
- list := []string{}
- for n, _ := range r.peers {
- list = append(list, n)
- }
- return list
-}
-
-// route message to a local/remote process
-func (r *registrar) route(from etf.Pid, to etf.Term, message etf.Term) error {
-next:
- switch tto := to.(type) {
- case etf.Pid:
- lib.Log("[%s] REGISTRAR sending message by pid %s", r.nodename, tto)
- if string(tto.Node) == r.nodename {
- // local route
- r.mutexProcesses.Lock()
- p, exist := r.processes[tto.ID]
- r.mutexProcesses.Unlock()
- if !exist {
- return ErrProcessUnknown
- }
- select {
- case p.mailBox <- gen.ProcessMailboxMessage{from, message}:
- default:
- return fmt.Errorf("WARNING! mailbox of %s is full. dropped message from %s", p.Self(), from)
- }
- return nil
- }
-
- r.mutexPeers.Lock()
- peer, ok := r.peers[string(tto.Node)]
- r.mutexPeers.Unlock()
- if !ok {
- if err := r.net.connect(string(tto.Node)); err != nil {
- lib.Log("[%s] Can't connect to %v: %s", r.nodename, tto.Node, err)
- return fmt.Errorf("Can't connect to %s: %s", tto.Node, err)
- }
-
- r.mutexPeers.Lock()
- peer, _ = r.peers[string(tto.Node)]
- r.mutexPeers.Unlock()
- }
-
- send := peer.getChannel()
- send <- []etf.Term{etf.Tuple{distProtoSEND, etf.Atom(""), tto}, message}
-
- case gen.ProcessID:
- lib.Log("[%s] REGISTRAR sending message by gen.ProcessID %#v", r.nodename, tto)
-
- if tto.Node == r.nodename {
- // local route
- to = tto.Name
- goto next
- }
-
- // sending to remote node
- r.mutexPeers.Lock()
- peer, ok := r.peers[tto.Node]
- r.mutexPeers.Unlock()
- if !ok {
- // initiate connection and make yet another attempt to deliver this message
- if err := r.net.connect(tto.Node); err != nil {
- lib.Log("[%s] Can't connect to %v: %s", r.nodename, tto.Node, err)
- return fmt.Errorf("Can't connect to %s: %s", tto.Node, err)
- }
-
- r.mutexPeers.Lock()
- peer, _ = r.peers[tto.Node]
- r.mutexPeers.Unlock()
- }
-
- send := peer.getChannel()
- send <- []etf.Term{etf.Tuple{distProtoREG_SEND, from, etf.Atom(""), etf.Atom(tto.Name)}, message}
-
- case string:
- lib.Log("[%s] REGISTRAR sending message by name %#v", r.nodename, tto)
- r.mutexNames.Lock()
- if pid, ok := r.names[tto]; ok {
- to = pid
- r.mutexNames.Unlock()
- goto next
- }
- r.mutexNames.Unlock()
-
- case etf.Atom:
- lib.Log("[%s] REGISTRAR sending message by name %#v", r.nodename, tto)
- r.mutexNames.Lock()
- if pid, ok := r.names[string(tto)]; ok {
- to = pid
- r.mutexNames.Unlock()
- goto next
- }
- r.mutexNames.Unlock()
-
- case etf.Alias:
- lib.Log("[%s] REGISTRAR sending message by alias %s", r.nodename, tto)
- r.mutexAliases.Lock()
- if string(tto.Node) == r.nodename {
- // local route by alias
- if p, ok := r.aliases[tto]; ok {
- to = p.self
- r.mutexAliases.Unlock()
- goto next
- }
- }
- r.mutexAliases.Unlock()
-
- r.mutexPeers.Lock()
- peer, ok := r.peers[string(tto.Node)]
- r.mutexPeers.Unlock()
- if !ok {
- if err := r.net.connect(string(tto.Node)); err != nil {
- lib.Log("[%s] Can't connect to %v: %s", r.nodename, tto.Node, err)
- return fmt.Errorf("Can't connect to %s: %s", tto.Node, err)
- }
-
- r.mutexPeers.Lock()
- peer, _ = r.peers[string(tto.Node)]
- r.mutexPeers.Unlock()
- }
-
- send := peer.getChannel()
- send <- []etf.Term{etf.Tuple{distProtoALIAS_SEND, from, tto}, message}
-
- default:
- lib.Log("[%s] unsupported receiver type %#v", r.nodename, tto)
- return fmt.Errorf("unsupported receiver type %#v", tto)
- }
-
- return nil
-}
-
-func (r *registrar) routeRaw(nodename etf.Atom, messages ...etf.Term) error {
- r.mutexPeers.Lock()
- peer, ok := r.peers[string(nodename)]
- r.mutexPeers.Unlock()
- if len(messages) == 0 {
- return fmt.Errorf("nothing to send")
- }
- if !ok {
- // initiate connection and make yet another attempt to deliver this message
- if err := r.net.connect(string(nodename)); err != nil {
- lib.Log("[%s] Can't connect to %v: %s", r.nodename, nodename, err)
- return err
- }
-
- r.mutexPeers.Lock()
- peer, _ = r.peers[string(nodename)]
- r.mutexPeers.Unlock()
- }
-
- send := peer.getChannel()
- send <- messages
- return nil
-}
diff --git a/node/types.go b/node/types.go
index 7cd3d708..b8629728 100644
--- a/node/types.go
+++ b/node/types.go
@@ -1,7 +1,10 @@
package node
import (
+ "context"
+ "crypto/tls"
"fmt"
+ "io"
"time"
"github.com/ergo-services/ergo/etf"
@@ -17,69 +20,63 @@ var (
ErrNameOwner = fmt.Errorf("Not an owner")
ErrProcessBusy = fmt.Errorf("Process is busy")
ErrProcessUnknown = fmt.Errorf("Unknown process")
+ ErrProcessIncarnation = fmt.Errorf("Process ID belongs to the previous incarnation")
ErrProcessTerminated = fmt.Errorf("Process terminated")
+ ErrMonitorUnknown = fmt.Errorf("Unknown monitor reference")
+ ErrSenderUnknown = fmt.Errorf("Unknown sender")
ErrBehaviorUnknown = fmt.Errorf("Unknown behavior")
ErrBehaviorGroupUnknown = fmt.Errorf("Unknown behavior group")
ErrAliasUnknown = fmt.Errorf("Unknown alias")
ErrAliasOwner = fmt.Errorf("Not an owner")
+ ErrNoRoute = fmt.Errorf("No route to node")
ErrTaken = fmt.Errorf("Resource is taken")
ErrTimeout = fmt.Errorf("Timed out")
ErrFragmented = fmt.Errorf("Fragmented data")
+
+ ErrUnsupported = fmt.Errorf("Not supported")
)
// Distributed operations codes (http://www.erlang.org/doc/apps/erts/erl_dist_protocol.html)
const (
- distProtoLINK = 1
- distProtoSEND = 2
- distProtoEXIT = 3
- distProtoUNLINK = 4
- distProtoNODE_LINK = 5
- distProtoREG_SEND = 6
- distProtoGROUP_LEADER = 7
- distProtoEXIT2 = 8
- distProtoSEND_TT = 12
- distProtoEXIT_TT = 13
- distProtoREG_SEND_TT = 16
- distProtoEXIT2_TT = 18
- distProtoMONITOR = 19
- distProtoDEMONITOR = 20
- distProtoMONITOR_EXIT = 21
- distProtoSEND_SENDER = 22
- distProtoSEND_SENDER_TT = 23
- distProtoPAYLOAD_EXIT = 24
- distProtoPAYLOAD_EXIT_TT = 25
- distProtoPAYLOAD_EXIT2 = 26
- distProtoPAYLOAD_EXIT2_TT = 27
- distProtoPAYLOAD_MONITOR_P_EXIT = 28
- distProtoSPAWN_REQUEST = 29
- distProtoSPAWN_REQUEST_TT = 30
- distProtoSPAWN_REPLY = 31
- distProtoSPAWN_REPLY_TT = 32
- distProtoALIAS_SEND = 33
- distProtoALIAS_SEND_TT = 34
- distProtoUNLINK_ID = 35
- distProtoUNLINK_ID_ACK = 36
-
- defaultListenRangeBegin uint16 = 15000
- defaultListenRangeEnd uint16 = 65000
- defaultEPMDPort uint16 = 4369
- defaultSendQueueLength int = 100
- defaultRecvQueueLength int = 100
- defaultFragmentationUnit = 65000
- defaultHandshakeVersion = 5
+ // node options
+ defaultListenBegin uint16 = 15000
+ defaultListenEnd uint16 = 65000
+ defaultKeepAlivePeriod time.Duration = 5
+
+ EnvKeyVersion gen.EnvKey = "ergo:Version"
+ EnvKeyNode gen.EnvKey = "ergo:Node"
+ EnvKeyRemoteSpawn gen.EnvKey = "ergo:RemoteSpawn"
+
+ DefaultProtoRecvQueueLength int = 100
+ DefaultProtoSendQueueLength int = 100
+ DefaultProroFragmentationUnit int = 65000
)
type Node interface {
- gen.Registrar
- Network
+ gen.Core
+ // Name returns node name
Name() string
+ // IsAlive returns true if node is still alive
IsAlive() bool
+ // Uptime returns node uptime in seconds
Uptime() int64
+ // Version returns the node version
Version() Version
+ // ListEnv returns a map of configured Node environment variables.
+ ListEnv() map[gen.EnvKey]interface{}
+ // SetEnv sets a node environment variable with the given name. Use a nil value to remove the variable. Names prefixed with "ergo:" are ignored.
+ SetEnv(name gen.EnvKey, value interface{})
+ // Env returns the value associated with the given environment name.
+ Env(name gen.EnvKey) interface{}
+
+ // Spawn spawns a new process
Spawn(name string, opts gen.ProcessOptions, object gen.ProcessBehavior, args ...etf.Term) (gen.Process, error)
+ // RegisterName
RegisterName(name string, pid etf.Pid) error
+ // UnregisterName
UnregisterName(name string) error
+
LoadedApplications() []gen.ApplicationInfo
WhichApplications() []gen.ApplicationInfo
ApplicationInfo(name string) (gen.ApplicationInfo, error)
@@ -89,8 +86,28 @@ type Node interface {
ApplicationStartPermanent(appName string, args ...etf.Term) (gen.Process, error)
ApplicationStartTransient(appName string, args ...etf.Term) (gen.Process, error)
ApplicationStop(appName string) error
+
ProvideRPC(module string, function string, fun gen.RPC) error
RevokeRPC(module, function string) error
+ ProvideRemoteSpawn(name string, object gen.ProcessBehavior) error
+ RevokeRemoteSpawn(name string) error
+
+ // AddStaticRoute adds a static route for the given node name, which makes the node skip the resolving process
+ AddStaticRoute(name string, port uint16, options RouteOptions) error
+ // RemoveStaticRoute removes the static route previously added with AddStaticRoute
+ RemoveStaticRoute(name string) bool
+ // StaticRoutes returns the list of routes added with AddStaticRoute
+ StaticRoutes() []Route
+
+ // Resolve
+ Resolve(peername string) (Route, error)
+
+ // Connect sets up a connection to the node
+ Connect(nodename string) error
+ // Disconnect closes the connection to the node
+ Disconnect(nodename string) error
+ // Nodes returns the list of connected nodes
+ Nodes() []string
Links(process etf.Pid) []etf.Pid
Monitors(process etf.Pid) []etf.Pid
@@ -102,66 +119,266 @@ type Node interface {
WaitWithTimeout(d time.Duration) error
}
+// Version
type Version struct {
Release string
Prefix string
OTP int
}
-type Network interface {
- AddStaticRoute(name string, port uint16) error
- AddStaticRouteExt(name string, port uint16, cookie string, tls bool) error
- RemoveStaticRoute(name string)
- Resolve(name string) (NetworkRoute, error)
+// CoreRouter routes messages from/to remote node
+type CoreRouter interface {
- ProvideRemoteSpawn(name string, object gen.ProcessBehavior) error
- RevokeRemoteSpawn(name string) error
-}
+ //
+ // implemented by core
+ //
+
+ // RouteSend routes message by Pid
+ RouteSend(from etf.Pid, to etf.Pid, message etf.Term) error
+ // RouteSendReg routes message by registered process name (gen.ProcessID)
+ RouteSendReg(from etf.Pid, to gen.ProcessID, message etf.Term) error
+ // RouteSendAlias routes message by process alias
+ RouteSendAlias(from etf.Pid, to etf.Alias, message etf.Term) error
+
+ ProcessByPid(pid etf.Pid) gen.Process
+ ProcessByName(name string) gen.Process
+ ProcessByAlias(alias etf.Alias) gen.Process
+
+ GetConnection(nodename string) (ConnectionInterface, error)
+
+ RouteSpawnRequest(node string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error
+ RouteSpawnReply(to etf.Pid, ref etf.Ref, result etf.Term) error
+ RouteProxy() error
-type NetworkRoute struct {
- Port int
- Cookie string
- TLS bool
+ //
+ // implemented by monitor
+ //
+
+ // RouteLink links the given two processes
+ RouteLink(pidA etf.Pid, pidB etf.Pid) error
+ // RouteUnlink unlinks the given two processes
+ RouteUnlink(pidA etf.Pid, pidB etf.Pid) error
+ // RouteExit routes MessageExit to the linked process
+ RouteExit(to etf.Pid, terminated etf.Pid, reason string) error
+ // RouteMonitorReg sets up a monitor on the given registered process name (gen.ProcessID)
+ RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error
+ // RouteMonitor sets up a monitor on the given Pid
+ RouteMonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error
+ RouteDemonitor(by etf.Pid, ref etf.Ref) error
+ RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error
+ RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error
+ // RouteNodeDown
+ RouteNodeDown(name string)
}
-// Options struct with bootstrapping options for CreateNode
+// Options defines bootstrapping options for the node
type Options struct {
- Applications []gen.ApplicationBehavior
- ListenRangeBegin uint16
- ListenRangeEnd uint16
- Hidden bool
- EPMDPort uint16
- DisableEPMDServer bool
- DisableEPMD bool // use static routes only
- SendQueueLength int
- RecvQueueLength int
- FragmentationUnit int
- DisableHeaderAtomCache bool
- TLSMode TLSModeType
- TLScrtServer string
- TLSkeyServer string
- TLScrtClient string
- TLSkeyClient string
- // HandshakeVersion. Allowed values 5 or 6. Default version is 5
- HandshakeVersion int
- // ConnectionHandlers defines the number of readers/writers per connection. Default is the number of CPU.
- ConnectionHandlers int
-
- cookie string
- creation uint32
-}
-
-// TLSmodeType should be one of TLSmodeDisabled (default), TLSmodeAuto or TLSmodeStrict
-type TLSmodeType string
-
-// TLSmodeType should be one of TLSmodeDisabled (default), TLSmodeAuto or TLSmodeStrict
-type TLSModeType string
+ // Applications is the list of applications that must be started
+ Applications []gen.ApplicationBehavior
-const (
- // TLSModeDisabled no TLS encryption
- TLSModeDisabled TLSModeType = ""
- // TLSModeAuto generate self-signed certificate
- TLSModeAuto TLSModeType = "auto"
- // TLSModeStrict with validation certificate
- TLSModeStrict TLSModeType = "strict"
-)
+ // Env defines the node environment variables
+ Env map[gen.EnvKey]interface{}
+
+ // Creation. Default value: uint32(time.Now().Unix())
+ Creation uint32
+
+ // Flags defines enabled options for the running node
+ Flags Flags
+
+ // Listen defines a listening port number for accepting incoming connections.
+ Listen uint16
+ // ListenBegin and ListenEnd define a range of port numbers where
+ // the node looks for an available free port to listen on.
+ // Default values are 15000 and 65000 accordingly
+ ListenBegin uint16
+ ListenEnd uint16
+
+ // TLS settings
+ TLS TLS
+
+ // StaticRoutesOnly disables the resolving service (default is the EPMD client) and
+ // limits resolving to the local routes added using AddStaticRoute
+ StaticRoutesOnly bool
+
+ // Resolver defines a resolving service (default is EPMD service, client and server)
+ Resolver Resolver
+
+ // Compression enables compression for outgoing messages
+ Compression bool
+
+ // Handshake defines a handshake handler. By default the DIST
+ // handshake created with dist.CreateHandshake(...) is used
+ Handshake HandshakeInterface
+
+ // Proto defines a proto handler. By default the DIST
+ // proto created with dist.CreateProto(...) is used
+ Proto ProtoInterface
+
+ // Cloud enables Ergo Cloud support
+ Cloud Cloud
+}
+
+type TLS struct {
+ Enabled bool
+ Server tls.Certificate
+ Client tls.Certificate
+ SkipVerify bool
+}
+
+type Cloud struct {
+ Enabled bool
+ ID string
+ Cookie string
+}
+
+type Proxy struct {
+ Enabled bool
+}
+
+// Connection
+type Connection struct {
+ ConnectionInterface
+}
+
+// ConnectionInterface
+type ConnectionInterface interface {
+ Send(from gen.Process, to etf.Pid, message etf.Term) error
+ SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error
+ SendAlias(from gen.Process, to etf.Alias, message etf.Term) error
+
+ Link(local etf.Pid, remote etf.Pid) error
+ Unlink(local etf.Pid, remote etf.Pid) error
+ LinkExit(to etf.Pid, terminated etf.Pid, reason string) error
+
+ Monitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error
+ Demonitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error
+ MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error
+
+ MonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error
+ DemonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error
+ MonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error
+
+ SpawnRequest(behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error
+ SpawnReply(to etf.Pid, ref etf.Ref, spawned etf.Pid) error
+ SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error
+
+ Proxy() error
+}
+
+// Handshake template struct for the custom Handshake implementation
+type Handshake struct {
+ HandshakeInterface
+}
+
+// HandshakeInterface defines the handshake interface
+type HandshakeInterface interface {
+ // Init initializes the handshake.
+ Init(nodename string, creation uint32, flags Flags) error
+ // Start initiates the handshake process. The tls argument means the connection is wrapped in TLS.
+ // Returns the Flags received from the peer during the handshake
+ Start(conn io.ReadWriter, tls bool) (Flags, error)
+ // Accept accepts a handshake process initiated by the other side of this connection.
+ // Returns the name of the connected peer and the Flags received from the peer.
+ Accept(conn io.ReadWriter, tls bool) (string, Flags, error)
+ // Version returns the handshake version. Must be implemented if this handshake is going to be used
+ // for accepting connections (this method is used during registration on the Resolver)
+ Version() HandshakeVersion
+}
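
The Handshake template struct exists to be embedded, so a custom implementation satisfies HandshakeInterface while overriding only what it needs. A skeleton under that assumption (a real handshake must actually negotiate over conn; the type and values below are illustrative):

```go
package example

import (
	"io"

	"github.com/ergo-services/ergo/node"
)

// TrivialHandshake embeds node.Handshake to satisfy node.HandshakeInterface
// and overrides all four methods for this sketch.
type TrivialHandshake struct {
	node.Handshake
	flags node.Flags
}

func (h *TrivialHandshake) Init(nodename string, creation uint32, flags node.Flags) error {
	h.flags = flags
	return nil
}

func (h *TrivialHandshake) Start(conn io.ReadWriter, tls bool) (node.Flags, error) {
	// a real implementation negotiates here and returns the peer's flags
	return h.flags, nil
}

func (h *TrivialHandshake) Accept(conn io.ReadWriter, tls bool) (string, node.Flags, error) {
	// a real implementation learns the peer's name during negotiation
	return "peer@localhost", h.flags, nil
}

func (h *TrivialHandshake) Version() node.HandshakeVersion {
	return node.HandshakeVersion(6)
}
```
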
+
+type HandshakeVersion int
+
+// Proto template struct for the custom Proto implementation
+type Proto struct {
+ ProtoInterface
+}
+
+// ProtoInterface defines the proto interface for a custom Proto implementation
+type ProtoInterface interface {
+ // Init initializes the connection handler
+ Init(ctx context.Context, conn io.ReadWriter, peername string, flags Flags) (ConnectionInterface, error)
+ // Serve serves the connection
+ Serve(connection ConnectionInterface, router CoreRouter)
+ // Terminate is invoked once the Serve callback is finished
+ Terminate(connection ConnectionInterface)
+}
+
+// ProtoOptions
+type ProtoOptions struct {
+ // NumHandlers defines the number of readers/writers per connection. Default is the number of CPU
+ NumHandlers int
+ // MaxMessageSize limits the message size. Default 0 (no limit)
+ MaxMessageSize int
+ // SendQueueLength defines queue size of handler for the outgoing messages. Default 100.
+ SendQueueLength int
+ // RecvQueueLength defines queue size of handler for the incoming messages. Default 100.
+ RecvQueueLength int
+ // FragmentationUnit defines unit size for the fragmentation feature. Default 65000
+ FragmentationUnit int
+ // Compression enables compression for the outgoing messages
+ Compression bool
+ // Proxy defines proxy settings
+ Proxy Proxy
+ // Custom brings a custom set of options to the ProtoInterface.Serve handler
+ Custom CustomProtoOptions
+}
+
+// CustomProtoOptions a custom set of proto options
+type CustomProtoOptions interface{}
+
+// Flags
+type Flags struct {
+ // Enable enables flags customization
+ Enable bool
+ // EnableHeaderAtomCache enables the header atom cache feature
+ EnableHeaderAtomCache bool
+ // EnableBigCreation enables the big creation feature (32-bit creation values in pids and refs)
+ EnableBigCreation bool
+ // EnableBigPidRef accepts a larger amount of data in pids and references
+ EnableBigPidRef bool
+ // EnableFragmentation enables the fragmentation feature for outgoing data
+ EnableFragmentation bool
+ // EnableAlias accepts process aliases
+ EnableAlias bool
+ // EnableRemoteSpawn accepts remote spawn requests
+ EnableRemoteSpawn bool
+ // Compression enables compression support
+ Compression bool
+}
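
Note the Enable field: StartWithContext swaps in DefaultFlags() whenever Enable is false, so a hand-built subset must set it explicitly. A sketch:

```go
package example

import "github.com/ergo-services/ergo/node"

// restrictedFlags builds a custom flag set. Without Enable: true the node
// would silently replace it with node.DefaultFlags().
func restrictedFlags() node.Flags {
	return node.Flags{
		Enable:              true,
		EnableBigCreation:   true,
		EnableBigPidRef:     true,
		EnableFragmentation: true,
		// alias and remote spawn deliberately left disabled
	}
}
```
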
+
+// Resolver defines resolving interface
+type Resolver interface {
+ Register(nodename string, port uint16, options ResolverOptions) error
+ Resolve(peername string) (Route, error)
+}
+
+// ResolverOptions defines resolving options
+type ResolverOptions struct {
+ NodeVersion Version
+ HandshakeVersion HandshakeVersion
+ EnabledTLS bool
+ EnabledProxy bool
+}
+
+// Route
+type Route struct {
+ Name string
+ Host string
+ Port uint16
+ RouteOptions
+}
+
+// RouteOptions
+type RouteOptions struct {
+ Cookie string
+ EnabledTLS bool
+ EnabledProxy bool
+ IsErgo bool
+
+ Cert tls.Certificate
+ Handshake HandshakeInterface
+ Proto ProtoInterface
+ Custom CustomRouteOptions
+}
+
+// CustomRouteOptions a custom set of route options
+type CustomRouteOptions interface{}
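
With StaticRoutesOnly set the resolver becomes optional, and peers must be announced up front through AddStaticRoute with per-route options. A sketch with illustrative values (the node value comes from StartWithContext):

```go
package example

import "github.com/ergo-services/ergo/node"

// addPeerRoute pins "b@localhost" to port 25001 with its own cookie and TLS,
// bypassing EPMD resolution for that peer.
func addPeerRoute(n node.Node) error {
	return n.AddStaticRoute("b@localhost", 25001, node.RouteOptions{
		Cookie:     "peer-secret",
		EnabledTLS: true,
	})
}
```
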
diff --git a/proto/dist/epmd.go b/proto/dist/epmd.go
new file mode 100644
index 00000000..382ca321
--- /dev/null
+++ b/proto/dist/epmd.go
@@ -0,0 +1,229 @@
+package dist
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ergo-services/ergo/lib"
+)
+
+type registeredNode struct {
+ port uint16
+ hidden bool
+ hi uint16
+ lo uint16
+ extra []byte
+}
+
+type epmd struct {
+ port uint16
+ nodes map[string]registeredNode
+ nodesMutex sync.Mutex
+}
+
+func startServerEPMD(ctx context.Context, host string, port uint16) error {
+ lc := net.ListenConfig{}
+ listener, err := lc.Listen(ctx, "tcp", net.JoinHostPort(host, strconv.Itoa(int(port))))
+ if err != nil {
+ lib.Log("Can't start embedded EPMD service: %s", err)
+ return err
+ }
+
+ epmd := epmd{
+ port: port,
+ nodes: make(map[string]registeredNode),
+ }
+ go epmd.serve(listener)
+ lib.Log("Started embedded EMPD service and listen port: %d", port)
+
+ return nil
+}
+
+func (e *epmd) serve(l net.Listener) {
+ for {
+ c, err := l.Accept()
+ if err != nil {
+ lib.Log("EPMD server stopped: %s", err.Error())
+ return
+ }
+ lib.Log("EPMD accepted new connection from %s", c.RemoteAddr().String())
+ go e.handle(c)
+ }
+}
+
+func (e *epmd) handle(c net.Conn) {
+ var name string
+ var node registeredNode
+ buf := make([]byte, 1024)
+
+ defer c.Close()
+ for {
+ n, err := c.Read(buf)
+ lib.Log("Request from EPMD client: %v", buf[:n])
+ if err != nil {
+ lib.Log("EPMD unregistering node: '%s'", name)
+ e.nodesMutex.Lock()
+ delete(e.nodes, name)
+ e.nodesMutex.Unlock()
+ return
+ }
+ // buf[0:2] - length (big endian)
+ if uint16(n-2) != binary.BigEndian.Uint16(buf[0:2]) {
+ continue
+ }
+
+ switch buf[2] {
+ case epmdAliveReq:
+ name, node, err = e.readAliveReq(buf[3:])
+ if err != nil {
+ // send error and close connection
+ e.sendAliveResp(c, 1)
+ return
+ }
+
+ // check if node with this name is already registered
+ e.nodesMutex.Lock()
+ _, exist := e.nodes[name]
+ e.nodesMutex.Unlock()
+ if exist {
+ // send error and close connection
+ e.sendAliveResp(c, 1)
+ return
+ }
+
+ // send alive response
+ if err := e.sendAliveResp(c, 0); err != nil {
+ return
+ }
+
+ // register new node
+ e.nodesMutex.Lock()
+ e.nodes[name] = node
+ e.nodesMutex.Unlock()
+
+ // enable keep alive on this connection
+ if tcp, ok := c.(*net.TCPConn); ok {
+ tcp.SetKeepAlive(true)
+ tcp.SetKeepAlivePeriod(15 * time.Second)
+ tcp.SetNoDelay(true)
+ }
+ continue
+ case epmdPortPleaseReq:
+ requestedName := string(buf[3:n])
+
+ e.nodesMutex.Lock()
+ node, exist := e.nodes[requestedName]
+ e.nodesMutex.Unlock()
+
+ if exist == false {
+ lib.Log("EPMD: looking for '%s'. Not found", name)
+ c.Write([]byte{epmdPortResp, 1})
+ return
+ }
+ e.sendPortPleaseResp(c, requestedName, node)
+ return
+ case epmdNamesReq:
+ e.sendNamesResp(c, buf[3:n])
+ return
+ default:
+ lib.Log("unknown EPMD request")
+ return
+ }
+
+ }
+}
+
+func (e *epmd) readAliveReq(req []byte) (string, registeredNode, error) {
+ if len(req) < 10 {
+ return "", registeredNode{}, fmt.Errorf("Malformed EPMD request %v", req)
+ }
+ // Name length
+ l := binary.BigEndian.Uint16(req[8:10])
+ // Name
+ name := string(req[10 : 10+l])
+ // Hidden
+ hidden := false
+ if req[2] == 72 {
+ hidden = true
+ }
+ // node
+ node := registeredNode{
+ port: binary.BigEndian.Uint16(req[0:2]),
+ hidden: hidden,
+ hi: binary.BigEndian.Uint16(req[4:6]),
+ lo: binary.BigEndian.Uint16(req[6:8]),
+ extra: req[10+l:],
+ }
+
+ return name, node, nil
+}
+
+func (e *epmd) sendAliveResp(c net.Conn, code int) error {
+ buf := make([]byte, 4)
+ buf[0] = epmdAliveResp
+ buf[1] = byte(code)
+
+ // Creation. Ergo doesn't use it. Just for Erlang nodes.
+ binary.BigEndian.PutUint16(buf[2:], uint16(1))
+ _, err := c.Write(buf)
+ return err
+}
+
+func (e *epmd) sendPortPleaseResp(c net.Conn, name string, node registeredNode) {
+ buf := make([]byte, 12+len(name)+2+len(node.extra))
+ buf[0] = epmdPortResp
+
+ // Result 0
+ buf[1] = 0
+ // Port
+ binary.BigEndian.PutUint16(buf[2:4], uint16(node.port))
+ // Hidden
+ if node.hidden {
+ buf[4] = 72
+ } else {
+ buf[4] = 77
+ }
+ // Protocol TCP
+ buf[5] = 0
+ // Highest version
+ binary.BigEndian.PutUint16(buf[6:8], uint16(node.hi))
+ // Lowest version
+ binary.BigEndian.PutUint16(buf[8:10], uint16(node.lo))
+ // Name
+ binary.BigEndian.PutUint16(buf[10:12], uint16(len(name)))
+ offset := 12 + len(name)
+ copy(buf[12:offset], name)
+ // Extra
+ l := len(node.extra)
+ binary.BigEndian.PutUint16(buf[offset:offset+2], uint16(l))
+ copy(buf[offset+2:offset+2+l], node.extra)
+ // send
+ c.Write(buf)
+ return
+}
+
+func (e *epmd) sendNamesResp(c net.Conn, req []byte) {
+ var str strings.Builder
+ var s string
+ var buf [4]byte
+
+ binary.BigEndian.PutUint32(buf[0:4], uint32(e.port))
+ str.WriteString(string(buf[0:]))
+
+ e.nodesMutex.Lock()
+ for k, v := range e.nodes {
+ // io:format("name ~ts at port ~p~n", [NodeName, Port]).
+ s = fmt.Sprintf("name %s at port %d\n", k, v.port)
+ str.WriteString(s)
+ }
+ e.nodesMutex.Unlock()
+
+ c.Write([]byte(str.String()))
+ return
+}
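
The embedded server speaks the standard EPMD wire protocol, so sendNamesResp can be exercised with a plain TCP client issuing a NAMES_REQ: a two-byte big-endian length followed by opcode 110 ('n'), per the Erlang distribution protocol docs (the epmdNamesReq constant itself is defined elsewhere in this package). A standalone sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

// queryNames sends an EPMD NAMES_REQ and prints the "name X at port Y"
// listing produced by sendNamesResp above.
func queryNames(addr string) error {
	c, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer c.Close()

	req := make([]byte, 3)
	binary.BigEndian.PutUint16(req[0:2], 1) // payload length: 1 byte
	req[2] = 110                            // NAMES_REQ opcode ('n')
	if _, err := c.Write(req); err != nil {
		return err
	}

	// reply: 4-byte EPMD port, then the textual node list until EOF
	var port [4]byte
	if _, err := io.ReadFull(c, port[:]); err != nil {
		return err
	}
	rest, err := io.ReadAll(c)
	if err != nil {
		return err
	}
	fmt.Printf("epmd port %d\n%s", binary.BigEndian.Uint32(port[:]), rest)
	return nil
}

func main() {
	if err := queryNames("127.0.0.1:4369"); err != nil {
		fmt.Println("query failed:", err)
	}
}
```
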
diff --git a/proto/dist/flusher.go b/proto/dist/flusher.go
new file mode 100644
index 00000000..43ebad09
--- /dev/null
+++ b/proto/dist/flusher.go
@@ -0,0 +1,98 @@
+package dist
+
+import (
+ "bufio"
+ "io"
+ "sync"
+ "time"
+)
+
+var (
+ // KeepAlive packet is just 4 bytes with zero value
+ keepAlivePacket = []byte{0, 0, 0, 0}
+)
+
+func newLinkFlusher(w io.Writer, latency time.Duration) *linkFlusher {
+ return &linkFlusher{
+ latency: latency,
+ writer: bufio.NewWriter(w),
+ w: w, // in case we skip buffering
+ }
+}
+
+type linkFlusher struct {
+ mutex sync.Mutex
+ latency time.Duration
+ writer *bufio.Writer
+ w io.Writer
+
+ timer *time.Timer
+ pending bool
+}
+
+func (lf *linkFlusher) Write(b []byte) (int, error) {
+ lf.mutex.Lock()
+ defer lf.mutex.Unlock()
+
+ l := len(b)
+ lenB := l
+
+	// write long data directly to the socket, bypassing the buffer
+ if l > 64000 {
+ for {
+ n, e := lf.w.Write(b[lenB-l:])
+ if e != nil {
+ return n, e
+ }
+			// check whether anything is left
+ l -= n
+ if l > 0 {
+ continue
+ }
+ return lenB, nil
+ }
+ }
+
+ // write data to the buffer
+ for {
+ n, e := lf.writer.Write(b)
+ if e != nil {
+ return n, e
+ }
+		// check whether anything is left
+ l -= n
+ if l > 0 {
+ continue
+ }
+ break
+ }
+
+ if lf.pending {
+ return lenB, nil
+ }
+
+ lf.pending = true
+
+ if lf.timer != nil {
+ lf.timer.Reset(lf.latency)
+ return lenB, nil
+ }
+
+ lf.timer = time.AfterFunc(lf.latency, func() {
+
+ lf.mutex.Lock()
+ defer lf.mutex.Unlock()
+
+ lf.writer.Flush()
+ lf.pending = false
+ })
+
+ return lenB, nil
+
+}
+
+func (lf *linkFlusher) Stop() {
+ if lf.timer != nil {
+ lf.timer.Stop()
+ }
+}
diff --git a/proto/dist/handshake.go b/proto/dist/handshake.go
new file mode 100644
index 00000000..07677656
--- /dev/null
+++ b/proto/dist/handshake.go
@@ -0,0 +1,770 @@
+package dist
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/rand"
+ "time"
+
+ "github.com/ergo-services/ergo/lib"
+ "github.com/ergo-services/ergo/node"
+)
+
+const (
+ HandshakeVersion5 node.HandshakeVersion = 5
+ HandshakeVersion6 node.HandshakeVersion = 6
+
+ DefaultHandshakeVersion = HandshakeVersion5
+ DefaultHandshakeTimeout = 5 * time.Second
+
+ // distribution flags are defined here https://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-flags
+ flagPublished nodeFlagId = 0x1
+ flagAtomCache = 0x2
+ flagExtendedReferences = 0x4
+ flagDistMonitor = 0x8
+ flagFunTags = 0x10
+ flagDistMonitorName = 0x20
+ flagHiddenAtomCache = 0x40
+ flagNewFunTags = 0x80
+ flagExtendedPidsPorts = 0x100
+ flagExportPtrTag = 0x200
+ flagBitBinaries = 0x400
+ flagNewFloats = 0x800
+ flagUnicodeIO = 0x1000
+ flagDistHdrAtomCache = 0x2000
+ flagSmallAtomTags = 0x4000
+ flagUTF8Atoms = 0x10000
+ flagMapTag = 0x20000
+ flagBigCreation = 0x40000
+ flagSendSender = 0x80000 // since OTP.21 enable replacement for SEND (distProtoSEND by distProtoSEND_SENDER)
+ flagBigSeqTraceLabels = 0x100000
+ flagExitPayload = 0x400000 // since OTP.22 enable replacement for EXIT, EXIT2, MONITOR_P_EXIT
+ flagFragments = 0x800000
+ flagHandshake23 = 0x1000000 // new connection setup handshake (version 6) introduced in OTP 23
+ flagUnlinkID = 0x2000000
+ // for 64bit flags
+ flagSpawn = 1 << 32
+ flagNameMe = 1 << 33
+ flagV4NC = 1 << 34
+ flagAlias = 1 << 35
+)
+
+type nodeFlagId uint64
+type nodeFlags nodeFlagId
+
+func (nf nodeFlags) toUint32() uint32 {
+ return uint32(nf)
+}
+
+func (nf nodeFlags) toUint64() uint64 {
+ return uint64(nf)
+}
+
+func (nf nodeFlags) isSet(f nodeFlagId) bool {
+ return (uint64(nf) & uint64(f)) != 0
+}
+
+func toNodeFlags(f ...nodeFlagId) nodeFlags {
+ var flags uint64
+ for _, v := range f {
+ flags |= uint64(v)
+ }
+ return nodeFlags(flags)
+}
+
+// DistHandshake implements Erlang handshake
+type DistHandshake struct {
+ node.Handshake
+ nodename string
+ flags node.Flags
+ creation uint32
+ challenge uint32
+ options HandshakeOptions
+}
+
+type HandshakeOptions struct {
+ Timeout time.Duration
+ Version node.HandshakeVersion // 5 or 6
+ Cookie string
+}
+
+func CreateHandshake(options HandshakeOptions) node.HandshakeInterface {
+ // must be 5 or 6
+ if options.Version != HandshakeVersion5 && options.Version != HandshakeVersion6 {
+ options.Version = DefaultHandshakeVersion
+ }
+
+ if options.Timeout == 0 {
+ options.Timeout = DefaultHandshakeTimeout
+ }
+ return &DistHandshake{
+ options: options,
+ challenge: rand.Uint32(),
+ }
+}
+
+// Init implements Handshake interface method
+func (dh *DistHandshake) Init(nodename string, creation uint32, flags node.Flags) error {
+ dh.nodename = nodename
+ dh.creation = creation
+ dh.flags = flags
+ return nil
+}
+
+func (dh *DistHandshake) Version() node.HandshakeVersion {
+ return dh.options.Version
+}
+
+func (dh *DistHandshake) Start(conn io.ReadWriter, tls bool) (node.Flags, error) {
+
+ var peer_challenge uint32
+ var peer_nodeFlags nodeFlags
+ var peer_Flags node.Flags
+
+ node_nodeFlags := composeFlags(dh.flags)
+
+ b := lib.TakeBuffer()
+ defer lib.ReleaseBuffer(b)
+
+ var await []byte
+
+ if dh.options.Version == HandshakeVersion5 {
+ dh.composeName(b, tls, node_nodeFlags)
+ // the next message must be send_status 's' or send_challenge 'n' (for
+ // handshake version 5) or 'N' (for handshake version 6)
+ await = []byte{'s', 'n', 'N'}
+ } else {
+ dh.composeNameVersion6(b, tls, node_nodeFlags)
+ await = []byte{'s', 'N'}
+ }
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_Flags, e
+ }
+
+ // define timeout for the handshaking
+ timer := time.NewTimer(dh.options.Timeout)
+ defer timer.Stop()
+
+ asyncReadChannel := make(chan error, 2)
+ asyncRead := func() {
+ _, e := b.ReadDataFrom(conn, 512)
+ asyncReadChannel <- e
+ }
+
+ // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
+ // Every message in the handshake starts with a 16-bit big-endian integer,
+ // which contains the message length (not counting the two initial bytes).
+ // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
+ // that after the handshake, the distribution switches to 4 byte packet headers.
+ expectingBytes := 2
+ if tls {
+ // TLS connection has 4 bytes packet length header
+ expectingBytes = 4
+ }
+
+ for {
+ go asyncRead()
+
+ select {
+ case <-timer.C:
+ return peer_Flags, fmt.Errorf("handshake timeout")
+
+ case e := <-asyncReadChannel:
+ if e != nil {
+ return peer_Flags, e
+ }
+
+ next:
+ l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
+ buffer := b.B[expectingBytes:]
+
+ if len(buffer) < int(l) {
+ return peer_Flags, fmt.Errorf("malformed handshake (wrong packet length)")
+ }
+
+			// check whether the received message type matches one of the expected ('await') values
+ if bytes.Count(await, buffer[0:1]) == 0 {
+ return peer_Flags, fmt.Errorf("malformed handshake (wrong response)")
+ }
+
+ switch buffer[0] {
+ case 'n':
+ // 'n' + 2 (version) + 4 (flags) + 4 (challenge) + name...
+ if len(b.B) < 12 {
+ return peer_Flags, fmt.Errorf("malformed handshake ('n')")
+ }
+
+ // ignore peer_name value if we initiate the connection
+ peer_challenge, _, peer_nodeFlags = dh.readChallenge(b.B[1:])
+ if peer_challenge == 0 {
+					return peer_Flags, fmt.Errorf("malformed handshake (handshake version mismatch)")
+ }
+ b.Reset()
+
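+				// reply with our own challenge and the MD5 digest computed from
+				// the peer's challenge and the shared cookie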
+ dh.composeChallengeReply(b, peer_challenge, tls)
+
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_Flags, e
+ }
+				// keep 's' (status) in the await list in case it arrives after the 'n' or 'N' message;
+				// yes, sometimes that happens
+ await = []byte{'s', 'a'}
+
+ case 'N':
+				// peer supports version 6
+
+ // The new challenge message format (version 6)
+ // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
+ if len(buffer) < 16 {
+ return peer_Flags, fmt.Errorf("malformed handshake ('N' length)")
+ }
+
+ // ignore peer_name value if we initiate the connection
+ peer_challenge, _, peer_nodeFlags = dh.readChallengeVersion6(buffer[1:])
+ b.Reset()
+
+ if dh.options.Version == HandshakeVersion5 {
+ // upgrade handshake to version 6 by sending complement message
+ dh.composeComplement(b, node_nodeFlags, tls)
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_Flags, e
+ }
+ }
+
+ dh.composeChallengeReply(b, peer_challenge, tls)
+
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_Flags, e
+ }
+
+				// keep 's' (send_status message) in the await list in case it arrives after the 'n' or 'N' message
+ await = []byte{'s', 'a'}
+
+ case 'a':
+ // 'a' + 16 (digest)
+ if len(buffer) != 17 {
+ return peer_Flags, fmt.Errorf("malformed handshake ('a' length of digest)")
+ }
+
+ // 'a' + 16 (digest)
+ digest := genDigest(dh.challenge, dh.options.Cookie)
+				if !bytes.Equal(buffer[1:17], digest) {
+ return peer_Flags, fmt.Errorf("malformed handshake ('a' digest)")
+ }
+
+ // handshaked
+ peer_Flags = node.DefaultFlags()
+ peer_Flags.EnableFragmentation = peer_nodeFlags.isSet(flagFragments)
+ peer_Flags.EnableBigCreation = peer_nodeFlags.isSet(flagBigCreation)
+ peer_Flags.EnableHeaderAtomCache = peer_nodeFlags.isSet(flagDistHdrAtomCache)
+ peer_Flags.EnableAlias = peer_nodeFlags.isSet(flagAlias)
+ peer_Flags.EnableRemoteSpawn = peer_nodeFlags.isSet(flagSpawn)
+ peer_Flags.EnableBigPidRef = peer_nodeFlags.isSet(flagV4NC)
+ return peer_Flags, nil
+
+ case 's':
+				if !dh.readStatus(buffer[1:]) {
+ return peer_Flags, fmt.Errorf("handshake negotiation failed")
+ }
+
+ await = []byte{'n', 'N'}
+ // "sok"
+ if len(buffer) > 4 {
+ b.B = b.B[expectingBytes+3:]
+ goto next
+ }
+ b.Reset()
+
+ default:
+				return peer_Flags, fmt.Errorf("malformed handshake (unknown message '%c')", buffer[0])
+ }
+
+ }
+
+ }
+
+}
+
+func (dh *DistHandshake) Accept(conn io.ReadWriter, tls bool) (string, node.Flags, error) {
+ var peer_challenge uint32
+ var peer_name string
+ var peer_nodeFlags nodeFlags
+ var peer_Flags node.Flags
+ var err error
+
+ node_nodeFlags := composeFlags(dh.flags)
+
+ b := lib.TakeBuffer()
+ defer lib.ReleaseBuffer(b)
+
+ var await []byte
+
+ // define timeout for the handshaking
+ timer := time.NewTimer(dh.options.Timeout)
+ defer timer.Stop()
+
+ asyncReadChannel := make(chan error, 2)
+ asyncRead := func() {
+ _, e := b.ReadDataFrom(conn, 512)
+ asyncReadChannel <- e
+ }
+
+ // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake
+ // Every message in the handshake starts with a 16-bit big-endian integer,
+ // which contains the message length (not counting the two initial bytes).
+ // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice
+ // that after the handshake, the distribution switches to 4 byte packet headers.
+ expectingBytes := 2
+ if tls {
+ // TLS connection has 4 bytes packet length header
+ expectingBytes = 4
+ }
+
+	// the first incoming message must be 'send_name' ('n' for handshake
+	// version 5 or 'N' for handshake version 6)
+ await = []byte{'n', 'N'}
+
+ for {
+ go asyncRead()
+
+ select {
+ case <-timer.C:
+ return peer_name, peer_Flags, fmt.Errorf("handshake accept timeout")
+ case e := <-asyncReadChannel:
+ if e != nil {
+ return peer_name, peer_Flags, e
+ }
+
+ if b.Len() < expectingBytes+1 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake (too short packet)")
+ }
+
+ next:
+ l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes])
+ buffer := b.B[expectingBytes:]
+
+ if len(buffer) < int(l) {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake (wrong packet length)")
+ }
+
+ if bytes.Count(await, buffer[0:1]) == 0 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake (wrong response %d)", buffer[0])
+ }
+
+ switch buffer[0] {
+ case 'n':
+ if len(buffer) < 8 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('n' length)")
+ }
+
+ peer_name, peer_nodeFlags, err = dh.readName(buffer[1:])
+ if err != nil {
+ return peer_name, peer_Flags, err
+ }
+ b.Reset()
+ dh.composeStatus(b, tls)
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('n' accept name)")
+ }
+
+ b.Reset()
+ if peer_nodeFlags.isSet(flagHandshake23) {
+ dh.composeChallengeVersion6(b, tls, node_nodeFlags)
+ await = []byte{'s', 'r', 'c'}
+ } else {
+ dh.composeChallenge(b, tls, node_nodeFlags)
+ await = []byte{'s', 'r'}
+ }
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_name, peer_Flags, e
+ }
+
+ case 'N':
+ // The new challenge message format (version 6)
+ // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name
+ if len(buffer) < 16 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('N' length)")
+ }
+ peer_name, peer_nodeFlags, err = dh.readNameVersion6(buffer[1:])
+ if err != nil {
+ return peer_name, peer_Flags, err
+ }
+ b.Reset()
+ dh.composeStatus(b, tls)
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('N' accept name)")
+ }
+
+ b.Reset()
+ dh.composeChallengeVersion6(b, tls, node_nodeFlags)
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_name, peer_Flags, e
+ }
+
+ await = []byte{'s', 'r'}
+
+ case 'c':
+ if len(buffer) < 9 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('c' length)")
+ }
+ peer_nodeFlags = dh.readComplement(buffer[1:], peer_nodeFlags)
+
+ await = []byte{'r'}
+
+ if len(buffer) > 9 {
+ b.B = b.B[expectingBytes+9:]
+ goto next
+ }
+ b.Reset()
+
+ case 'r':
+ var valid bool
+ if len(buffer) < 19 {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('r' length)")
+ }
+
+ peer_challenge, valid = dh.validateChallengeReply(buffer[1:])
+				if !valid {
+ return peer_name, peer_Flags, fmt.Errorf("malformed handshake ('r' invalid reply)")
+ }
+ b.Reset()
+
+ dh.composeChallengeAck(b, peer_challenge, tls)
+ if e := b.WriteDataTo(conn); e != nil {
+ return peer_name, peer_Flags, e
+ }
+
+ // handshaked
+ peer_Flags = node.DefaultFlags()
+ peer_Flags.EnableFragmentation = peer_nodeFlags.isSet(flagFragments)
+ peer_Flags.EnableBigCreation = peer_nodeFlags.isSet(flagBigCreation)
+ peer_Flags.EnableHeaderAtomCache = peer_nodeFlags.isSet(flagDistHdrAtomCache)
+ peer_Flags.EnableAlias = peer_nodeFlags.isSet(flagAlias)
+ peer_Flags.EnableRemoteSpawn = peer_nodeFlags.isSet(flagSpawn)
+ peer_Flags.EnableBigPidRef = peer_nodeFlags.isSet(flagV4NC)
+
+ return peer_name, peer_Flags, nil
+
+ case 's':
+				if !dh.readStatus(buffer[1:]) {
+ return peer_name, peer_Flags, fmt.Errorf("link status != ok")
+ }
+
+ await = []byte{'c', 'r'}
+ if len(buffer) > 4 {
+ b.B = b.B[expectingBytes+3:]
+ goto next
+ }
+ b.Reset()
+
+ default:
+				return peer_name, peer_Flags, fmt.Errorf("malformed handshake (unknown code %d)", buffer[0])
+ }
+
+ }
+
+ }
+}
+
+// private functions
+
+func (dh *DistHandshake) composeName(b *lib.Buffer, tls bool, flags nodeFlags) {
+ version := uint16(dh.options.Version)
+ if tls {
+ b.Allocate(11)
+ dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename)
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.B[4] = 'n'
+ binary.BigEndian.PutUint16(b.B[5:7], version) // uint16
+ binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32
+ b.Append([]byte(dh.nodename))
+ return
+ }
+
+ b.Allocate(9)
+ dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'n'
+ binary.BigEndian.PutUint16(b.B[3:5], version) // uint16
+ binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32
+ b.Append([]byte(dh.nodename))
+}
+
+func (dh *DistHandshake) composeNameVersion6(b *lib.Buffer, tls bool, flags nodeFlags) {
+ creation := uint32(dh.creation)
+ if tls {
+ b.Allocate(19)
+ dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename)
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.B[4] = 'N'
+ binary.BigEndian.PutUint64(b.B[5:13], flags.toUint64()) // uint64
+ binary.BigEndian.PutUint32(b.B[13:17], creation) //uint32
+ binary.BigEndian.PutUint16(b.B[17:19], uint16(len(dh.nodename))) // uint16
+ b.Append([]byte(dh.nodename))
+ return
+ }
+
+ b.Allocate(17)
+ dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'N'
+ binary.BigEndian.PutUint64(b.B[3:11], flags.toUint64()) // uint64
+ binary.BigEndian.PutUint32(b.B[11:15], creation) // uint32
+ binary.BigEndian.PutUint16(b.B[15:17], uint16(len(dh.nodename))) // uint16
+ b.Append([]byte(dh.nodename))
+}
+
+func (dh *DistHandshake) readName(b []byte) (string, nodeFlags, error) {
+ if len(b[6:]) > 250 {
+		return "", 0, fmt.Errorf("malformed node name")
+ }
+ nodename := string(b[6:])
+ flags := nodeFlags(binary.BigEndian.Uint32(b[2:6]))
+
+	// the version value is ignored; it's always 5 according to the spec
+ // version := binary.BigEndian.Uint16(b[0:2])
+
+ return nodename, flags, nil
+}
+
+func (dh *DistHandshake) readNameVersion6(b []byte) (string, nodeFlags, error) {
+ nameLen := int(binary.BigEndian.Uint16(b[12:14]))
+ if nameLen > 250 {
+		return "", 0, fmt.Errorf("malformed node name")
+ }
+ nodename := string(b[14 : 14+nameLen])
+ flags := nodeFlags(binary.BigEndian.Uint64(b[0:8]))
+
+	// the peer's creation value is ignored
+ // creation:= binary.BigEndian.Uint32(b[8:12]),
+
+ return nodename, flags, nil
+}
+
+func (dh *DistHandshake) composeStatus(b *lib.Buffer, tls bool) {
+	// there are a few options for the status: ok, ok_simultaneous, nok, not_allowed, alive.
+	// More details here: https://erlang.org/doc/apps/erts/erl_dist_protocol.html#the-handshake-in-detail
+	// We support "ok" only; in any other case the link is simply closed.
+
+ if tls {
+ b.Allocate(4)
+ dataLength := 3 // 's' + "ok"
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.Append([]byte("sok"))
+ return
+ }
+
+ b.Allocate(2)
+ dataLength := 3 // 's' + "ok"
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.Append([]byte("sok"))
+
+}
+
+func (dh *DistHandshake) readStatus(msg []byte) bool {
+	// guard against a short message before comparing
+	return len(msg) > 1 && string(msg[:2]) == "ok"
+}
+
+func (dh *DistHandshake) composeChallenge(b *lib.Buffer, tls bool, flags nodeFlags) {
+ if tls {
+ b.Allocate(15)
+ dataLength := uint32(11 + len(dh.nodename))
+ binary.BigEndian.PutUint32(b.B[0:4], dataLength)
+ b.B[4] = 'n'
+ binary.BigEndian.PutUint16(b.B[5:7], uint16(dh.options.Version)) // uint16
+ binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32
+ binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32
+ b.Append([]byte(dh.nodename))
+ return
+ }
+
+ b.Allocate(13)
+ dataLength := 11 + len(dh.nodename)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'n'
+ binary.BigEndian.PutUint16(b.B[3:5], uint16(dh.options.Version)) // uint16
+ binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32
+ binary.BigEndian.PutUint32(b.B[9:13], dh.challenge) // uint32
+ b.Append([]byte(dh.nodename))
+}
+
+func (dh *DistHandshake) composeChallengeVersion6(b *lib.Buffer, tls bool, flags nodeFlags) {
+ if tls {
+		// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(dh.nodename))
+ b.Allocate(23)
+ dataLength := 19 + len(dh.nodename)
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.B[4] = 'N'
+ binary.BigEndian.PutUint64(b.B[5:13], uint64(flags)) // uint64
+ binary.BigEndian.PutUint32(b.B[13:17], dh.challenge) // uint32
+ binary.BigEndian.PutUint32(b.B[17:21], dh.creation) // uint32
+ binary.BigEndian.PutUint16(b.B[21:23], uint16(len(dh.nodename))) // uint16
+ b.Append([]byte(dh.nodename))
+ return
+ }
+
+	// 1 ('N') + 8 (flags) + 4 (challenge) + 4 (creation) + 2 (len(dh.nodename))
+ b.Allocate(21)
+ dataLength := 19 + len(dh.nodename)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'N'
+ binary.BigEndian.PutUint64(b.B[3:11], uint64(flags)) // uint64
+ binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32
+ binary.BigEndian.PutUint32(b.B[15:19], dh.creation) // uint32
+ binary.BigEndian.PutUint16(b.B[19:21], uint16(len(dh.nodename))) // uint16
+ b.Append([]byte(dh.nodename))
+}
+
+// returns challenge, nodename, nodeFlags
+func (dh *DistHandshake) readChallenge(msg []byte) (challenge uint32, nodename string, flags nodeFlags) {
+ version := binary.BigEndian.Uint16(msg[0:2])
+ if version != uint16(HandshakeVersion5) {
+ return
+ }
+ challenge = binary.BigEndian.Uint32(msg[6:10])
+ nodename = string(msg[10:])
+ flags = nodeFlags(binary.BigEndian.Uint32(msg[2:6]))
+ return
+}
+
+func (dh *DistHandshake) readChallengeVersion6(msg []byte) (challenge uint32, nodename string, flags nodeFlags) {
+ lenName := int(binary.BigEndian.Uint16(msg[16:18]))
+ challenge = binary.BigEndian.Uint32(msg[8:12])
+ nodename = string(msg[18 : 18+lenName])
+ flags = nodeFlags(binary.BigEndian.Uint64(msg[0:8]))
+
+ // don't care about 'creation'
+ // creation := binary.BigEndian.Uint32(msg[12:16]),
+ return
+}
+
+func (dh *DistHandshake) readComplement(msg []byte, peer_flags nodeFlags) nodeFlags {
+ flags := uint64(binary.BigEndian.Uint32(msg[0:4])) << 32
+ peer_flags = nodeFlags(peer_flags.toUint64() | flags)
+ // creation = binary.BigEndian.Uint32(msg[4:8])
+ return peer_flags
+}
+
+func (dh *DistHandshake) validateChallengeReply(b []byte) (uint32, bool) {
+ challenge := binary.BigEndian.Uint32(b[:4])
+ digestB := b[4:]
+
+ digestA := genDigest(dh.challenge, dh.options.Cookie)
+ return challenge, bytes.Equal(digestA[:], digestB)
+}
+
+func (dh *DistHandshake) composeChallengeAck(b *lib.Buffer, peer_challenge uint32, tls bool) {
+ if tls {
+ b.Allocate(5)
+ dataLength := uint32(17) // 'a' + 16 (digest)
+ binary.BigEndian.PutUint32(b.B[0:4], dataLength)
+ b.B[4] = 'a'
+ digest := genDigest(peer_challenge, dh.options.Cookie)
+ b.Append(digest)
+ return
+ }
+
+ b.Allocate(3)
+ dataLength := uint16(17) // 'a' + 16 (digest)
+ binary.BigEndian.PutUint16(b.B[0:2], dataLength)
+ b.B[2] = 'a'
+ digest := genDigest(peer_challenge, dh.options.Cookie)
+ b.Append(digest)
+}
+
+func (dh *DistHandshake) composeChallengeReply(b *lib.Buffer, challenge uint32, tls bool) {
+ if tls {
+ digest := genDigest(challenge, dh.options.Cookie)
+ b.Allocate(9)
+ dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest)
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.B[4] = 'r'
+ binary.BigEndian.PutUint32(b.B[5:9], dh.challenge) // uint32
+ b.Append(digest)
+ return
+ }
+
+ b.Allocate(7)
+ digest := genDigest(challenge, dh.options.Cookie)
+ dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'r'
+ binary.BigEndian.PutUint32(b.B[3:7], dh.challenge) // uint32
+ b.Append(digest)
+}
+
+func (dh *DistHandshake) composeComplement(b *lib.Buffer, flags nodeFlags, tls bool) {
+	// Erlang casts the creation value to int32 in order to follow its
+	// handshake implementation. Ergo doesn't care about that.
+ node_flags := uint32(flags.toUint64() >> 32)
+ if tls {
+ b.Allocate(13)
+ dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
+ binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength))
+ b.B[4] = 'c'
+ binary.BigEndian.PutUint32(b.B[5:9], node_flags)
+ binary.BigEndian.PutUint32(b.B[9:13], dh.creation)
+ return
+ }
+
+ dataLength := 9 // 1 + 4 (flag high) + 4 (creation)
+ b.Allocate(11)
+ binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength))
+ b.B[2] = 'c'
+ binary.BigEndian.PutUint32(b.B[3:7], node_flags)
+ binary.BigEndian.PutUint32(b.B[7:11], dh.creation)
+}
+
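+// genDigest computes the handshake digest as MD5(Cookie ++ Challenge), with
+// the challenge rendered as a decimal string, as defined by the distribution
+// handshake specification.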
+func genDigest(challenge uint32, cookie string) []byte {
+ s := fmt.Sprintf("%s%d", cookie, challenge)
+ digest := md5.Sum([]byte(s))
+ return digest[:]
+}
+
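+// composeFlags maps the node's configured capabilities onto the wire-level
+// distribution flags announced during the handshake.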
+func composeFlags(flags node.Flags) nodeFlags {
+
+ // default flags
+ enabledFlags := []nodeFlagId{
+ flagPublished,
+ flagUnicodeIO,
+ flagDistMonitor,
+ flagDistMonitorName,
+ flagExtendedPidsPorts,
+ flagExtendedReferences,
+ flagAtomCache,
+ flagHiddenAtomCache,
+ flagNewFunTags,
+ flagSmallAtomTags,
+ flagUTF8Atoms,
+ flagMapTag,
+ flagHandshake23,
+ }
+
+ // optional flags
+ if flags.EnableHeaderAtomCache {
+ enabledFlags = append(enabledFlags, flagDistHdrAtomCache)
+ }
+ if flags.EnableFragmentation {
+ enabledFlags = append(enabledFlags, flagFragments)
+ }
+ if flags.EnableBigCreation {
+ enabledFlags = append(enabledFlags, flagBigCreation)
+ }
+ if flags.EnableAlias {
+ enabledFlags = append(enabledFlags, flagAlias)
+ }
+ if flags.EnableBigPidRef {
+ enabledFlags = append(enabledFlags, flagV4NC)
+ }
+ if flags.EnableRemoteSpawn {
+ enabledFlags = append(enabledFlags, flagSpawn)
+ }
+ return toNodeFlags(enabledFlags...)
+}
diff --git a/proto/dist/proto.go b/proto/dist/proto.go
new file mode 100644
index 00000000..876a5a0a
--- /dev/null
+++ b/proto/dist/proto.go
@@ -0,0 +1,1335 @@
+package dist
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ergo-services/ergo/etf"
+ "github.com/ergo-services/ergo/gen"
+ "github.com/ergo-services/ergo/lib"
+ "github.com/ergo-services/ergo/node"
+)
+
+var (
+ ErrMissingInCache = fmt.Errorf("missing in cache")
+ ErrMalformed = fmt.Errorf("malformed")
+ ErrOverloadConnection = fmt.Errorf("connection buffer is overloaded")
+)
+
+func init() {
+ rand.Seed(time.Now().UTC().UnixNano())
+}
+
+const (
+ defaultLatency = 200 * time.Nanosecond // for linkFlusher
+
+ defaultCleanTimeout = 5 * time.Second // for checkClean
+ defaultCleanDeadline = 30 * time.Second // for checkClean
+
+ // http://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution_header
+ protoDist = 131
+ protoDistCompressed = 80
+ protoDistMessage = 68
+ protoDistFragment1 = 69
+ protoDistFragmentN = 70
+)
+
+type fragmentedPacket struct {
+ buffer *lib.Buffer
+ disordered *lib.Buffer
+ disorderedSlices map[uint64][]byte
+ fragmentID uint64
+ lastUpdate time.Time
+}
+
+type distConnection struct {
+ node.Connection
+
+ nodename string
+ peername string
+ ctx context.Context
+
+ // peer flags
+ flags node.Flags
+
+ // compression
+ compression bool
+
+ // socket
+ conn io.ReadWriter
+ cancelContext context.CancelFunc
+
+ // route incoming messages
+ router node.CoreRouter
+
+ // writer
+ flusher *linkFlusher
+
+ // senders list of channels for the sending goroutines
+ senders senders
+ // receivers list of channels for the receiving goroutines
+ receivers receivers
+
+ // atom cache for incoming messages
+ cacheIn [2048]*etf.Atom
+ cacheInMutex sync.RWMutex
+
+ // atom cache for outgoing messages
+ cacheOut *etf.AtomCache
+
+ // fragmentation sequence ID
+ sequenceID int64
+ fragments map[uint64]*fragmentedPacket
+ fragmentsMutex sync.Mutex
+
+ // check and clean lost fragments
+ checkCleanPending bool
+ checkCleanTimer *time.Timer
+ checkCleanTimeout time.Duration // default is 5 seconds
+ checkCleanDeadline time.Duration // how long we wait for the next fragment of the certain sequenceID. Default is 30 seconds
+}
+
+type distProto struct {
+ node.Proto
+ nodename string
+ options node.ProtoOptions
+}
+
+func CreateProto(nodename string, options node.ProtoOptions) node.ProtoInterface {
+ return &distProto{
+ nodename: nodename,
+ options: options,
+ }
+}
+
+//
+// node.Proto interface implementation
+//
+
+type senders struct {
+ sender []*senderChannel
+ n int32
+ i int32
+}
+
+type senderChannel struct {
+ sync.Mutex
+ sendChannel chan *sendMessage
+}
+
+type sendMessage struct {
+ control etf.Term
+ payload etf.Term
+ compression bool
+}
+
+type receivers struct {
+ recv []chan *lib.Buffer
+ n int32
+ i int32
+}
+
+func (dp *distProto) Init(ctx context.Context, conn io.ReadWriter, peername string, flags node.Flags) (node.ConnectionInterface, error) {
+ connection := &distConnection{
+ nodename: dp.nodename,
+ peername: peername,
+ flags: flags,
+ compression: dp.options.Compression,
+ conn: conn,
+ }
+ connection.ctx, connection.cancelContext = context.WithCancel(ctx)
+
+	// initialize the atom cache if it's enabled
+ if flags.EnableHeaderAtomCache {
+ connection.cacheOut = etf.StartAtomCache()
+ }
+
+ // create connection buffering
+ connection.flusher = newLinkFlusher(conn, defaultLatency)
+
+	// do not share channels across performance-critical code paths; it hurts performance
+ connection.receivers = receivers{
+ recv: make([]chan *lib.Buffer, dp.options.NumHandlers),
+ n: int32(dp.options.NumHandlers),
+ }
+
+ // run readers for incoming messages
+ for i := 0; i < dp.options.NumHandlers; i++ {
+ // run packet reader routines (decoder)
+ recv := make(chan *lib.Buffer, dp.options.RecvQueueLength)
+ connection.receivers.recv[i] = recv
+ go connection.receiver(recv)
+ }
+
+ connection.senders = senders{
+ sender: make([]*senderChannel, dp.options.NumHandlers),
+ n: int32(dp.options.NumHandlers),
+ }
+
+	// run writers for outgoing messages
+ for i := 0; i < dp.options.NumHandlers; i++ {
+ // run writer routines (encoder)
+ send := make(chan *sendMessage, dp.options.SendQueueLength)
+ connection.senders.sender[i] = &senderChannel{
+ sendChannel: send,
+ }
+ go connection.sender(send, dp.options, connection.flags)
+ }
+
+ return connection, nil
+}
+
+func (dp *distProto) Serve(ci node.ConnectionInterface, router node.CoreRouter) {
+ connection, ok := ci.(*distConnection)
+ if !ok {
+ fmt.Println("conn is not a *distConnection type")
+ return
+ }
+
+ connection.router = router
+
+ // run read loop
+ var err error
+ var packetLength int
+
+ b := lib.TakeBuffer()
+ for {
+ packetLength, err = connection.read(b, dp.options.MaxMessageSize)
+
+ // validation
+ if err != nil || packetLength == 0 {
+ // link was closed or got malformed data
+ if err != nil {
+ fmt.Println("link was closed", connection.peername, "error:", err)
+ }
+ lib.ReleaseBuffer(b)
+ return
+ }
+
+		// check whether the context has been canceled
+ if connection.ctx.Err() != nil {
+ // canceled
+ lib.ReleaseBuffer(b)
+ return
+ }
+
+ // take the new buffer for the next reading and append the tail
+ // (which is part of the next packet)
+ b1 := lib.TakeBuffer()
+ b1.Set(b.B[packetLength:])
+
+ // cut the tail and send it further for handling.
+ // buffer b has to be released by the reader of
+ // recv channel (link.ReadHandlePacket)
+ b.B = b.B[:packetLength]
+ connection.receivers.recv[connection.receivers.i] <- b
+
+ // set new buffer as a current for the next reading
+ b = b1
+
+ // round-robin switch to the next receiver
+ connection.receivers.i++
+ if connection.receivers.i < connection.receivers.n {
+ continue
+ }
+ connection.receivers.i = 0
+ }
+
+}
+
+func (dp *distProto) Terminate(ci node.ConnectionInterface) {
+ connection, ok := ci.(*distConnection)
+ if !ok {
+ fmt.Println("conn is not a *distConnection type")
+ return
+ }
+
+ for i := 0; i < dp.options.NumHandlers; i++ {
+ sender := connection.senders.sender[i]
+ if sender != nil {
+ sender.Lock()
+ close(sender.sendChannel)
+ sender.sendChannel = nil
+ sender.Unlock()
+ connection.senders.sender[i] = nil
+ }
+ if connection.receivers.recv[i] != nil {
+ close(connection.receivers.recv[i])
+ }
+ }
+ if connection.cacheOut != nil {
+ connection.cacheOut.Stop()
+ }
+ connection.flusher.Stop()
+ connection.cancelContext()
+}
+
+// node.Connection interface implementation
+
+func (dc *distConnection) Send(from gen.Process, to etf.Pid, message etf.Term) error {
+ var compression bool
+
+ if dc.flags.Compression {
+		if dc.compression {
+ compression = true
+ } else {
+ compression = from.Compression()
+ }
+ }
+
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoSEND, etf.Atom(""), to},
+ payload: message,
+ compression: compression,
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error {
+ var compression bool
+
+ if dc.flags.Compression {
+		if dc.compression {
+ compression = true
+ } else {
+ compression = from.Compression()
+ }
+ }
+
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoREG_SEND, from.Self(), etf.Atom(""), etf.Atom(to.Name)},
+ payload: message,
+ compression: compression,
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) SendAlias(from gen.Process, to etf.Alias, message etf.Term) error {
+ var compression bool
+
+	if !dc.flags.EnableAlias {
+ return node.ErrUnsupported
+ }
+
+ if dc.flags.Compression {
+		if dc.compression {
+ compression = true
+ } else {
+ compression = from.Compression()
+ }
+ }
+
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoALIAS_SEND, from.Self(), to},
+ payload: message,
+ compression: compression,
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) Link(local etf.Pid, remote etf.Pid) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoLINK, local, remote},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) Unlink(local etf.Pid, remote etf.Pid) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoUNLINK, local, remote},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) LinkExit(to etf.Pid, terminated etf.Pid, reason string) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoEXIT, terminated, to, etf.Atom(reason)},
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) Monitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoMONITOR, local, remote, ref},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) MonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoMONITOR, local, etf.Atom(remote.Name), ref},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) Demonitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoDEMONITOR, local, remote, ref},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) DemonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoDEMONITOR, local, etf.Atom(remote.Name), ref},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) MonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoMONITOR_EXIT, etf.Atom(terminated.Name), to, ref, etf.Atom(reason)},
+ }
+ return dc.send(msg)
+}
+func (dc *distConnection) MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoMONITOR_EXIT, terminated, to, ref, etf.Atom(reason)},
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) SpawnRequest(behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error {
+	if !dc.flags.EnableRemoteSpawn {
+ return node.ErrUnsupported
+ }
+
+ optlist := etf.List{}
+ if request.Options.Name != "" {
+ optlist = append(optlist, etf.Tuple{etf.Atom("name"), etf.Atom(request.Options.Name)})
+
+ }
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoSPAWN_REQUEST, request.Ref, request.From, request.From,
+ // {M,F,A}
+ etf.Tuple{etf.Atom(behaviorName), etf.Atom(request.Options.Function), len(args)},
+ optlist,
+ },
+ payload: args,
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) SpawnReply(to etf.Pid, ref etf.Ref, pid etf.Pid) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, pid},
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error {
+ msg := &sendMessage{
+ control: etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, etf.Atom(err.Error())},
+ }
+ return dc.send(msg)
+}
+
+func (dc *distConnection) Proxy() error {
+ return nil
+}
+
+//
+// internal
+//
+
+func (dc *distConnection) read(b *lib.Buffer, max int) (int, error) {
+ // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#protocol-between-connected-nodes
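+	// each packet is prefixed with a 4-byte big-endian length;
+	// a zero-length packet is a keepalive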
+ expectingBytes := 4
+
+ for {
+ if b.Len() < expectingBytes {
+ n, e := b.ReadDataFrom(dc.conn, max)
+ if n == 0 {
+ // link was closed
+ return 0, nil
+ }
+
+ if e != nil && e != io.EOF {
+ // something went wrong
+ return 0, e
+ }
+
+			// check once more whether we should read more data
+ continue
+ }
+
+ packetLength := binary.BigEndian.Uint32(b.B[:4])
+ if packetLength == 0 {
+ // it was "software" keepalive
+ expectingBytes = 4
+ continue
+ }
+
+ if b.Len() < int(packetLength)+4 {
+ expectingBytes = int(packetLength) + 4
+ continue
+ }
+
+ return int(packetLength) + 4, nil
+ }
+
+}
+
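+// deferrMissing holds a packet whose decoding failed with ErrMissingInCache
+// (it references atom cache entries that have not been decoded yet) along
+// with a retry counter.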
+type deferrMissing struct {
+ b *lib.Buffer
+ c int
+}
+
+func (dc *distConnection) receiver(recv <-chan *lib.Buffer) {
+ var b *lib.Buffer
+ var missing deferrMissing
+	var timeout <-chan time.Time
+
+ // cancel connection context if something went wrong
+ // it will cause closing connection with stopping all
+ // goroutines around this connection
+ defer dc.cancelContext()
+
+ deferrChannel := make(chan deferrMissing, 100)
+ defer close(deferrChannel)
+
+ timer := lib.TakeTimer()
+ defer lib.ReleaseTimer(timer)
+
+ dChannel := deferrChannel
+
+ for {
+ select {
+ case missing = <-dChannel:
+ b = missing.b
+ default:
+ if len(deferrChannel) > 0 {
+ timer.Reset(150 * time.Millisecond)
+				timeout = timer.C
+ } else {
+				timeout = nil
+ }
+ select {
+ case b = <-recv:
+ if b == nil {
+ // channel was closed
+ return
+ }
+			case <-timeout:
+ dChannel = deferrChannel
+ continue
+ }
+ }
+
+ // read and decode received packet
+ control, message, err := dc.decodePacket(b.B)
+
+ if err == ErrMissingInCache {
+ if b == missing.b && missing.c > 100 {
+ fmt.Println("Error: Disordered data at the link with", dc.peername, ". Close connection")
+ dc.cancelContext()
+ lib.ReleaseBuffer(b)
+ return
+ }
+
+ if b == missing.b {
+ missing.c++
+ } else {
+ missing.b = b
+ missing.c = 0
+ }
+
+ select {
+ case deferrChannel <- missing:
+ // read recv channel
+ dChannel = nil
+ continue
+ default:
+				fmt.Println("Error: deferred queue overflow at the link with", dc.peername, ". Close connection")
+ dc.cancelContext()
+ lib.ReleaseBuffer(b)
+ return
+ }
+ }
+
+ dChannel = deferrChannel
+
+ if err != nil {
+ fmt.Println("Malformed Dist proto at the link with", dc.peername, err)
+ dc.cancelContext()
+ lib.ReleaseBuffer(b)
+ return
+ }
+
+ if control == nil {
+ // fragment
+ continue
+ }
+
+ // handle message
+ if err := dc.handleMessage(control, message); err != nil {
+ fmt.Printf("Malformed Control packet at the link with %s: %#v\n", dc.peername, control)
+ dc.cancelContext()
+ lib.ReleaseBuffer(b)
+ return
+ }
+
+ // we have to release this buffer
+ lib.ReleaseBuffer(b)
+
+ }
+}
+
+func (dc *distConnection) decodePacket(packet []byte) (etf.Term, etf.Term, error) {
+ if len(packet) < 5 {
+ return nil, nil, fmt.Errorf("malformed packet")
+ }
+
+ // [:3] length
+ switch packet[4] {
+ case protoDist:
+ return dc.decodeDist(packet[5:])
+ default:
+ // unknown proto
+ return nil, nil, fmt.Errorf("unknown/unsupported proto")
+ }
+
+}
+
+func (dc *distConnection) decodeDist(packet []byte) (etf.Term, etf.Term, error) {
+ switch packet[0] {
+ case protoDistCompressed:
+		// not supported yet. TODO:
+		// zip.NewReader(...)
+		// ...unzip into a new buffer b (lib.TakeBuffer);
+		// just in case: if b[0] == protoDistCompressed, return an error,
+		// since otherwise it would cause a recursive call of unclear safety
+		// return dc.decodeDist(b)
+
+ case protoDistMessage:
+ var control, message etf.Term
+ var cache []etf.Atom
+ var err error
+
+ cache, packet, err = dc.decodeDistHeaderAtomCache(packet[1:])
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ decodeOptions := etf.DecodeOptions{
+ // FIXME must be used from peer's flag
+ FlagBigPidRef: false,
+ }
+
+ // decode control message
+ control, packet, err = etf.Decode(packet, cache, decodeOptions)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(packet) == 0 {
+ return control, nil, nil
+ }
+
+ // decode payload message
+ message, packet, err = etf.Decode(packet, cache, decodeOptions)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(packet) != 0 {
+ return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet))
+ }
+
+ return control, message, nil
+
+ case protoDistFragment1, protoDistFragmentN:
+ first := packet[0] == protoDistFragment1
+ if len(packet) < 18 {
+ return nil, nil, fmt.Errorf("malformed fragment")
+ }
+
+		// Decode the first fragment right away in order to process its Atom
+		// Cache Header. This avoids the case where we have received the first
+		// fragment while the next packet is not part of this fragmented message
+		// but references atom cache ids that were defined in that first fragment.
+ if first {
+ dc.decodeDistHeaderAtomCache(packet[1:])
+ }
+
+ if assembled, err := dc.decodeFragment(packet[1:], first); assembled != nil {
+ if err != nil {
+ return nil, nil, err
+ }
+ defer lib.ReleaseBuffer(assembled)
+ return dc.decodeDist(assembled.B)
+ } else {
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return nil, nil, nil
+ }
+
+ return nil, nil, fmt.Errorf("unknown packet type %d", packet[0])
+}
+
+func (dc *distConnection) handleMessage(control, message etf.Term) (err error) {
+ defer func() {
+ if lib.CatchPanic() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("%s", r)
+ }
+ }
+ }()
+
+ switch t := control.(type) {
+ case etf.Tuple:
+ switch act := t.Element(1).(type) {
+ case int:
+ switch act {
+ case distProtoREG_SEND:
+ // {6, FromPid, Unused, ToName}
+ lib.Log("[%s] CONTROL REG_SEND [from %s]: %#v", dc.nodename, dc.peername, control)
+ to := gen.ProcessID{
+ Node: dc.nodename,
+ Name: string(t.Element(4).(etf.Atom)),
+ }
+ dc.router.RouteSendReg(t.Element(2).(etf.Pid), to, message)
+ return nil
+
+ case distProtoSEND:
+ // {2, Unused, ToPid}
+ // SEND has no sender pid
+ lib.Log("[%s] CONTROL SEND [from %s]: %#v", dc.nodename, dc.peername, control)
+ dc.router.RouteSend(etf.Pid{}, t.Element(3).(etf.Pid), message)
+ return nil
+
+ case distProtoLINK:
+ // {1, FromPid, ToPid}
+ lib.Log("[%s] CONTROL LINK [from %s]: %#v", dc.nodename, dc.peername, control)
+ dc.router.RouteLink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
+ return nil
+
+ case distProtoUNLINK:
+ // {4, FromPid, ToPid}
+ lib.Log("[%s] CONTROL UNLINK [from %s]: %#v", dc.nodename, dc.peername, control)
+ dc.router.RouteUnlink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid))
+ return nil
+
+ case distProtoNODE_LINK:
+ lib.Log("[%s] CONTROL NODE_LINK [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+
+ case distProtoEXIT:
+ // {3, FromPid, ToPid, Reason}
+ lib.Log("[%s] CONTROL EXIT [from %s]: %#v", dc.nodename, dc.peername, control)
+ terminated := t.Element(2).(etf.Pid)
+ to := t.Element(3).(etf.Pid)
+ reason := fmt.Sprint(t.Element(4))
+ dc.router.RouteExit(to, terminated, string(reason))
+ return nil
+
+ case distProtoEXIT2:
+ lib.Log("[%s] CONTROL EXIT2 [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+
+ case distProtoMONITOR:
+ // {19, FromPid, ToProc, Ref}, where FromPid = monitoring process
+ // and ToProc = monitored process pid or name (atom)
+ lib.Log("[%s] CONTROL MONITOR [from %s]: %#v", dc.nodename, dc.peername, control)
+
+ fromPid := t.Element(2).(etf.Pid)
+ ref := t.Element(4).(etf.Ref)
+ // if monitoring by pid
+ if to, ok := t.Element(3).(etf.Pid); ok {
+ dc.router.RouteMonitor(fromPid, to, ref)
+ return nil
+ }
+
+ // if monitoring by process name
+ if to, ok := t.Element(3).(etf.Atom); ok {
+ processID := gen.ProcessID{
+ Node: dc.nodename,
+ Name: string(to),
+ }
+ dc.router.RouteMonitorReg(fromPid, processID, ref)
+ return nil
+ }
+
+ return fmt.Errorf("malformed monitor message")
+
+ case distProtoDEMONITOR:
+ // {20, FromPid, ToProc, Ref}, where FromPid = monitoring process
+ // and ToProc = monitored process pid or name (atom)
+ lib.Log("[%s] CONTROL DEMONITOR [from %s]: %#v", dc.nodename, dc.peername, control)
+ ref := t.Element(4).(etf.Ref)
+ fromPid := t.Element(2).(etf.Pid)
+ dc.router.RouteDemonitor(fromPid, ref)
+ return nil
+
+ case distProtoMONITOR_EXIT:
+ // {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process
+ // pid or name (atom), ToPid = monitoring process, and Reason = exit reason for the monitored process
+ lib.Log("[%s] CONTROL MONITOR_EXIT [from %s]: %#v", dc.nodename, dc.peername, control)
+ reason := fmt.Sprint(t.Element(5))
+ ref := t.Element(4).(etf.Ref)
+ switch terminated := t.Element(2).(type) {
+ case etf.Pid:
+ dc.router.RouteMonitorExit(terminated, reason, ref)
+ return nil
+ case etf.Atom:
+ processID := gen.ProcessID{Name: string(terminated), Node: dc.peername}
+ dc.router.RouteMonitorExitReg(processID, reason, ref)
+ return nil
+ }
+ return fmt.Errorf("malformed monitor exit message")
+
+ // Not implemented yet, just stubs. TODO.
+ case distProtoSEND_SENDER:
+ lib.Log("[%s] CONTROL SEND_SENDER unsupported [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+ case distProtoPAYLOAD_EXIT:
+ lib.Log("[%s] CONTROL PAYLOAD_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+ case distProtoPAYLOAD_EXIT2:
+ lib.Log("[%s] CONTROL PAYLOAD_EXIT2 unsupported [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+ case distProtoPAYLOAD_MONITOR_P_EXIT:
+ lib.Log("[%s] CONTROL PAYLOAD_MONITOR_P_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, control)
+ return nil
+
+ // alias support
+ case distProtoALIAS_SEND:
+ // {33, FromPid, Alias}
+ lib.Log("[%s] CONTROL ALIAS_SEND [from %s]: %#v", dc.nodename, dc.peername, control)
+ alias := etf.Alias(t.Element(3).(etf.Ref))
+ dc.router.RouteSendAlias(t.Element(2).(etf.Pid), alias, message)
+ return nil
+
+ case distProtoSPAWN_REQUEST:
+ // {29, ReqId, From, GroupLeader, {Module, Function, Arity}, OptList}
+ lib.Log("[%s] CONTROL SPAWN_REQUEST [from %s]: %#v", dc.nodename, dc.peername, control)
+ registerName := ""
+ for _, option := range t.Element(6).(etf.List) {
+ name, ok := option.(etf.Tuple)
+ if !ok || len(name) != 2 {
+ return fmt.Errorf("malformed spawn request")
+ }
+ switch name.Element(1) {
+ case etf.Atom("name"):
+ registerName = string(name.Element(2).(etf.Atom))
+ }
+ }
+
+ from := t.Element(3).(etf.Pid)
+ ref := t.Element(2).(etf.Ref)
+
+ mfa := t.Element(5).(etf.Tuple)
+ module := mfa.Element(1).(etf.Atom)
+ function := mfa.Element(2).(etf.Atom)
+ var args etf.List
+ if str, ok := message.(string); !ok {
+ args, _ = message.(etf.List)
+ } else {
+					// Erlang strings: a list like [1,2,3,4,5] may arrive encoded
+					// as a string; args can't be anything but etf.List
+ for i := range []byte(str) {
+ args = append(args, str[i])
+ }
+ }
+
+ spawnRequestOptions := gen.RemoteSpawnOptions{
+ Name: registerName,
+ Function: string(function),
+ }
+ spawnRequest := gen.RemoteSpawnRequest{
+ From: from,
+ Ref: ref,
+ Options: spawnRequestOptions,
+ }
+ dc.router.RouteSpawnRequest(dc.nodename, string(module), spawnRequest, args...)
+ return nil
+
+ case distProtoSPAWN_REPLY:
+ // {31, ReqId, To, Flags, Result}
+ lib.Log("[%s] CONTROL SPAWN_REPLY [from %s]: %#v", dc.nodename, dc.peername, control)
+ to := t.Element(3).(etf.Pid)
+ ref := t.Element(2).(etf.Ref)
+ dc.router.RouteSpawnReply(to, ref, t.Element(5))
+ return nil
+
+ default:
+ lib.Log("[%s] CONTROL unknown command [from %s]: %#v", dc.nodename, dc.peername, control)
+ return fmt.Errorf("unknown control command %#v", control)
+ }
+ }
+ }
+
+ return fmt.Errorf("unsupported control message %#v", control)
+}
+
+func (dc *distConnection) decodeFragment(packet []byte, first bool) (*lib.Buffer, error) {
+ dc.fragmentsMutex.Lock()
+ defer dc.fragmentsMutex.Unlock()
+
+ if dc.fragments == nil {
+ dc.fragments = make(map[uint64]*fragmentedPacket)
+ }
+
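+	// each fragment starts with a sequenceID identifying the whole message and
+	// a fragmentID that counts down: the first fragment carries the total
+	// number of fragments, the last one carries 1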
+ sequenceID := binary.BigEndian.Uint64(packet)
+ fragmentID := binary.BigEndian.Uint64(packet[8:])
+ if fragmentID == 0 {
+ return nil, fmt.Errorf("fragmentID can't be 0")
+ }
+
+ fragmented, ok := dc.fragments[sequenceID]
+ if !ok {
+ fragmented = &fragmentedPacket{
+ buffer: lib.TakeBuffer(),
+ disordered: lib.TakeBuffer(),
+ disorderedSlices: make(map[uint64][]byte),
+ lastUpdate: time.Now(),
+ }
+ fragmented.buffer.AppendByte(protoDistMessage)
+ dc.fragments[sequenceID] = fragmented
+ }
+
+ // until we get the first item everything will be treated as disordered
+ if first {
+ fragmented.fragmentID = fragmentID + 1
+ }
+
+ if fragmented.fragmentID-fragmentID != 1 {
+ // got the next fragment. disordered
+ slice := fragmented.disordered.Extend(len(packet) - 16)
+ copy(slice, packet[16:])
+ fragmented.disorderedSlices[fragmentID] = slice
+ } else {
+ // order is correct. just append
+ fragmented.buffer.Append(packet[16:])
+ fragmented.fragmentID = fragmentID
+ }
+
+	// check whether we have disordered slices and try
+	// to append them if they fit
+ if fragmented.fragmentID > 0 && len(fragmented.disorderedSlices) > 0 {
+ for i := fragmented.fragmentID - 1; i > 0; i-- {
+ if slice, ok := fragmented.disorderedSlices[i]; ok {
+ fragmented.buffer.Append(slice)
+ delete(fragmented.disorderedSlices, i)
+ fragmented.fragmentID = i
+ continue
+ }
+ break
+ }
+ }
+
+ fragmented.lastUpdate = time.Now()
+
+ if fragmented.fragmentID == 1 && len(fragmented.disorderedSlices) == 0 {
+ // it was the last fragment
+ delete(dc.fragments, sequenceID)
+ lib.ReleaseBuffer(fragmented.disordered)
+ return fragmented.buffer, nil
+ }
+
+ if dc.checkCleanPending {
+ return nil, nil
+ }
+
+ if dc.checkCleanTimer != nil {
+ dc.checkCleanTimer.Reset(dc.checkCleanTimeout)
+ return nil, nil
+ }
+
+ dc.checkCleanTimer = time.AfterFunc(dc.checkCleanTimeout, func() {
+ dc.fragmentsMutex.Lock()
+ defer dc.fragmentsMutex.Unlock()
+
+ if dc.checkCleanTimeout == 0 {
+ dc.checkCleanTimeout = defaultCleanTimeout
+ }
+ if dc.checkCleanDeadline == 0 {
+ dc.checkCleanDeadline = defaultCleanDeadline
+ }
+
+ valid := time.Now().Add(-dc.checkCleanDeadline)
+ for sequenceID, fragmented := range dc.fragments {
+ if fragmented.lastUpdate.Before(valid) {
+ // dropping due to exceeded deadline
+ delete(dc.fragments, sequenceID)
+ }
+ }
+ if len(dc.fragments) == 0 {
+ dc.checkCleanPending = false
+ return
+ }
+
+ dc.checkCleanPending = true
+ dc.checkCleanTimer.Reset(dc.checkCleanTimeout)
+ })
+
+ return nil, nil
+}
+
+func (dc *distConnection) decodeDistHeaderAtomCache(packet []byte) ([]etf.Atom, []byte, error) {
+ // all the details are here https://erlang.org/doc/apps/erts/erl_ext_dist.html#normal-distribution-header
+
+	// number of atom references present in the packet
+ references := int(packet[0])
+ if references == 0 {
+ return nil, packet[1:], nil
+ }
+
+ cache := make([]etf.Atom, references)
+ flagsLen := references/2 + 1
+ if len(packet) < 1+flagsLen {
+ // malformed
+ return nil, nil, ErrMalformed
+ }
+ flags := packet[1 : flagsLen+1]
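+	// each reference is described by a half-byte flag: the lower 3 bits hold
+	// the segment index and the highest bit is the NewCacheEntryFlag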
+
+ // The least significant bit in a half byte is flag LongAtoms.
+ // If it is set, 2 bytes are used for atom lengths instead of 1 byte
+ // in the distribution header.
+ headerAtomLength := 1 // if 'LongAtom' is not set
+
+	// extract this bit. just increase headerAtomLength if this flag is set
+ lastByte := flags[len(flags)-1]
+ shift := uint((references & 0x01) * 4)
+ headerAtomLength += int((lastByte >> shift) & 0x01)
+
+ // 1 (number of references) + references/2+1 (length of flags)
+ packet = packet[1+flagsLen:]
+
+ for i := 0; i < references; i++ {
+ if len(packet) < 1+headerAtomLength {
+ // malformed
+ return nil, nil, ErrMalformed
+ }
+ shift = uint((i & 0x01) * 4)
+ flag := (flags[i/2] >> shift) & 0x0F
+ isNewReference := flag&0x08 == 0x08
+ idxReference := uint16(flag & 0x07)
+ idxInternal := uint16(packet[0])
+ idx := (idxReference << 8) | idxInternal
+
+ if isNewReference {
+ atomLen := uint16(packet[1])
+ if headerAtomLength == 2 {
+ atomLen = binary.BigEndian.Uint16(packet[1:3])
+ }
+ // extract atom
+ packet = packet[1+headerAtomLength:]
+ if len(packet) < int(atomLen) {
+ // malformed
+ return nil, nil, ErrMalformed
+ }
+ atom := etf.Atom(packet[:atomLen])
+ // store in temporary cache for decoding
+ cache[i] = atom
+
+			// store in the link's cache
+ dc.cacheInMutex.Lock()
+ dc.cacheIn[idx] = &atom
+ dc.cacheInMutex.Unlock()
+ packet = packet[atomLen:]
+ continue
+ }
+
+ dc.cacheInMutex.RLock()
+ c := dc.cacheIn[idx]
+ dc.cacheInMutex.RUnlock()
+ if c == nil {
+ return cache, packet, ErrMissingInCache
+ }
+ cache[i] = *c
+ packet = packet[1:]
+ }
+
+ return cache, packet, nil
+}
+
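+// encodeDistHeaderAtomCache writes the distribution header atom cache section:
+// the NumberOfAtomCacheRefs byte, the half-byte flags, and one entry per
+// reference (new cache entries include the atom name, already-encoded ones
+// only the internal segment index).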
+func (dc *distConnection) encodeDistHeaderAtomCache(b *lib.Buffer,
+ writerAtomCache map[etf.Atom]etf.CacheItem,
+ encodingAtomCache *etf.ListAtomCache) {
+
+ n := encodingAtomCache.Len()
+ if n == 0 {
+ b.AppendByte(0)
+ return
+ }
+
+ b.AppendByte(byte(n)) // write NumberOfAtomCache
+
+ lenFlags := n/2 + 1
+ b.Extend(lenFlags)
+
+ flags := b.B[1 : lenFlags+1]
+ flags[lenFlags-1] = 0 // clear last byte to make sure we have valid LongAtom flag
+
+ for i := 0; i < len(encodingAtomCache.L); i++ {
+ shift := uint((i & 0x01) * 4)
+ idxReference := byte(encodingAtomCache.L[i].ID >> 8) // SegmentIndex
+ idxInternal := byte(encodingAtomCache.L[i].ID & 255) // InternalSegmentIndex
+
+ cachedItem := writerAtomCache[encodingAtomCache.L[i].Name]
+ if !cachedItem.Encoded {
+ idxReference |= 8 // set NewCacheEntryFlag
+ }
+
+ // we have to clear before reuse
+ if shift == 0 {
+ flags[i/2] = 0
+ }
+ flags[i/2] |= idxReference << shift
+
+ if cachedItem.Encoded {
+ b.AppendByte(idxInternal)
+ continue
+ }
+
+ if encodingAtomCache.HasLongAtom {
+ // 1 (InternalSegmentIndex) + 2 (length) + name
+ allocLen := 1 + 2 + len(encodingAtomCache.L[i].Name)
+ buf := b.Extend(allocLen)
+ buf[0] = idxInternal
+ binary.BigEndian.PutUint16(buf[1:3], uint16(len(encodingAtomCache.L[i].Name)))
+ copy(buf[3:], encodingAtomCache.L[i].Name)
+ } else {
+
+ // 1 (InternalSegmentIndex) + 1 (length) + name
+ allocLen := 1 + 1 + len(encodingAtomCache.L[i].Name)
+ buf := b.Extend(allocLen)
+ buf[0] = idxInternal
+ buf[1] = byte(len(encodingAtomCache.L[i].Name))
+ copy(buf[2:], encodingAtomCache.L[i].Name)
+ }
+
+ cachedItem.Encoded = true
+ writerAtomCache[encodingAtomCache.L[i].Name] = cachedItem
+ }
+
+ if encodingAtomCache.HasLongAtom {
+ shift := uint((n & 0x01) * 4)
+ flags[lenFlags-1] |= 1 << shift // set LongAtom = 1
+ }
+}
+
+func (dc *distConnection) sender(send <-chan *sendMessage, options node.ProtoOptions, peerFlags node.Flags) {
+ var encodingAtomCache *etf.ListAtomCache
+ var writerAtomCache map[etf.Atom]etf.CacheItem
+ var linkAtomCache *etf.AtomCache
+ var lastCacheID int16 = -1
+
+ var lenControl, lenMessage, lenAtomCache, lenPacket, startDataPosition int
+ var atomCacheBuffer, packetBuffer *lib.Buffer
+ var message *sendMessage
+ var err error
+
+ // cancel connection context if something went wrong
+ // it will cause closing connection with stopping all
+ // goroutines around this connection
+ defer dc.cancelContext()
+
+ cacheEnabled := peerFlags.EnableHeaderAtomCache && dc.cacheOut != nil
+ fragmentationEnabled := peerFlags.EnableFragmentation && options.FragmentationUnit > 0
+
+	// The header atom cache is encoded right after the control/message encoding
+	// process, but it has to be stored as the first item in the packet.
+	// That's why we reserve some space for it up front, to avoid reallocating
+	// the packetBuffer data.
+ reserveHeaderAtomCache := 8192
+
+ if cacheEnabled {
+ encodingAtomCache = etf.TakeListAtomCache()
+ defer etf.ReleaseListAtomCache(encodingAtomCache)
+ writerAtomCache = make(map[etf.Atom]etf.CacheItem)
+ linkAtomCache = dc.cacheOut
+ }
+
+ encodeOptions := etf.EncodeOptions{
+ LinkAtomCache: linkAtomCache,
+ WriterAtomCache: writerAtomCache,
+ EncodingAtomCache: encodingAtomCache,
+ FlagBigCreation: peerFlags.EnableBigCreation,
+ FlagBigPidRef: peerFlags.EnableBigPidRef,
+ }
+
+ for {
+ message = <-send
+
+ if message == nil {
+ // channel was closed
+ return
+ }
+
+ packetBuffer = lib.TakeBuffer()
+ lenControl, lenMessage, lenAtomCache, lenPacket, startDataPosition = 0, 0, 0, 0, reserveHeaderAtomCache
+
+		// reserve 8K for the header; it should be enough
+ packetBuffer.Allocate(reserveHeaderAtomCache)
+
+ // clear encoding cache
+ if cacheEnabled {
+ encodingAtomCache.Reset()
+ }
+
+ // encode Control
+ err = etf.Encode(message.control, packetBuffer, encodeOptions)
+ if err != nil {
+ fmt.Println(err)
+ lib.ReleaseBuffer(packetBuffer)
+ continue
+ }
+ lenControl = packetBuffer.Len() - reserveHeaderAtomCache
+
+ // encode Message if present
+ if message.payload != nil {
+ err = etf.Encode(message.payload, packetBuffer, encodeOptions)
+ if err != nil {
+ fmt.Println(err)
+ lib.ReleaseBuffer(packetBuffer)
+ continue
+ }
+
+ }
+ lenMessage = packetBuffer.Len() - reserveHeaderAtomCache - lenControl
+
+		// encode the Header Atom Cache if it's enabled
+ if cacheEnabled && encodingAtomCache.Len() > 0 {
+ atomCacheBuffer = lib.TakeBuffer()
+ dc.encodeDistHeaderAtomCache(atomCacheBuffer, writerAtomCache, encodingAtomCache)
+ lenAtomCache = atomCacheBuffer.Len()
+
+ if lenAtomCache > reserveHeaderAtomCache-22 {
+				// the encoded atom cache header exceeded the reserved space
+				// FIXME: handle this properly if anyone ever reports it
+				fmt.Println("WARNING: atom cache header exceeds the reserved size limit. Please report this issue")
+ return
+ }
+
+ startDataPosition -= lenAtomCache
+ copy(packetBuffer.B[startDataPosition:], atomCacheBuffer.B)
+ lib.ReleaseBuffer(atomCacheBuffer)
+
+ } else {
+ lenAtomCache = 1
+ startDataPosition -= lenAtomCache
+ packetBuffer.B[startDataPosition] = byte(0)
+ }
+
+ for {
+
+ // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistMessage) + lenAtomCache
+ lenPacket = 1 + 1 + lenAtomCache + lenControl + lenMessage
+
+ if !fragmentationEnabled || lenPacket < options.FragmentationUnit {
+ // send as a single packet
+ startDataPosition -= 6
+
+ binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
+ packetBuffer.B[startDataPosition+4] = protoDist // 131
+ packetBuffer.B[startDataPosition+5] = protoDistMessage // 68
+ if _, err := dc.flusher.Write(packetBuffer.B[startDataPosition:]); err != nil {
+ return
+ }
+ break
+ }
+
+ // Message should be fragmented
+
+ // https://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution-header-for-fragmented-messages
+ // "The entire atom cache and control message has to be part of the starting fragment"
+
+ sequenceID := uint64(atomic.AddInt64(&dc.sequenceID, 1))
+ numFragments := lenMessage/options.FragmentationUnit + 1
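+ // For example (hypothetical numbers): FragmentationUnit = 65000 and
+ // lenMessage = 150000 give numFragments = 3: one starting fragment
+ // carrying the atom cache and control message plus the first chunk of
+ // the payload, then the remaining chunks in the following fragments.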
+
+ // 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID) + ...
+ lenPacket = 1 + 1 + 8 + 8 + lenAtomCache + lenControl + options.FragmentationUnit
+
+ // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID)
+ startDataPosition -= 22
+
+ binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
+ packetBuffer.B[startDataPosition+4] = protoDist // 131
+ packetBuffer.B[startDataPosition+5] = protoDistFragment1 // 69
+
+ binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
+ binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
+ if _, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]); err != nil {
+ return
+ }
+
+ startDataPosition += 4 + lenPacket
+ numFragments--
+
+ nextFragment:
+
+ if len(packetBuffer.B[startDataPosition:]) > options.FragmentationUnit {
+ lenPacket = 1 + 1 + 8 + 8 + options.FragmentationUnit
+ // reuse the previous 22 bytes for the next frame header
+ startDataPosition -= 22
+
+ } else {
+ // the last one
+ lenPacket = 1 + 1 + 8 + 8 + len(packetBuffer.B[startDataPosition:])
+ startDataPosition -= 22
+ }
+
+ binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket))
+ packetBuffer.B[startDataPosition+4] = protoDist // 131
+ packetBuffer.B[startDataPosition+5] = protoDistFragmentN // 70
+
+ binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID))
+ binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments))
+
+ if _, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]); err != nil {
+ return
+ }
+
+ startDataPosition += 4 + lenPacket
+ numFragments--
+ if numFragments > 0 {
+ goto nextFragment
+ }
+
+ // done
+ break
+ }
+
+ lib.ReleaseBuffer(packetBuffer)
+
+ if !cacheEnabled {
+ continue
+ }
+
+ // get updates from link AtomCache and update the local one (map writerAtomCache)
+ id := linkAtomCache.GetLastID()
+ if lastCacheID < id {
+ linkAtomCache.Lock()
+ for _, a := range linkAtomCache.ListSince(lastCacheID + 1) {
+ writerAtomCache[a] = etf.CacheItem{ID: lastCacheID + 1, Name: a, Encoded: false}
+ lastCacheID++
+ }
+ linkAtomCache.Unlock()
+ }
+
+ }
+
+}
+
+func (dc *distConnection) send(msg *sendMessage) error {
+ i := atomic.AddInt32(&dc.senders.i, 1)
+ n := i % dc.senders.n
+ s := dc.senders.sender[n]
+ if s == nil {
+ // connection was closed
+ return node.ErrNoRoute
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ select {
+ case s.sendChannel <- msg:
+ return nil
+ default:
+ return ErrOverloadConnection
+ }
+}
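+
+// A minimal caller-side sketch (hypothetical): send is non-blocking, so a
+// caller may retry once on overload instead of queueing:
+//
+//   if err := dc.send(msg); err == ErrOverloadConnection {
+//       time.Sleep(10 * time.Millisecond)
+//       err = dc.send(msg)
+//   }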
diff --git a/node/dist/dist_test.go b/proto/dist/proto_test.go
similarity index 88%
rename from node/dist/dist_test.go
rename to proto/dist/proto_test.go
index 3474b6e3..4e9110da 100644
--- a/node/dist/dist_test.go
+++ b/proto/dist/proto_test.go
@@ -2,9 +2,7 @@ package dist
import (
"bytes"
- "fmt"
"math/rand"
- "net"
"reflect"
"testing"
"time"
@@ -13,46 +11,6 @@ import (
"github.com/ergo-services/ergo/lib"
)
-func TestLinkRead(t *testing.T) {
-
- server, client := net.Pipe()
- defer func() {
- server.Close()
- client.Close()
- }()
-
- link := Link{
- conn: server,
- }
-
- go client.Write([]byte{0, 0, 0, 0, 0, 0, 0, 1, 0})
-
- // read keepalive answer on a client side
- go func() {
- bb := make([]byte, 10)
- for {
- _, e := client.Read(bb)
- if e != nil {
- return
- }
- }
- }()
-
- c := make(chan bool)
- b := lib.TakeBuffer()
- go func() {
- link.Read(b)
- close(c)
- }()
- select {
- case <-c:
- fmt.Println("OK", b.B)
- case <-time.After(1000 * time.Millisecond):
- t.Fatal("incorrect")
- }
-
-}
-
func TestComposeName(t *testing.T) {
//link := &Link{
// Name: "testName",
@@ -112,7 +70,7 @@ func TestValidateChallengeAck(t *testing.T) {
}
func TestDecodeDistHeaderAtomCache(t *testing.T) {
- link := Link{}
+ link := &distConnection{}
a1 := etf.Atom("atom1")
a2 := etf.Atom("atom2")
link.cacheIn[1034] = &a1
@@ -190,7 +148,7 @@ func TestEncodeDistHeaderAtomCache(t *testing.T) {
}
- l := &Link{}
+ l := &distConnection{}
l.encodeDistHeaderAtomCache(b, writerAtomCache, encodingAtomCache)
if !reflect.DeepEqual(b.B, expected) {
@@ -218,7 +176,7 @@ func TestEncodeDistHeaderAtomCache(t *testing.T) {
}
func BenchmarkDecodeDistHeaderAtomCache(b *testing.B) {
- link := &Link{}
+ link := &distConnection{}
packet := []byte{
131, 68, // start dist header
5, 4, 137, 9, // 5 atoms and theirs flags
@@ -241,7 +199,7 @@ func BenchmarkDecodeDistHeaderAtomCache(b *testing.B) {
}
func BenchmarkEncodeDistHeaderAtomCache(b *testing.B) {
- link := &Link{}
+ link := &distConnection{}
buf := lib.TakeBuffer()
defer lib.ReleaseBuffer(buf)
@@ -268,7 +226,7 @@ func BenchmarkEncodeDistHeaderAtomCache(b *testing.B) {
}
func TestDecodeFragment(t *testing.T) {
- link := &Link{}
+ link := &distConnection{}
link.checkCleanTimeout = 50 * time.Millisecond
link.checkCleanDeadline = 150 * time.Millisecond
@@ -330,15 +288,15 @@ func TestDecodeFragment(t *testing.T) {
link.checkCleanTimeout = 0
link.checkCleanDeadline = 0
fragments := [][]byte{
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 9, 1, 2, 3},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 8, 4, 5, 6},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 7, 8, 9},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 6, 10, 11, 12},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 5, 13, 14, 15},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 4, 16, 17, 18},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 19, 20, 21},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 2, 22, 23, 24},
- []byte{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 25, 26, 27},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 9, 1, 2, 3},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 8, 4, 5, 6},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 7, 8, 9},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 6, 10, 11, 12},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 5, 13, 14, 15},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 4, 16, 17, 18},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 19, 20, 21},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 2, 22, 23, 24},
+ {0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 25, 26, 27},
}
expected = []byte{68, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}
diff --git a/proto/dist/resolver.go b/proto/dist/resolver.go
new file mode 100644
index 00000000..63338406
--- /dev/null
+++ b/proto/dist/resolver.go
@@ -0,0 +1,325 @@
+package dist
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ergo-services/ergo/lib"
+ "github.com/ergo-services/ergo/node"
+)
+
+const (
+ DefaultEPMDPort uint16 = 4369
+
+ epmdAliveReq = 120
+ epmdAliveResp = 121
+ epmdAliveRespX = 118
+ epmdPortPleaseReq = 122
+ epmdPortResp = 119
+ epmdNamesReq = 110
+
+ // won't be implemented
+ // epmdDumpReq = 100
+ // epmdKillReq = 107
+ // epmdStopReq = 115
+
+ // Extra data
+ ergoExtraMagic = 4411
+ ergoExtraVersion1 = 1
+ ergoExtraEnabledTLS = 100
+ ergoExtraEnabledProxy = 101
+)
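+
+// The ergo-specific "extra" field registered with EPMD is 5 bytes
+// (see composeExtra/readExtra below):
+//
+//   [0:2] ergoExtraMagic (big endian)
+//   [2]   ergoExtraVersion1
+//   [3]   1 if TLS is enabled
+//   [4]   1 if proxy is enabled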
+
+// epmd implements resolver
+type epmdResolver struct {
+ node.Resolver
+
+ ctx context.Context
+
+ // EPMD server
+ enableEPMD bool
+ host string
+ port uint16
+
+ // Node
+ name string
+ nodePort uint16
+ nodeName string
+ nodeHost string
+ handshakeVersion node.HandshakeVersion
+
+ extra []byte
+}
+
+func CreateResolver(ctx context.Context) node.Resolver {
+ resolver := &epmdResolver{
+ ctx: ctx,
+ port: DefaultEPMDPort,
+ }
+ return resolver
+}
+
+func CreateResolverWithEPMD(ctx context.Context, host string, port uint16) node.Resolver {
+ if port == 0 {
+ port = DefaultEPMDPort
+ }
+ resolver := &epmdResolver{
+ ctx: ctx,
+ enableEPMD: true,
+ host: host,
+ port: port,
+ }
+ startServerEPMD(ctx, host, port)
+ return resolver
+}
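+
+// A minimal usage sketch (mirrors how the tests wire the resolver
+// into node.Options):
+//
+//   ctx := context.Background()
+//   resolver := CreateResolverWithEPMD(ctx, "", DefaultEPMDPort)
+//   opts := node.Options{Resolver: resolver}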
+
+func (e *epmdResolver) Register(name string, port uint16, options node.ResolverOptions) error {
+ n := strings.Split(name, "@")
+ if len(n) != 2 {
+ return fmt.Errorf("(EMPD) FQDN for node name is required (example: node@hostname)")
+ }
+
+ e.name = name
+ e.nodeName = n[0]
+ e.nodeHost = n[1]
+ e.nodePort = port
+ e.handshakeVersion = options.HandshakeVersion
+
+ e.composeExtra(options)
+
+ conn, err := e.registerNode(options)
+ if err != nil {
+ return err
+ }
+ go func() {
+ buf := make([]byte, 1024)
+ for {
+ _, err := conn.Read(buf)
+ if err == nil {
+ continue
+ }
+ lib.Log("[%s] EPMD client: closing connection", name)
+
+ // reconnect to the EPMD server
+ for {
+ if e.ctx.Err() != nil {
+ // node is stopped
+ return
+ }
+
+ // try to start embedded EPMD server
+ if e.enableEPMD {
+ startServerEPMD(e.ctx, e.host, e.port)
+ }
+
+ if c, err := e.registerNode(options); err != nil {
+ lib.Log("EPMD client: can't register node %q (%s). Retry in 3 seconds...", name, err)
+ time.Sleep(3 * time.Second)
+ } else {
+ conn = c
+ break
+ }
+ }
+ }
+ }()
+
+ go func() {
+ <-e.ctx.Done()
+ conn.Close()
+ }()
+
+ return nil
+}
+
+func (e *epmdResolver) Resolve(name string) (node.Route, error) {
+ var route node.Route
+
+ n := strings.Split(name, "@")
+ if len(n) != 2 {
+ return node.Route{}, fmt.Errorf("incorrect FQDN node name (example: node@localhost)")
+ }
+ conn, err := net.Dial("tcp", net.JoinHostPort(n[1], fmt.Sprintf("%d", e.port)))
+ if err != nil {
+ return node.Route{}, err
+ }
+
+ defer conn.Close()
+
+ if err := e.sendPortPleaseReq(conn, n[0]); err != nil {
+ return node.Route{}, err
+ }
+
+ route.Name = n[0]
+ route.Host = n[1]
+
+ err = e.readPortResp(&route, conn)
+ if err != nil {
+ return node.Route{}, err
+ }
+
+ return route, nil
+
+}
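+
+// Resolve usage sketch (hypothetical node name):
+//
+//   route, err := resolver.Resolve("demo@localhost")
+//   if err == nil {
+//       fmt.Printf("%s:%d TLS: %v\n", route.Host, route.Port, route.EnabledTLS)
+//   }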
+
+func (e *epmdResolver) composeExtra(options node.ResolverOptions) {
+ buf := make([]byte, 5)
+
+ // 2 bytes: ergoExtraMagic
+ binary.BigEndian.PutUint16(buf[0:2], uint16(ergoExtraMagic))
+ // 1 byte: extra version
+ buf[2] = ergoExtraVersion1
+ // 1 byte: TLS enabled flag
+ if options.EnabledTLS {
+ buf[3] = 1
+ }
+ // 1 byte: proxy enabled flag
+ if options.EnabledProxy {
+ buf[4] = 1
+ }
+ e.extra = buf
+}
+
+func (e *epmdResolver) readExtra(route *node.Route, buf []byte) {
+ // buf starts with the 2-byte extra length field followed by the data
+ if len(buf) < 7 {
+ return
+ }
+ extraLen := int(binary.BigEndian.Uint16(buf[0:2]))
+ if extraLen < 5 || extraLen+2 > len(buf) {
+ return
+ }
+ magic := binary.BigEndian.Uint16(buf[2:4])
+ if uint16(ergoExtraMagic) != magic {
+ return
+ }
+
+ if buf[4] != ergoExtraVersion1 {
+ return
+ }
+
+ if buf[5] == 1 {
+ route.EnabledTLS = true
+ }
+
+ if buf[6] == 1 {
+ route.EnabledProxy = true
+ }
+
+ route.IsErgo = true
+}
+
+func (e *epmdResolver) registerNode(options node.ResolverOptions) (net.Conn, error) {
+ //
+ resolverHost := e.host
+ if resolverHost == "" {
+ resolverHost = e.nodeHost
+ }
+ dsn := net.JoinHostPort(resolverHost, strconv.Itoa(int(e.port)))
+ conn, err := net.Dial("tcp", dsn)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := e.sendAliveReq(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if err := e.readAliveResp(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ lib.Log("[%s] EPMD client: node registered", e.name)
+ return conn, nil
+}
+
+func (e *epmdResolver) sendAliveReq(conn net.Conn) error {
+ buf := make([]byte, 2+13+len(e.nodeName)+len(e.extra))
+ binary.BigEndian.PutUint16(buf[0:2], uint16(len(buf)-2))
+ buf[2] = byte(epmdAliveReq)
+ binary.BigEndian.PutUint16(buf[3:5], e.nodePort)
+ // http://erlang.org/doc/reference_manual/distributed.html (section 13.5)
+ // 77 — regular public node, 72 — hidden
+ // We use a regular one
+ buf[5] = 77
+ // Protocol TCP
+ buf[6] = 0
+ // HighestVersion
+ binary.BigEndian.PutUint16(buf[7:9], uint16(HandshakeVersion6))
+ // LowestVersion
+ binary.BigEndian.PutUint16(buf[9:11], uint16(HandshakeVersion5))
+ // length Node name
+ l := len(e.nodeName)
+ binary.BigEndian.PutUint16(buf[11:13], uint16(l))
+ // Node name
+ offset := 13 + l
+ copy(buf[13:offset], e.nodeName)
+ // Extra data
+ l = len(e.extra)
+ binary.BigEndian.PutUint16(buf[offset:offset+2], uint16(l))
+ copy(buf[offset+2:offset+2+l], e.extra)
+ // Send
+ if _, err := conn.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
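+
+// The ALIVE2_REQ packet built above, per the EPMD protocol
+// (https://www.erlang.org/doc/apps/erts/erl_dist_protocol.html):
+//
+//   2 bytes  total length (not counting itself)
+//   1 byte   epmdAliveReq (120)
+//   2 bytes  node port
+//   1 byte   node type (77 = regular, 72 = hidden)
+//   1 byte   protocol (0 = TCP/IPv4)
+//   2 bytes  highest handshake version
+//   2 bytes  lowest handshake version
+//   2+N      node name length and name
+//   2+M      extra data length and data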
+
+func (e *epmdResolver) readAliveResp(conn net.Conn) error {
+ buf := make([]byte, 16)
+ if _, err := conn.Read(buf); err != nil {
+ return err
+ }
+ switch buf[0] {
+ case epmdAliveResp, epmdAliveRespX:
+ default:
+ return fmt.Errorf("Malformed EPMD response %v", buf)
+ }
+ if buf[1] != 0 {
+ return fmt.Errorf("Can't register %q. Code: %v", e.nodeName, buf[1])
+ }
+ return nil
+}
+
+func (e *epmdResolver) sendPortPleaseReq(conn net.Conn, name string) error {
+ buflen := uint16(2 + len(name) + 1)
+ buf := make([]byte, buflen)
+ binary.BigEndian.PutUint16(buf[0:2], uint16(len(buf)-2))
+ buf[2] = byte(epmdPortPleaseReq)
+ copy(buf[3:buflen], name)
+ _, err := conn.Write(buf)
+ return err
+}
+
+func (e *epmdResolver) readPortResp(route *node.Route, c net.Conn) error {
+
+ buf := make([]byte, 1024)
+ n, err := c.Read(buf)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("reading from link - %s", err)
+ }
+ // keep only the bytes actually read
+ buf = buf[:n]
+ if len(buf) < 2 {
+ return fmt.Errorf("malformed reply - %#v", buf)
+ }
+
+ if buf[0] == epmdPortResp && buf[1] == 0 {
+ if len(buf) < 12 {
+ return fmt.Errorf("malformed reply - %#v", buf)
+ }
+ p := binary.BigEndian.Uint16(buf[2:4])
+ // we don't use all the extra info for now. FIXME (do we need it?)
+ nameLen := binary.BigEndian.Uint16(buf[10:12])
+ route.Port = p
+ extraStart := 12 + int(nameLen)
+
+ if extraStart < len(buf) {
+ e.readExtra(route, buf[extraStart:])
+ }
+ return nil
+ } else if buf[1] > 0 {
+ return fmt.Errorf("desired node not found")
+ } else {
+ return fmt.Errorf("malformed reply - %#v", buf)
+ }
+}
diff --git a/proto/dist/types.go b/proto/dist/types.go
new file mode 100644
index 00000000..7fed7de0
--- /dev/null
+++ b/proto/dist/types.go
@@ -0,0 +1,39 @@
+package dist
+
+// Distributed operation codes (http://www.erlang.org/doc/apps/erts/erl_dist_protocol.html)
+const (
+ distProtoLINK = 1
+ distProtoSEND = 2
+ distProtoEXIT = 3
+ distProtoUNLINK = 4
+ distProtoNODE_LINK = 5
+ distProtoREG_SEND = 6
+ distProtoGROUP_LEADER = 7
+ distProtoEXIT2 = 8
+ distProtoSEND_TT = 12
+ distProtoEXIT_TT = 13
+ distProtoREG_SEND_TT = 16
+ distProtoEXIT2_TT = 18
+ distProtoMONITOR = 19
+ distProtoDEMONITOR = 20
+ distProtoMONITOR_EXIT = 21
+ distProtoSEND_SENDER = 22
+ distProtoSEND_SENDER_TT = 23
+ distProtoPAYLOAD_EXIT = 24
+ distProtoPAYLOAD_EXIT_TT = 25
+ distProtoPAYLOAD_EXIT2 = 26
+ distProtoPAYLOAD_EXIT2_TT = 27
+ distProtoPAYLOAD_MONITOR_P_EXIT = 28
+ distProtoSPAWN_REQUEST = 29
+ distProtoSPAWN_REQUEST_TT = 30
+ distProtoSPAWN_REPLY = 31
+ distProtoSPAWN_REPLY_TT = 32
+ distProtoALIAS_SEND = 33
+ distProtoALIAS_SEND_TT = 34
+ distProtoUNLINK_ID = 35
+ distProtoUNLINK_ID_ACK = 36
+
+ // ergo-specific operation codes
+ distProtoPROXY = 1001
+ distProtoREG_PROXY = 1002
+)
diff --git a/tests/application_test.go b/tests/application_test.go
index e2f96584..a3d5d814 100644
--- a/tests/application_test.go
+++ b/tests/application_test.go
@@ -27,12 +27,12 @@ func (a *testApplication) Load(args ...etf.Term) (gen.ApplicationSpec, error) {
Name: name,
Description: "My Test Applicatoin",
Version: "v.0.1",
- Environment: map[string]interface{}{
+ Environment: map[gen.EnvKey]interface{}{
"envName1": 123,
"envName2": "Hello world",
},
Children: []gen.ApplicationChildSpec{
- gen.ApplicationChildSpec{
+ {
Child: &testAppGenServer{},
Name: nameGS,
},
@@ -262,7 +262,7 @@ func TestApplicationBasics(t *testing.T) {
tLifeSpan := time.Since(tStart)
fmt.Printf("... application should be self stopped in 150ms: ")
- if mynode.IsProcessAlive(p) {
+ if p.IsAlive() {
t.Fatal("still alive")
}
diff --git a/tests/registrar_test.go b/tests/core_test.go
similarity index 87%
rename from tests/registrar_test.go
rename to tests/core_test.go
index af96a74d..a22da074 100644
--- a/tests/registrar_test.go
+++ b/tests/core_test.go
@@ -11,16 +11,16 @@ import (
"github.com/ergo-services/ergo/node"
)
-type TestRegistrarGenserver struct {
+type TestCoreGenserver struct {
gen.Server
}
-func (trg *TestRegistrarGenserver) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
- // fmt.Printf("TestRegistrarGenserver ({%s, %s}): HandleCall: %#v, From: %#v\n", trg.process.name, trg.process.Node.Name(), message, from)
+func (trg *TestCoreGenserver) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
+ // fmt.Printf("TestCoreGenserver ({%s, %s}): HandleCall: %#v, From: %#v\n", trg.process.name, trg.process.Node.Name(), message, from)
return message, gen.ServerStatusOK
}
-func (trg *TestRegistrarGenserver) HandleDirect(process *gen.ServerProcess, message interface{}) (interface{}, error) {
+func (trg *TestCoreGenserver) HandleDirect(process *gen.ServerProcess, message interface{}) (interface{}, error) {
switch m := message.(type) {
case makeCall:
return process.Call(m.to, m.message)
@@ -28,7 +28,7 @@ func (trg *TestRegistrarGenserver) HandleDirect(process *gen.ServerProcess, mess
return nil, gen.ErrUnsupportedRequest
}
-func TestRegistrar(t *testing.T) {
+func TestCore(t *testing.T) {
fmt.Printf("\n=== Test Registrar\n")
fmt.Printf("Starting nodes: nodeR1@localhost, nodeR2@localhost: ")
node1, _ := ergo.StartNode("nodeR1@localhost", "cookies", node.Options{})
@@ -39,8 +39,8 @@ func TestRegistrar(t *testing.T) {
fmt.Println("OK")
}
- gs := &TestRegistrarGenserver{}
- fmt.Printf("Starting TestRegistrarGenserver. registering as 'gs1' on %s and create an alias: ", node1.Name())
+ gs := &TestCoreGenserver{}
+ fmt.Printf("Starting TestCoreGenserver. registering as 'gs1' on %s and create an alias: ", node1.Name())
node1gs1, err := node1.Spawn("gs1", gen.ProcessOptions{}, gs, nil)
if err != nil {
t.Fatal(err)
@@ -100,7 +100,7 @@ func TestRegistrar(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("Starting TestRegistrarGenserver and registering as 'gs2' on %s: ", node1.Name())
+ fmt.Printf("Starting TestCoreGenserver and registering as 'gs2' on %s: ", node1.Name())
node1gs2, err := node1.Spawn("gs2", gen.ProcessOptions{}, gs, nil)
if err != nil {
t.Fatal(err)
@@ -121,7 +121,7 @@ func TestRegistrar(t *testing.T) {
fmt.Println("OK")
}
-func TestRegistrarAlias(t *testing.T) {
+func TestCoreAlias(t *testing.T) {
fmt.Printf("\n=== Test Registrar Alias\n")
fmt.Printf("Starting node: nodeR1Alias@localhost: ")
node1, _ := ergo.StartNode("nodeR1Alias@localhost", "cookies", node.Options{})
@@ -132,7 +132,7 @@ func TestRegistrarAlias(t *testing.T) {
fmt.Println("OK")
}
- gs := &TestRegistrarGenserver{}
+ gs := &TestCoreGenserver{}
fmt.Printf(" Starting gs1 and gs2 GenServers on %s: ", node1.Name())
node1gs1, err := node1.Spawn("gs1", gen.ProcessOptions{}, gs, nil)
if err != nil {
diff --git a/tests/monitor_test.go b/tests/monitor_test.go
index 85061feb..a3ba3db9 100644
--- a/tests/monitor_test.go
+++ b/tests/monitor_test.go
@@ -64,7 +64,7 @@ func TestMonitorLocalLocal(t *testing.T) {
waitForResultWithValue(t, gs2.v, node1gs2.Self())
// by Pid
- fmt.Printf("... by Pid Local-Local: gs1 -> gs2. demonitor: ")
+ fmt.Printf("... by Pid Local-Local: gs1 -> gs2. monitor/demonitor: ")
ref := node1gs1.MonitorProcess(node1gs2.Self())
if !node1gs2.IsMonitor(ref) {
@@ -76,7 +76,7 @@ func TestMonitorLocalLocal(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("... by Pid Local-Local: gs1 -> gs2. terminate: ")
+ fmt.Printf("... by Pid Local-Local: gs1 -> gs2. monitor/terminate: ")
ref = node1gs1.MonitorProcess(node1gs2.Self())
node1gs2.Exit("normal")
result := gen.MessageDown{
@@ -90,7 +90,7 @@ func TestMonitorLocalLocal(t *testing.T) {
t.Fatal(err)
}
- fmt.Print("... by Pid Local-Local: gs1 -> unknownPid: ")
+ fmt.Print("... by Pid Local-Local: gs1 -> monitor unknownPid: ")
ref = node1gs1.MonitorProcess(node1gs2.Self())
result = gen.MessageDown{
Ref: ref,
@@ -103,7 +103,7 @@ func TestMonitorLocalLocal(t *testing.T) {
node1gs2, _ = node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil)
waitForResultWithValue(t, gs2.v, node1gs2.Self())
// by Name
- fmt.Printf("... by Name Local-Local: gs1 -> gs2. demonitor: ")
+ fmt.Printf("... by Name Local-Local: gs1 -> gs2. monitor/demonitor: ")
ref = node1gs1.MonitorProcess("gs2")
if err := checkCleanProcessRef(node1gs1, ref); err == nil {
t.Fatal("monitor reference has been lost")
@@ -114,7 +114,7 @@ func TestMonitorLocalLocal(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("... by Name Local-Local: gs1 -> gs2. terminate: ")
+ fmt.Printf("... by Name Local-Local: gs1 -> gs2. monitor/terminate: ")
ref = node1gs1.MonitorProcess("gs2")
node1gs2.Exit("normal")
result = gen.MessageDown{
@@ -126,7 +126,7 @@ func TestMonitorLocalLocal(t *testing.T) {
if err := checkCleanProcessRef(node1gs1, ref); err != nil {
t.Fatal(err)
}
- fmt.Print("... by Name Local-Local: gs1 -> unknownPid: ")
+ fmt.Print("... by Name Local-Local: gs1 -> monitor unknown name: ")
ref = node1gs1.MonitorProcess("asdfasdf")
result = gen.MessageDown{
Ref: ref,
@@ -216,14 +216,14 @@ func TestMonitorLocalRemoteByPid(t *testing.T) {
waitForResultWithValue(t, gs2.v, node2gs2.Self())
// by Pid
- fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. demonitor: ")
+ fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/demonitor: ")
ref := node1gs1.MonitorProcess(node2gs2.Self())
// wait a bit for the MessageDown if something went wrong
waitForTimeout(t, gs1.v)
- if err := checkCleanProcessRef(node1gs1, ref); err == nil {
+ if node1gs1.IsMonitor(ref) == false {
t.Fatal("monitor reference has been lost on node 1")
}
- if err := checkCleanProcessRef(node2gs2, ref); err == nil {
+ if node2gs2.IsMonitor(ref) == false {
t.Fatal("monitor reference has been lost on node 2")
}
if found := node1gs1.DemonitorProcess(ref); found == false {
@@ -240,7 +240,7 @@ func TestMonitorLocalRemoteByPid(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. terminate: ")
+ fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/terminate: ")
ref = node1gs1.MonitorProcess(node2gs2.Self())
// wait a bit for the MessageDown if something went wrong
waitForTimeout(t, gs1.v)
@@ -259,7 +259,7 @@ func TestMonitorLocalRemoteByPid(t *testing.T) {
t.Fatal(err)
}
- fmt.Printf("... by Pid Local-Remote: gs1 -> unknownPid: ")
+ fmt.Printf("... by Pid Local-Remote: gs1 -> monitor unknownPid: ")
ref = node1gs1.MonitorProcess(node2gs2.Self())
result = gen.MessageDown{
Ref: ref,
@@ -275,10 +275,11 @@ func TestMonitorLocalRemoteByPid(t *testing.T) {
node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil)
waitForResultWithValue(t, gs2.v, node2gs2.Self())
- fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. onNodeDown: ")
+ fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/NodeDown: ")
ref = node1gs1.MonitorProcess(node2gs2.Self())
// wait a bit for the MessageDown if something went wrong
waitForTimeout(t, gs1.v)
+ node1.Disconnect(node2.Name())
node2.Stop()
result = gen.MessageDown{
Ref: ref,
@@ -290,7 +291,7 @@ func TestMonitorLocalRemoteByPid(t *testing.T) {
t.Fatal(err)
}
- fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. UnknownNode: ")
+ fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor unknown node: ")
ref = node1gs1.MonitorProcess(node2gs2.Self())
result.Ref = ref
waitForResultWithValue(t, gs1.v, result)
@@ -329,7 +330,7 @@ func TestMonitorLocalRemoteByName(t *testing.T) {
processID := gen.ProcessID{Name: "gs2", Node: node2.Name()}
- fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. demonitor: ")
+ fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor/demonitor: ")
ref := node1gs1.MonitorProcess(processID)
// wait a bit for the MessageDown if something went wrong
waitForTimeout(t, gs1.v)
@@ -353,7 +354,7 @@ func TestMonitorLocalRemoteByName(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. terminate: ")
+ fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor/terminate: ")
ref = node1gs1.MonitorProcess(processID)
// wait a bit for the MessageDown if something went wrong
waitForTimeout(t, gs1.v)
@@ -372,7 +373,7 @@ func TestMonitorLocalRemoteByName(t *testing.T) {
t.Fatal("monitor ref is still alive")
}
- fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> unknownPid: ")
+ fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> monitor unknown remote name: ")
ref = node1gs1.MonitorProcess(processID)
result = gen.MessageDown{
Ref: ref,
@@ -388,22 +389,21 @@ func TestMonitorLocalRemoteByName(t *testing.T) {
node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil)
waitForResultWithValue(t, gs2.v, node2gs2.Self())
- fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. onNodeDown: ")
+ fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor/onNodeDown: ")
ref = node1gs1.MonitorProcess(processID)
+ node1.Disconnect(node2.Name())
+ node2.Stop()
result = gen.MessageDown{
Ref: ref,
ProcessID: processID,
Reason: "noconnection",
}
- // wait a bit for the MessageDown if something went wrong
- waitForTimeout(t, gs1.v)
- node2.Stop()
waitForResultWithValue(t, gs1.v, result)
if node1gs1.IsMonitor(ref) {
t.Fatal("monitor ref is still alive")
}
- fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. UnknownNode: ")
+ fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor unknown node: ")
ref = node1gs1.MonitorProcess(processID)
result.Ref = ref
waitForResultWithValue(t, gs1.v, result)
@@ -750,7 +750,7 @@ func TestLinkLocalRemote(t *testing.T) {
// helpers
func checkCleanProcessRef(p gen.Process, ref etf.Ref) error {
if p.IsMonitor(ref) {
- return fmt.Errorf("monitor process reference hasnt clean correctly")
+ return fmt.Errorf("monitor process reference hasn't been cleaned correctly")
}
return nil
@@ -759,7 +759,7 @@ func checkCleanProcessRef(p gen.Process, ref etf.Ref) error {
func checkCleanLinkPid(p gen.Process, pid etf.Pid) error {
for _, l := range p.Links() {
if l == pid {
- return fmt.Errorf("process link reference hasnt cleaned correctly")
+ return fmt.Errorf("process link reference hasn't been cleaned correctly")
}
}
return nil
diff --git a/tests/node_test.go b/tests/node_test.go
index 3c368fda..cc6f6ca0 100644
--- a/tests/node_test.go
+++ b/tests/node_test.go
@@ -1,6 +1,7 @@
package tests
import (
+ "context"
"crypto/md5"
"fmt"
"math/rand"
@@ -14,7 +15,7 @@ import (
"github.com/ergo-services/ergo/etf"
"github.com/ergo-services/ergo/gen"
"github.com/ergo-services/ergo/node"
- "github.com/ergo-services/ergo/node/dist"
+ "github.com/ergo-services/ergo/proto/dist"
)
type benchCase struct {
@@ -23,13 +24,13 @@ type benchCase struct {
}
func TestNode(t *testing.T) {
+ ctx := context.Background()
opts := node.Options{
- ListenRangeBegin: 25001,
- ListenRangeEnd: 25001,
- EPMDPort: 24999,
+ Listen: 25001,
+ Resolver: dist.CreateResolverWithEPMD(ctx, "", 24999),
}
- node1, _ := ergo.StartNode("node@localhost", "cookies", opts)
+ node1, _ := ergo.StartNodeWithContext(ctx, "node@localhost", "cookies", opts)
if conn, err := net.Dial("tcp", ":25001"); err != nil {
fmt.Println("Connect to the node' listening port FAILED")
@@ -53,8 +54,8 @@ func TestNode(t *testing.T) {
t.Fatal(e)
}
- if !node1.IsProcessAlive(p) {
- t.Fatal("IsProcessAlive: expect 'true', but got 'false'")
+ if !p.IsAlive() {
+ t.Fatal("IsAlive: expect 'true', but got 'false'")
}
_, ee := node1.ProcessInfo(p.Self())
@@ -194,28 +195,41 @@ func TestNodeAtomCache(t *testing.T) {
}
func TestNodeStaticRoute(t *testing.T) {
- nodeName := "nodeT1StaticRoute@localhost"
- nodeStaticPort := 9876
+ nodeName1 := "nodeT1StaticRoute@localhost"
+ nodeName2 := "nodeT2StaticRoute@localhost"
+ nodeStaticPort := uint16(9876)
- node1, _ := ergo.StartNode(nodeName, "secret", node.Options{})
- nr, err := node1.Resolve(nodeName)
+ node1, e1 := ergo.StartNode(nodeName1, "secret", node.Options{})
+ if e1 != nil {
+ t.Fatal(e1)
+ }
+ defer node1.Stop()
+
+ node2, e2 := ergo.StartNode(nodeName2, "secret", node.Options{})
+ if e2 != nil {
+ t.Fatal(e2)
+ }
+ defer node2.Stop()
+
+ nr, err := node1.Resolve(nodeName2)
if err != nil {
- t.Fatal("Can't resolve port number for ", nodeName)
+ t.Fatal("Can't resolve port number for ", nodeName2)
}
- e := node1.AddStaticRoute(nodeName, uint16(nodeStaticPort))
+ // override route for nodeName2 with static port
+ e := node1.AddStaticRoute(nodeName2, nodeStaticPort, node.RouteOptions{})
if e != nil {
t.Fatal(e)
}
// should be overrided by the new value of nodeStaticPort
- if nr, err := node1.Resolve(nodeName); err != nil || nr.Port != nodeStaticPort {
- t.Fatal("Wrong port number after adding static route. Got", nr.Port, "Expected", nodeStaticPort)
+ if r, err := node1.Resolve(nodeName2); err != nil || r.Port != nodeStaticPort {
+ t.Fatal("Wrong port number after adding static route. Got", r.Port, "Expected", nodeStaticPort)
}
- node1.RemoveStaticRoute(nodeName)
+ node1.RemoveStaticRoute(nodeName2)
// should be resolved into the original port number
- if nr2, err := node1.Resolve(nodeName); err != nil || nr.Port != nr2.Port {
+ if nr2, err := node1.Resolve(nodeName2); err != nil || nr.Port != nr2.Port {
t.Fatal("Wrong port number after removing static route")
}
}
@@ -241,21 +255,20 @@ func (h *handshakeGenServer) HandleDirect(process *gen.ServerProcess, message in
func TestNodeDistHandshake(t *testing.T) {
fmt.Printf("\n=== Test Node Handshake versions\n")
+ cookie := "secret"
- nodeOptions5 := node.Options{
- HandshakeVersion: dist.ProtoHandshake5,
- }
- nodeOptions6 := node.Options{
- HandshakeVersion: dist.ProtoHandshake6,
- }
- nodeOptions5WithTLS := node.Options{
- HandshakeVersion: dist.ProtoHandshake5,
- TLSMode: node.TLSModeAuto,
+ // handshake version 5
+ handshake5options := dist.HandshakeOptions{
+ Cookie: cookie,
+ Version: dist.HandshakeVersion5,
}
- nodeOptions6WithTLS := node.Options{
- HandshakeVersion: dist.ProtoHandshake6,
- TLSMode: node.TLSModeAuto,
+
+ // handshake version 6
+ handshake6options := dist.HandshakeOptions{
+ Cookie: cookie,
+ Version: dist.HandshakeVersion6,
}
+
hgs := &handshakeGenServer{}
type Pair struct {
@@ -263,68 +276,110 @@ func TestNodeDistHandshake(t *testing.T) {
nodeA node.Node
nodeB node.Node
}
- node1, e1 := ergo.StartNode("node1Handshake5@localhost", "secret", nodeOptions5)
+ node1Options5 := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ }
+ node1, e1 := ergo.StartNode("node1Handshake5@localhost", "secret", node1Options5)
if e1 != nil {
t.Fatal(e1)
}
- node2, e2 := ergo.StartNode("node2Handshake5@localhost", "secret", nodeOptions5)
+ node2Options5 := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ }
+ node2, e2 := ergo.StartNode("node2Handshake5@localhost", "secret", node2Options5)
if e2 != nil {
t.Fatal(e2)
}
- node3, e3 := ergo.StartNode("node3Handshake5@localhost", "secret", nodeOptions5)
+ node3Options5 := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ }
+ node3, e3 := ergo.StartNode("node3Handshake5@localhost", "secret", node3Options5)
if e3 != nil {
t.Fatal(e3)
}
- node4, e4 := ergo.StartNode("node4Handshake6@localhost", "secret", nodeOptions6)
+ node4Options6 := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ }
+ node4, e4 := ergo.StartNode("node4Handshake6@localhost", "secret", node4Options6)
if e4 != nil {
t.Fatal(e4)
}
// node5, _ := ergo.StartNode("node5Handshake6@localhost", "secret", nodeOptions6)
// node6, _ := ergo.StartNode("node6Handshake5@localhost", "secret", nodeOptions5)
- node7, e7 := ergo.StartNode("node7Handshake6@localhost", "secret", nodeOptions6)
+ node7Options6 := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ }
+ node7, e7 := ergo.StartNode("node7Handshake6@localhost", "secret", node7Options6)
if e7 != nil {
t.Fatal(e7)
}
- node8, e8 := ergo.StartNode("node8Handshake6@localhost", "secret", nodeOptions6)
+ node8Options6 := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ }
+ node8, e8 := ergo.StartNode("node8Handshake6@localhost", "secret", node8Options6)
if e8 != nil {
t.Fatal(e8)
}
- node9, e9 := ergo.StartNode("node9Handshake5@localhost", "secret", nodeOptions5WithTLS)
+ node9Options5WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node9, e9 := ergo.StartNode("node9Handshake5@localhost", "secret", node9Options5WithTLS)
if e9 != nil {
t.Fatal(e9)
}
- node10, e10 := ergo.StartNode("node10Handshake5@localhost", "secret", nodeOptions5WithTLS)
+ node10Options5WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node10, e10 := ergo.StartNode("node10Handshake5@localhost", "secret", node10Options5WithTLS)
if e10 != nil {
t.Fatal(e10)
}
- node11, e11 := ergo.StartNode("node11Handshake5@localhost", "secret", nodeOptions5WithTLS)
+ node11Options5WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake5options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node11, e11 := ergo.StartNode("node11Handshake5@localhost", "secret", node11Options5WithTLS)
if e11 != nil {
t.Fatal(e11)
}
- node12, e12 := ergo.StartNode("node12Handshake6@localhost", "secret", nodeOptions6WithTLS)
+ node12Options6WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node12, e12 := ergo.StartNode("node12Handshake6@localhost", "secret", node12Options6WithTLS)
if e12 != nil {
t.Fatal(e12)
}
// node13, _ := ergo.StartNode("node13Handshake6@localhost", "secret", nodeOptions6WithTLS)
// node14, _ := ergo.StartNode("node14Handshake5@localhost", "secret", nodeOptions5WithTLS)
- node15, e15 := ergo.StartNode("node15Handshake6@localhost", "secret", nodeOptions6WithTLS)
+ node15Options6WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node15, e15 := ergo.StartNode("node15Handshake6@localhost", "secret", node15Options6WithTLS)
if e15 != nil {
t.Fatal(e15)
}
- node16, e16 := ergo.StartNode("node16Handshake6@localhost", "secret", nodeOptions6WithTLS)
+ node16Options6WithTLS := node.Options{
+ Handshake: dist.CreateHandshake(handshake6options),
+ TLS: node.TLS{Enabled: true},
+ }
+ node16, e16 := ergo.StartNode("node16Handshake6@localhost", "secret", node16Options6WithTLS)
if e16 != nil {
t.Fatal(e16)
}
nodes := []Pair{
- Pair{"No TLS. version 5 -> version 5", node1, node2},
- Pair{"No TLS. version 5 -> version 6", node3, node4},
+ {"No TLS. version 5 -> version 5", node1, node2},
+ {"No TLS. version 5 -> version 6", node3, node4},
//Pair{ "No TLS. version 6 -> version 5", node5, node6 },
- Pair{"No TLS. version 6 -> version 6", node7, node8},
- Pair{"With TLS. version 5 -> version 5", node9, node10},
- Pair{"With TLS. version 5 -> version 6", node11, node12},
+ {"No TLS. version 6 -> version 6", node7, node8},
+ {"With TLS. version 5 -> version 5", node9, node10},
+ {"With TLS. version 5 -> version 6", node11, node12},
//Pair{ "With TLS. version 6 -> version 5", node13, node14 },
- Pair{"With TLS. version 6 -> version 6", node15, node16},
+ {"With TLS. version 6 -> version 6", node15, node16},
}
defer func(nodes []Pair) {
@@ -339,7 +394,7 @@ func TestNodeDistHandshake(t *testing.T) {
var result etf.Term
for i := range nodes {
pair := nodes[i]
- fmt.Printf(" %s: ", pair.name)
+ fmt.Printf(" %s %s -> %s: ", pair.name, pair.nodeA.Name(), pair.nodeB.Name())
pA, e = pair.nodeA.Spawn("", gen.ProcessOptions{}, hgs)
if e != nil {
t.Fatal(e)
@@ -378,21 +433,34 @@ func TestNodeRemoteSpawn(t *testing.T) {
}
opts := gen.RemoteSpawnOptions{
- RegisterName: "remote",
+ Name: "remote",
}
- fmt.Printf(" process gs1@node1 requests spawn new process on node2 and register this process with name 'remote': ")
- _, err = process.RemoteSpawn(node2.Name(), "remote", opts, 1, 2, 3)
+ fmt.Printf(" process gs1@node1 request to spawn new process on node2 and register this process with name 'remote': ")
+ gotPid, err := process.RemoteSpawn(node2.Name(), "remote", opts, 1, 2, 3)
if err != nil {
t.Fatal(err)
}
+ p := node2.ProcessByName("remote")
+ if p == nil {
+ t.Fatal("can't find process 'remote' on node2")
+ }
+ if gotPid != p.Self() {
+ t.Fatal("process pid mismatch")
+ }
fmt.Println("OK")
- fmt.Printf(" process gs1@node1 requests spawn new process on node2 with the same name (must be failed): ")
+ fmt.Printf(" process gs1@node1 request to spawn new process on node2 with the same name (must be failed): ")
_, err = process.RemoteSpawn(node2.Name(), "remote", opts, 1, 2, 3)
if err != node.ErrTaken {
t.Fatal(err)
}
fmt.Println("OK")
+ fmt.Printf(" process gs1@node1 request to spawn new process on node2 with unregistered behavior name (must be failed): ")
+ _, err = process.RemoteSpawn(node2.Name(), "randomname", opts, 1, 2, 3)
+ if err != node.ErrBehaviorUnknown {
+ t.Fatal(err)
+ }
+ fmt.Println("OK")
}
type benchGS struct {
@@ -414,7 +482,7 @@ func BenchmarkNodeSequential(b *testing.B) {
node1name := fmt.Sprintf("nodeB1_%d@localhost", b.N)
node2name := fmt.Sprintf("nodeB2_%d@localhost", b.N)
- node1, _ := ergo.StartNode(node1name, "bench", node.Options{DisableHeaderAtomCache: false})
+ node1, _ := ergo.StartNode(node1name, "bench", node.Options{})
node2, _ := ergo.StartNode(node2name, "bench", node.Options{})
bgs := &benchGS{}
@@ -457,7 +525,7 @@ func BenchmarkNodeSequential(b *testing.B) {
func BenchmarkNodeSequentialSingleNode(b *testing.B) {
node1name := fmt.Sprintf("nodeB1Local_%d@localhost", b.N)
- node1, _ := ergo.StartNode(node1name, "bench", node.Options{DisableHeaderAtomCache: true})
+ node1, _ := ergo.StartNode(node1name, "bench", node.Options{})
bgs := &benchGS{}
@@ -500,7 +568,7 @@ func BenchmarkNodeParallel(b *testing.B) {
node1name := fmt.Sprintf("nodeB1Parallel_%d@localhost", b.N)
node2name := fmt.Sprintf("nodeB2Parallel_%d@localhost", b.N)
- node1, _ := ergo.StartNode(node1name, "bench", node.Options{DisableHeaderAtomCache: false})
+ node1, _ := ergo.StartNode(node1name, "bench", node.Options{})
node2, _ := ergo.StartNode(node2name, "bench", node.Options{})
bgs := &benchGS{}
@@ -550,7 +618,7 @@ func BenchmarkNodeParallel(b *testing.B) {
func BenchmarkNodeParallelSingleNode(b *testing.B) {
node1name := fmt.Sprintf("nodeB1ParallelLocal_%d@localhost", b.N)
- node1, _ := ergo.StartNode(node1name, "bench", node.Options{DisableHeaderAtomCache: false})
+ node1, _ := ergo.StartNode(node1name, "bench", node.Options{})
bgs := &benchGS{}
@@ -596,15 +664,15 @@ func BenchmarkNodeParallelSingleNode(b *testing.B) {
}
func benchCases() []benchCase {
return []benchCase{
- benchCase{"number", 12345},
- benchCase{"string", "hello world"},
- benchCase{"tuple (PID)",
+ {"number", 12345},
+ {"string", "hello world"},
+ {"tuple (PID)",
etf.Pid{
Node: "node@localhost",
ID: 1000,
Creation: 1,
},
},
- benchCase{"binary 1MB", make([]byte, 1024*1024)},
+ {"binary 1MB", make([]byte, 1024*1024)},
}
}
diff --git a/tests/rpc_test.go b/tests/rpc_test.go
index 143ef9da..12aa709b 100644
--- a/tests/rpc_test.go
+++ b/tests/rpc_test.go
@@ -54,7 +54,7 @@ func TestRPC(t *testing.T) {
return a[len(a)-1]
}
- fmt.Printf("Registering RPC method 'testMod.testFun' on %s: ", node1.NodeName())
+ fmt.Printf("Registering RPC method 'testMod.testFun' on %s: ", node1.Name())
time.Sleep(100 * time.Millisecond) // waiting for start 'rex' gen_server
if e := node1.ProvideRPC("testMod", "testFun", testFun1); e != nil {
t.Fatal(e)
@@ -64,7 +64,7 @@ func TestRPC(t *testing.T) {
node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil)
- fmt.Printf("Call RPC method 'testMod.testFun' with 1 arg on %s: ", node1.NodeName())
+ fmt.Printf("Call RPC method 'testMod.testFun' with 1 arg on %s: ", node1.Name())
case1 := testRPCCase1{
node: "nodeRPC@localhost",
mod: "testMod",
@@ -76,7 +76,7 @@ func TestRPC(t *testing.T) {
}
waitForResultWithValue(t, gs1.res, 12345)
- fmt.Printf("Call RPC method 'testMod.testFun' with 3 arg on %s: ", node1.NodeName())
+ fmt.Printf("Call RPC method 'testMod.testFun' with 3 arg on %s: ", node1.Name())
case1 = testRPCCase1{
node: "nodeRPC@localhost",
mod: "testMod",
@@ -88,14 +88,14 @@ func TestRPC(t *testing.T) {
}
waitForResultWithValue(t, gs1.res, node1gs1.Self())
- fmt.Printf("Revoking RPC method 'testMod.testFun' on %s: ", node1.NodeName())
+ fmt.Printf("Revoking RPC method 'testMod.testFun' on %s: ", node1.Name())
if e := node1.RevokeRPC("testMod", "testFun"); e != nil {
t.Fatal(e)
} else {
fmt.Println("OK")
}
- fmt.Printf("Call revoked RPC method 'testMod.testFun' with 1 arg on %s: ", node1.NodeName())
+ fmt.Printf("Call revoked RPC method 'testMod.testFun' with 1 arg on %s: ", node1.Name())
expected1 := etf.Tuple{etf.Atom("badrpc"),
etf.Tuple{etf.Atom("EXIT"),
etf.Tuple{etf.Atom("undef"),
@@ -115,7 +115,7 @@ func TestRPC(t *testing.T) {
}
waitForResultWithValue(t, gs1.res, expected1)
- fmt.Printf("Call RPC unknown method 'xxx.xxx' on %s: ", node1.NodeName())
+ fmt.Printf("Call RPC unknown method 'xxx.xxx' on %s: ", node1.Name())
expected2 := etf.Tuple{etf.Atom("badrpc"),
etf.Tuple{etf.Atom("EXIT"),
etf.Tuple{etf.Atom("undef"),
diff --git a/tests/server_test.go b/tests/server_test.go
index a6d10591..a978422b 100644
--- a/tests/server_test.go
+++ b/tests/server_test.go
@@ -84,19 +84,19 @@ func TestServer(t *testing.T) {
err: make(chan error, 2),
}
- fmt.Printf(" wait for start of gs1 on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name())
node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil)
waitForResultWithValue(t, gs1.res, nil)
- fmt.Printf(" wait for start of gs2 on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name())
node1gs2, _ := node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil)
waitForResultWithValue(t, gs2.res, nil)
- fmt.Printf(" wait for start of gs3 on %#v: ", node2.NodeName())
+ fmt.Printf(" wait for start of gs3 on %#v: ", node2.Name())
node2gs3, _ := node2.Spawn("gs3", gen.ProcessOptions{}, gs3, nil)
waitForResultWithValue(t, gs3.res, nil)
- fmt.Printf(" wait for start of gsDirect on %#v: ", node2.NodeName())
+ fmt.Printf(" wait for start of gsDirect on %#v: ", node2.Name())
node2gsDirect, _ := node2.Spawn("gsDirect", gen.ProcessOptions{}, gsDirect, nil)
waitForResult(t, gsDirect.err)
@@ -219,7 +219,7 @@ func TestServer(t *testing.T) {
}
fmt.Printf(" process.Send (by Name) local (gs1) -> remote (gs3) : ")
- processName := gen.ProcessID{Name: "gs3", Node: node2.NodeName()}
+ processName := gen.ProcessID{Name: "gs3", Node: node2.Name()}
node1gs1.Send(processName, etf.Atom("hi"))
waitForResultWithValue(t, gs3.res, etf.Atom("hi"))
@@ -319,7 +319,7 @@ func TestServer(t *testing.T) {
}
fmt.Println("OK")
- fmt.Printf("Stopping nodes: %v, %v\n", node1.NodeName(), node2.NodeName())
+ fmt.Printf("Stopping nodes: %v, %v\n", node1.Name(), node2.Name())
node1.Stop()
node2.Stop()
}
@@ -431,21 +431,21 @@ func TestServerMessageOrder(t *testing.T) {
res: make(chan interface{}, 2),
}
- fmt.Printf(" wait for start of gs1order on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs1order on %#v: ", node1.Name())
node1gs1, err1 := node1.Spawn("gs1order", gen.ProcessOptions{}, gs1, nil)
if err1 != nil {
panic(err1)
}
waitForResultWithValue(t, gs1.res, nil)
- fmt.Printf(" wait for start of gs2order on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs2order on %#v: ", node1.Name())
node1gs2, err2 := node1.Spawn("gs2order", gen.ProcessOptions{}, gs2, nil)
if err2 != nil {
panic(err2)
}
waitForResultWithValue(t, gs2.res, nil)
- fmt.Printf(" wait for start of gs3order on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs3order on %#v: ", node1.Name())
node1gs3, err3 := node1.Spawn("gs3order", gen.ProcessOptions{}, gs3, nil)
if err3 != nil {
panic(err3)
@@ -643,27 +643,27 @@ func TestServerMessageFlood(t *testing.T) {
gsdest := &messageFloodDestGS{
res: make(chan interface{}, 2),
}
- fmt.Printf(" wait for start of gs1source on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs1source on %#v: ", node1.Name())
gs1sourceProcess, _ := node1.Spawn("gs1source", gen.ProcessOptions{}, gs1source, nil)
waitForResultWithValue(t, gs1source.res, nil)
- fmt.Printf(" wait for start of gs2source on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs2source on %#v: ", node1.Name())
gs2sourceProcess, _ := node1.Spawn("gs2source", gen.ProcessOptions{}, gs2source, nil)
waitForResultWithValue(t, gs2source.res, nil)
- fmt.Printf(" wait for start of gs3source on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs3source on %#v: ", node1.Name())
gs3sourceProcess, _ := node1.Spawn("gs3source", gen.ProcessOptions{}, gs3source, nil)
waitForResultWithValue(t, gs3source.res, nil)
- fmt.Printf(" wait for start of gs4source on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs4source on %#v: ", node1.Name())
gs4sourceProcess, _ := node1.Spawn("gs4source", gen.ProcessOptions{}, gs4source, nil)
waitForResultWithValue(t, gs4source.res, nil)
- fmt.Printf(" wait for start of gs5source on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gs5source on %#v: ", node1.Name())
gs5sourceProcess, _ := node1.Spawn("gs5source", gen.ProcessOptions{}, gs5source, nil)
waitForResultWithValue(t, gs5source.res, nil)
- fmt.Printf(" wait for start of gsdest on %#v: ", node1.NodeName())
+ fmt.Printf(" wait for start of gsdest on %#v: ", node1.Name())
node1.Spawn("gsdest", gen.ProcessOptions{}, gsdest, nil)
waitForResultWithValue(t, gsdest.res, nil)
diff --git a/tests/stage_test.go b/tests/stage_test.go
index 3291e67a..4249b9d2 100644
--- a/tests/stage_test.go
+++ b/tests/stage_test.go
@@ -571,6 +571,7 @@ func TestStageDistributed(t *testing.T) {
fmt.Printf("... Consumer@node2 handled subscription confirmation from Producer@node1 (StageCancelTemporary): ")
waitForResultWithValue(t, consumer.value, sub3)
+ node2.Disconnect(node1.Name())
node1.Stop()
fmt.Printf("... Stopping node1: ")
if err := node1.WaitWithTimeout(1000 * time.Millisecond); err != nil {
diff --git a/tests/supervisor_ofa_test.go b/tests/supervisor_ofa_test.go
index 78e0e250..7dee39cb 100644
--- a/tests/supervisor_ofa_test.go
+++ b/tests/supervisor_ofa_test.go
@@ -91,17 +91,17 @@ func TestSupervisorOneForAll(t *testing.T) {
// testing permanent
testCases := []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"new", "new", "new"},
events: 6, // waiting for 3 terminating and 3 starting
},
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"new", "new", "new"},
events: 6,
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"new", "new", "new"},
events: 6,
@@ -155,17 +155,17 @@ func TestSupervisorOneForAll(t *testing.T) {
// testing transient
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"empty", "new", "new"},
events: 5, // waiting for 3 terminates and 2 starts
},
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"empty", "new", "new"},
events: 4, // waiting for 2 terminates and 2 starts
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"empty", "new", "empty"},
events: 3, // waiting for 2 terminates and 1 start
@@ -211,17 +211,17 @@ func TestSupervisorOneForAll(t *testing.T) {
// restart strategy is rest_for_one or one_for_all and a sibling's death
// causes the temporary process to be terminated).
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"empty", "empty", "empty"},
events: 3, // waiting for 3 terminates
},
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"empty", "empty", "empty"},
events: 3, // waiting for 3 terminates
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"empty", "empty", "empty"},
events: 3, // waiting for 3 terminate
@@ -279,17 +279,17 @@ func (ts *testSupervisorOneForAll) Init(args ...etf.Term) (gen.SupervisorSpec, e
ch := args[1].(chan interface{})
return gen.SupervisorSpec{
Children: []gen.SupervisorChildSpec{
- gen.SupervisorChildSpec{
+ {
Name: "testGS1",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 0},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS2",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 1},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS3",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 2},
diff --git a/tests/supervisor_ofo_test.go b/tests/supervisor_ofo_test.go
index 4f2d8d71..32dace72 100644
--- a/tests/supervisor_ofo_test.go
+++ b/tests/supervisor_ofo_test.go
@@ -264,17 +264,17 @@ func (ts *testSupervisorOneForOne) Init(args ...etf.Term) (gen.SupervisorSpec, e
ch := args[1].(chan interface{})
return gen.SupervisorSpec{
Children: []gen.SupervisorChildSpec{
- gen.SupervisorChildSpec{
+ {
Name: "testGS1",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 0},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS2",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 1},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS3",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 2},
diff --git a/tests/supervisor_rfo_test.go b/tests/supervisor_rfo_test.go
index f5603871..2a7bde95 100644
--- a/tests/supervisor_rfo_test.go
+++ b/tests/supervisor_rfo_test.go
@@ -89,17 +89,17 @@ func TestSupervisorRestForOne(t *testing.T) {
// testing permanent
testCases := []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"new", "new", "new"},
events: 6, // waiting for 3 terminates and 3 starts
},
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"old", "new", "new"},
events: 4, // waiting for 2 terminates and 2 starts
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"old", "old", "new"},
events: 2, // waiting for 1 terminates and 1 starts
@@ -158,17 +158,17 @@ func TestSupervisorRestForOne(t *testing.T) {
// testing transient
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"new", "new", "new"},
events: 6, // waiting for 3 terminates and 3 starts
},
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"old", "empty", "new"},
events: 3, // waiting for 2 terminates and 1 starts
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"old", "empty", "empty"},
events: 1, // waiting for 1 terminates
@@ -214,17 +214,17 @@ func TestSupervisorRestForOne(t *testing.T) {
// restart strategy is rest_for_one or one_for_all and a sibling's death
// causes the temporary process to be terminated).
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"empty", "empty", "empty"},
events: 3, // waiting for 3 terminates
},
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"old", "empty", "empty"},
events: 2, // waiting for 2 terminates
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"old", "old", "empty"},
events: 1, // waiting for 1 terminate
@@ -284,17 +284,17 @@ func (ts *testSupervisorRestForOne) Init(args ...etf.Term) (gen.SupervisorSpec,
ch := args[1].(chan interface{})
return gen.SupervisorSpec{
Children: []gen.SupervisorChildSpec{
- gen.SupervisorChildSpec{
+ {
Name: "testGS1",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 0},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS2",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 1},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS3",
Child: &testSupervisorGenServer{},
Args: []etf.Term{ch, 2},
diff --git a/tests/supervisor_sofo_test.go b/tests/supervisor_sofo_test.go
index 14514414..3ffafea8 100644
--- a/tests/supervisor_sofo_test.go
+++ b/tests/supervisor_sofo_test.go
@@ -38,17 +38,17 @@ func TestSupervisorSimpleOneForOne(t *testing.T) {
// ===================================================================================================
// test SupervisorStrategyRestartPermanent
testCases := []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"new", "new", "new", "new", "new", "new"},
events: 12, // waiting for 6 terminates and 6 restarts
},
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"new", "new", "new", "new", "new", "new"},
events: 12, // waiting for 6 terminates and 6 restarts
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"new", "new", "new", "new", "new", "new"},
events: 12, // waiting for 6 terminates and 6 restarts
@@ -146,17 +146,17 @@ func TestSupervisorSimpleOneForOne(t *testing.T) {
// ===================================================================================================
// test SupervisorStrategyRestartTransient
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"new", "new", "new", "new", "new", "new"},
events: 12, // waiting for 6 terminates and 6 restarts
},
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"},
events: 6, // waiting for 6 terminates
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"},
events: 6, // waiting for 6 terminates
@@ -254,17 +254,17 @@ func TestSupervisorSimpleOneForOne(t *testing.T) {
// ===================================================================================================
// test SupervisorStrategyRestartTemporary
testCases = []ChildrenTestCase{
- ChildrenTestCase{
+ {
reason: "abnormal",
statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"},
events: 6, // waiting for 6 terminates
},
- ChildrenTestCase{
+ {
reason: "normal",
statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"},
events: 6, // waiting for 6 terminates
},
- ChildrenTestCase{
+ {
reason: "shutdown",
statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"},
events: 6, // waiting for 6 terminates
@@ -364,15 +364,15 @@ func (ts *testSupervisorSimpleOneForOne) Init(args ...etf.Term) (gen.SupervisorS
restart := args[0].(string)
return gen.SupervisorSpec{
Children: []gen.SupervisorChildSpec{
- gen.SupervisorChildSpec{
+ {
Name: "testGS1",
Child: &testSupervisorGenServer{},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS2",
Child: &testSupervisorGenServer{},
},
- gen.SupervisorChildSpec{
+ {
Name: "testGS3",
Child: &testSupervisorGenServer{},
},
diff --git a/version.go b/version.go
index bafd4690..804b271b 100644
--- a/version.go
+++ b/version.go
@@ -1,7 +1,7 @@
package ergo
const (
- Version = "2.0.0"
- VersionPrefix = "ergo"
- VersionOTP int = 24
+ Version = "2.0.0" // Ergo Framework version
+ VersionPrefix = "ergo" // Prefix using for the full version name
+ VersionOTP int = 24 // Erlang version support
)