diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 9d6e3de18..92da9d1f0 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -19,7 +19,9 @@ package abi
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
+ "github.com/core-coin/go-core/crypto"
"io"
"github.com/core-coin/go-core/common"
@@ -32,6 +34,12 @@ type ABI struct {
Constructor Method
Methods map[string]Method
Events map[string]Event
+
+ // Additional "special" functions introduced in ylem v0.6.0.
+ // It's separated from the original default fallback. Each contract
+ // can only define one fallback and receive function.
+ Fallback Method // Note it's also used to represent legacy fallback before v0.6.0
+ Receive Method
}
// JSON returns a parsed ABI interface and error if it failed.
@@ -70,7 +78,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
// Pack up the method ID too if not a constructor and return
- return append(method.ID(), arguments...), nil
+ return append(method.ID, arguments...), nil
}
// Unpack output in v according to the abi specification
@@ -108,13 +116,22 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte)
// UnmarshalJSON implements json.Unmarshaler interface
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
- Type string
- Name string
- Constant bool
+ Type string
+ Name string
+ Inputs []Argument
+ Outputs []Argument
+
+ // Status indicator which can be: "pure", "view",
+ // "nonpayable" or "payable".
StateMutability string
- Anonymous bool
- Inputs []Argument
- Outputs []Argument
+
+	// Deprecated: these status indicators were removed in v0.6.0.
+ Constant bool // True if function is either pure or view
+ Payable bool // True if function is payable
+
+ // Event relevant indicator represents the event is
+ // declared as anonymous.
+ Anonymous bool
}
if err := json.Unmarshal(data, &fields); err != nil {
return err
@@ -124,44 +141,68 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
for _, field := range fields {
switch field.Type {
case "constructor":
- abi.Constructor = Method{
- Inputs: field.Inputs,
+ abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
+ case "function":
+ name := abi.overloadedMethodName(field.Name)
+ abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
+ case "fallback":
+ // New introduced function type in v0.6.0, check more detail
+ // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
+ if abi.HasFallback() {
+ return errors.New("only single fallback is allowed")
}
- // empty defaults to function according to the abi spec
- case "function", "":
- name := field.Name
- _, ok := abi.Methods[name]
- for idx := 0; ok; idx++ {
- name = fmt.Sprintf("%s%d", field.Name, idx)
- _, ok = abi.Methods[name]
+ abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil)
+ case "receive":
+ // New introduced function type in v0.6.0, check more detail
+ // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
+ if abi.HasReceive() {
+ return errors.New("only single receive is allowed")
}
- isConst := field.Constant || field.StateMutability == "pure" || field.StateMutability == "view"
- abi.Methods[name] = Method{
- Name: name,
- RawName: field.Name,
- Const: isConst,
- Inputs: field.Inputs,
- Outputs: field.Outputs,
+ if field.StateMutability != "payable" {
+ return errors.New("the statemutability of receive can only be payable")
}
+ abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event":
- name := field.Name
- _, ok := abi.Events[name]
- for idx := 0; ok; idx++ {
- name = fmt.Sprintf("%s%d", field.Name, idx)
- _, ok = abi.Events[name]
- }
- abi.Events[name] = Event{
- Name: name,
- RawName: field.Name,
- Anonymous: field.Anonymous,
- Inputs: field.Inputs,
- }
+ name := abi.overloadedEventName(field.Name)
+ abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
+ default:
+ return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
}
}
return nil
}
+// overloadedMethodName returns the next available name for a given function.
+// Needed since ylem allows for function overload.
+//
+// e.g. if the abi contains Methods send, send1
+// overloadedMethodName would return send2 for input send.
+func (abi *ABI) overloadedMethodName(rawName string) string {
+ name := rawName
+ _, ok := abi.Methods[name]
+ for idx := 0; ok; idx++ {
+ name = fmt.Sprintf("%s%d", rawName, idx)
+ _, ok = abi.Methods[name]
+ }
+ return name
+}
+
+// overloadedEventName returns the next available name for a given event.
+// Needed since ylem allows for event overload.
+//
+// e.g. if the abi contains events received, received1
+// overloadedEventName would return received2 for input received.
+func (abi *ABI) overloadedEventName(rawName string) string {
+ name := rawName
+ _, ok := abi.Events[name]
+ for idx := 0; ok; idx++ {
+ name = fmt.Sprintf("%s%d", rawName, idx)
+ _, ok = abi.Events[name]
+ }
+ return name
+}
+
// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
@@ -169,7 +210,7 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
- if bytes.Equal(method.ID(), sigdata[:4]) {
+ if bytes.Equal(method.ID, sigdata[:4]) {
return &method, nil
}
}
@@ -180,9 +221,41 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
// ABI and returns nil if none found.
func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
for _, event := range abi.Events {
- if bytes.Equal(event.ID().Bytes(), topic.Bytes()) {
+ if bytes.Equal(event.ID.Bytes(), topic.Bytes()) {
return &event, nil
}
}
return nil, fmt.Errorf("no event with id: %#x", topic.Hex())
}
+
+// HasFallback returns an indicator whether a fallback function is included.
+func (abi *ABI) HasFallback() bool {
+ return abi.Fallback.Type == Fallback
+}
+
+// HasReceive returns an indicator whether a receive function is included.
+func (abi *ABI) HasReceive() bool {
+ return abi.Receive.Type == Receive
+}
+
+// revertSelector is a special function selector for revert reason unpacking.
+var revertSelector = crypto.SHA3([]byte("Error(string)"))[:4]
+
+// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
+// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
+// the provided revert reason is abi-encoded as if it were a call to a function
+// `Error(string)`. So it's a special tool for it.
+func UnpackRevert(data []byte) (string, error) {
+ if len(data) < 4 {
+ return "", errors.New("invalid data for unpacking")
+ }
+ if !bytes.Equal(data[:4], revertSelector) {
+ return "", errors.New("invalid data for unpacking")
+ }
+ var reason string
+ typ, _ := NewType("string", "", nil)
+ if err := (Arguments{{Type: typ}}).Unpack(&reason, data[4:]); err != nil {
+ return "", err
+ }
+ return reason, nil
+}
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 042390088..f7014455a 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -19,6 +19,7 @@ package abi
import (
"bytes"
"encoding/hex"
+ "errors"
"fmt"
"math/big"
"reflect"
@@ -31,47 +32,41 @@ import (
const jsondata = `
[
- { "type" : "function", "name" : "balance", "constant" : true },
- { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
+ { "type" : "function", "name" : "balance", "stateMutability" : "view" },
+ { "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
]`
const jsondata2 = `
[
- { "type" : "function", "name" : "balance", "constant" : true },
- { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
- { "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
- { "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
- { "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
- { "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
- { "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
- { "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
- { "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
- { "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
- { "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
- { "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
- { "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
- { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
- { "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
- { "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
- { "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
+ { "type" : "function", "name" : "balance", "stateMutability" : "view" },
+ { "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
+ { "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
+ { "type" : "function", "name" : "string", "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
+ { "type" : "function", "name" : "bool", "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
+ { "type" : "function", "name" : "address", "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
+ { "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
+ { "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
+ { "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
+ { "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
+ { "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
+ { "type" : "function", "name" : "slice256", "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
+ { "type" : "function", "name" : "sliceAddress", "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "sliceMultiAddress", "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray", "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray2", "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
+ { "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`
func TestReader(t *testing.T) {
Uint256, _ := NewType("uint256", "", nil)
- exp := ABI{
+ abi := ABI{
Methods: map[string]Method{
- "balance": {
- "balance", "balance", true, nil, nil,
- },
- "send": {
- "send", "send", false, []Argument{
- {"amount", Uint256, false},
- }, nil,
- },
+ "balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil),
+ "send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil),
},
}
- abi, err := JSON(strings.NewReader(jsondata))
+ exp, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Error(err)
}
@@ -173,22 +168,22 @@ func TestTestSlice(t *testing.T) {
func TestMethodSignature(t *testing.T) {
String, _ := NewType("string", "", nil)
- m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
+ m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
exp := "foo(string,string)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
idexp := crypto.SHA3([]byte(exp))[:4]
- if !bytes.Equal(m.ID(), idexp) {
- t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
+ if !bytes.Equal(m.ID, idexp) {
+ t.Errorf("expected ids to match %x != %x", m.ID, idexp)
}
uintt, _ := NewType("uint256", "", nil)
- m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil}
+ m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", uintt, false}}, nil)
exp = "foo(uint256)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
// Method with tuple arguments
@@ -204,10 +199,10 @@ func TestMethodSignature(t *testing.T) {
{Name: "y", Type: "int256"},
}},
})
- m = Method{"foo", "foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+ m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil)
exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
}
@@ -219,12 +214,12 @@ func TestOverloadedMethodSignature(t *testing.T) {
}
check := func(name string, expect string, method bool) {
if method {
- if abi.Methods[name].Sig() != expect {
- t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig())
+ if abi.Methods[name].Sig != expect {
+ t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig)
}
} else {
- if abi.Events[name].Sig() != expect {
- t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig())
+ if abi.Events[name].Sig != expect {
+ t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig)
}
}
}
@@ -586,7 +581,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
}
func TestDefaultFunctionParsing(t *testing.T) {
- const definition = `[{ "name" : "balance" }]`
+ const definition = `[{ "name" : "balance", "type" : "function" }]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
@@ -933,13 +928,13 @@ func TestABI_MethodById(t *testing.T) {
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
- m2, err := abi.MethodById(m.ID())
+ m2, err := abi.MethodById(m.ID)
if err != nil {
t.Fatalf("Failed to look up ABI method: %v", err)
}
b := fmt.Sprintf("%v", m2)
if a != b {
- t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID())
+ t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID)
}
}
// Also test empty
@@ -1007,8 +1002,8 @@ func TestABI_EventById(t *testing.T) { //todo: TEST
t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
}
- if event.ID() != topicID {
- t.Errorf("Event id %s does not match topic %s, test #%d", event.ID().Hex(), topicID.Hex(), testnum)
+ if event.ID != topicID {
+ t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum)
}
unknowntopicID := crypto.SHA3Hash([]byte("unknownEvent"))
@@ -1063,3 +1058,59 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
t.Fatalf("Should not have found extra method")
}
}
+
+// TestUnnamedEventParam checks that an event with unnamed parameters is
+// correctly handled
+// The test runs the abi of the following contract.
+// contract TestEvent {
+// event send(uint256, uint256);
+// }
+func TestUnnamedEventParam(t *testing.T) {
+ abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
+ contractAbi, err := JSON(strings.NewReader(abiJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ event, ok := contractAbi.Events["send"]
+ if !ok {
+ t.Fatalf("Could not find event")
+ }
+ if event.Inputs[0].Name != "arg0" {
+ t.Fatalf("Could not find input")
+ }
+ if event.Inputs[1].Name != "arg1" {
+ t.Fatalf("Could not find input")
+ }
+}
+
+func TestUnpackRevert(t *testing.T) {
+ t.Parallel()
+
+ var cases = []struct {
+ input string
+ expect string
+ expectErr error
+ }{
+ {"", "", errors.New("invalid data for unpacking")},
+ {"08c379a1", "", errors.New("invalid data for unpacking")},
+ {"4e401cbe0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil},
+ }
+ for index, c := range cases {
+ t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
+ got, err := UnpackRevert(common.Hex2Bytes(c.input))
+ if c.expectErr != nil {
+ if err == nil {
+ t.Fatalf("Expected non-nil error")
+ }
+ if err.Error() != c.expectErr.Error() {
+ t.Fatalf("Expected error mismatch, want %v, got %v", c.expectErr, err)
+ }
+ return
+ }
+ if c.expect != got {
+ t.Fatalf("Output mismatch, want %v, got %v", c.expect, got)
+ }
+ })
+ }
+}
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index e74753ece..1dcd49ff8 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -92,9 +92,8 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if len(data) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
- } else {
- return nil // Nothing to unmarshal, return
}
+ return nil // Nothing to unmarshal, return
}
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
@@ -104,6 +103,9 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if err != nil {
return err
}
+ if len(marshalledValues) == 0 {
+ return fmt.Errorf("abi: Unpack(no-values unmarshalled %T)", v)
+ }
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
@@ -112,18 +114,24 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
+ // Make sure map is not nil
+ if v == nil {
+ return fmt.Errorf("abi: cannot unpack into a nil map")
+ }
if len(data) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
- } else {
- return nil // Nothing to unmarshal, return
}
+ return nil // Nothing to unmarshal, return
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
- return arguments.unpackIntoMap(v, marshalledValues)
+ for i, arg := range arguments.NonIndexed() {
+ v[arg.Name] = marshalledValues[i]
+ }
+ return nil
}
// unpack sets the unmarshalled value to go format.
@@ -195,19 +203,6 @@ func unpack(t *Type, dst interface{}, src interface{}) error {
return nil
}
-// unpackIntoMap unpacks marshalledValues into the provided map[string]interface{}
-func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error {
- // Make sure map is not nil
- if v == nil {
- return fmt.Errorf("abi: cannot unpack into a nil map")
- }
-
- for i, arg := range arguments.NonIndexed() {
- v[arg.Name] = marshalledValues[i]
- }
- return nil
-}
-
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
if arguments.LengthNonIndexed() == 0 {
@@ -233,30 +228,28 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interfac
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
- value = reflect.ValueOf(v).Elem()
- typ = value.Type()
- kind = value.Kind()
+ value = reflect.ValueOf(v).Elem()
+ typ = value.Type()
+ kind = value.Kind()
+ nonIndexedArgs = arguments.NonIndexed()
)
- if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
+ if err := requireUnpackKind(value, len(nonIndexedArgs), arguments); err != nil {
return err
}
// If the interface is a struct, get of abi->struct_field mapping
var abi2struct map[string]string
if kind == reflect.Struct {
- var (
- argNames []string
- err error
- )
- for _, arg := range arguments.NonIndexed() {
- argNames = append(argNames, arg.Name)
+ argNames := make([]string, len(nonIndexedArgs))
+ for i, arg := range nonIndexedArgs {
+ argNames[i] = arg.Name
}
- abi2struct, err = mapArgNamesToStructFields(argNames, value)
- if err != nil {
+ var err error
+ if abi2struct, err = mapArgNamesToStructFields(argNames, value); err != nil {
return err
}
}
- for i, arg := range arguments.NonIndexed() {
+ for i, arg := range nonIndexedArgs {
switch kind {
case reflect.Struct:
field := value.FieldByName(abi2struct[arg.Name])
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 286c5d817..fc1e6963a 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
+ "github.com/core-coin/go-core/accounts/abi"
"math/big"
"sync"
"time"
@@ -49,7 +50,6 @@ var (
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
errTransactionDoesNotExist = errors.New("transaction does not exist")
- errEnergyEstimationFailed = errors.New("energy required exceeds allowance or always failing transaction")
)
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
@@ -75,7 +75,7 @@ type SimulatedBackend struct {
func NewSimulatedBackendWithDatabase(database xcbdb.Database, alloc core.GenesisAlloc, energyLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllCryptoreProtocolChanges, EnergyLimit: energyLimit, Alloc: alloc}
genesis.MustCommit(database)
- blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{
database: database,
@@ -349,8 +349,11 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call gocore.CallMsg
if err != nil {
return nil, err
}
- rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
- return rval, err
+ res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
+ if err != nil {
+ return nil, err
+ }
+ return res.Return(), nil
}
// PendingCallContract executes a contract call on the pending state.
@@ -359,8 +362,11 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call gocore.
defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
- rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
- return rval, err
+ res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
+ if err != nil {
+ return nil, err
+ }
+ return res.Return(), nil
}
// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
@@ -397,23 +403,34 @@ func (b *SimulatedBackend) EstimateEnergy(ctx context.Context, call gocore.CallM
}
cap = hi
- // Create a helper to check if a energy allowance results in an executable transaction
- executable := func(energy uint64) bool {
+ // Create a helper to check if an energy allowance results in an executable transaction
+ executable := func(energy uint64) (bool, *core.ExecutionResult, error) {
call.Energy = energy
snapshot := b.pendingState.Snapshot()
- _, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
+ res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot)
- if err != nil || failed {
- return false
+ if err != nil {
+ if err == core.ErrIntrinsicEnergy {
+ return true, nil, nil // Special case, raise energy limit
+ }
+ return true, nil, err // Bail out
}
- return true
+ return res.Failed(), res, nil
}
// Execute the binary search and hone in on an executable energy limit
for lo+1 < hi {
mid := (hi + lo) / 2
- if !executable(mid) {
+ failed, _, err := executable(mid)
+
+	// If the error is not nil (consensus error), it means the provided message
+	// call or transaction will never be accepted no matter how much energy it is
+	// assigned. Return the error directly, don't struggle any more
+ if err != nil {
+ return 0, err
+ }
+ if failed {
lo = mid
} else {
hi = mid
@@ -421,8 +438,25 @@ func (b *SimulatedBackend) EstimateEnergy(ctx context.Context, call gocore.CallM
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- if !executable(hi) {
- return 0, errEnergyEstimationFailed
+ failed, result, err := executable(hi)
+ if err != nil {
+ return 0, err
+ }
+ if failed {
+ if result != nil && result.Err != vm.ErrOutOfEnergy {
+ errMsg := fmt.Sprintf("always failing transaction (%v)", result.Err)
+ if len(result.Revert()) > 0 {
+ ret, err := abi.UnpackRevert(result.Revert())
+ if err != nil {
+ errMsg += fmt.Sprintf(" (%#x)", result.Revert())
+ } else {
+ errMsg += fmt.Sprintf(" (%s)", ret)
+ }
+ }
+ return 0, errors.New(errMsg)
+ }
+ // Otherwise, the specified energy cap is too low
+ return 0, fmt.Errorf("energy required exceeds allowance (%d)", cap)
}
}
return hi, nil
@@ -430,7 +464,7 @@ func (b *SimulatedBackend) EstimateEnergy(ctx context.Context, call gocore.CallM
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
-func (b *SimulatedBackend) callContract(ctx context.Context, call gocore.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
+func (b *SimulatedBackend) callContract(ctx context.Context, call gocore.CallMsg, block *types.Block, statedb *state.StateDB) (*core.ExecutionResult, error) {
// Ensure message is initialized properly.
if call.EnergyPrice == nil {
call.EnergyPrice = big.NewInt(1)
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 542bfeb6b..b6674a7ec 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"crypto/rand"
+ "errors"
eddsa "github.com/core-coin/go-goldilocks"
"math/big"
"strings"
@@ -366,26 +367,113 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) {
}
func TestSimulatedBackend_EstimateEnergy(t *testing.T) {
- sim := NewSimulatedBackend(
- core.GenesisAlloc{}, 10000000,
- )
+ /*
+ pragma solidity ^0.6.4;
+ contract EnergyEstimation {
+ function PureRevert() public { revert(); }
+ function Revert() public { revert("revert reason");}
+ function OOG() public { for (uint i = 0; ; i++) {}}
+ function Assert() public { assert(false);}
+ function Valid() public {}
+ }*/
+ const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
+ const contractBin = "608060405234801561001057600080fd5b5061027a806100206000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80633ae247141461005c578063593b9361146100665780636c5fcd821461007057806396be6e031461007a578063a842ede314610084575b600080fd5b61006461008e565b005b61006e6100c9565b005b6100786100ce565b005b6100826100e4565b005b61008c6100e6565b005b6040517f4e401cbe0000000000000000000000000000000000000000000000000000000081526004016100c090610140565b60405180910390fd5b600080fd5b60005b80806100dc9061017b565b9150506100d1565b565b600061011b577f4b1f2ce300000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b565b600061012a600d83610160565b9150610135826101f3565b602082019050919050565b600060208201905081810360008301526101598161011d565b9050919050565b600082825260208201905092915050565b6000819050919050565b600061018682610171565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156101b9576101b86101c4565b5b600182019050919050565b7f4b1f2ce300000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f72657665727420726561736f6e0000000000000000000000000000000000000060008201525056fea26469706673582212202458901c98e418f9c6e0efe666fc13c1536b98db06e04c1d7bae3eede256401f64736f6c63782a302e382e342d646576656c6f702e323032322e372e382b636f6d6d69742e30353336326564342e6d6f64005b"
+
+ key, _ := crypto.GenerateKey(rand.Reader)
+ pub := eddsa.Ed448DerivePublicKey(*key)
+ addr := crypto.PubkeyToAddress(pub)
+ opts := bind.NewKeyedTransactor(key)
+
+ sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Core)}}, 10000000)
defer sim.Close()
- bgCtx := context.Background()
- pub := eddsa.Ed448DerivePublicKey(*testKey)
- testAddr := crypto.PubkeyToAddress(pub)
- energy, err := sim.EstimateEnergy(bgCtx, gocore.CallMsg{
- From: testAddr,
- To: &testAddr,
- Value: big.NewInt(1000),
- Data: []byte{},
- })
- if err != nil {
- t.Errorf("could not estimate energy: %v", err)
- }
+ parsed, _ := abi.JSON(strings.NewReader(contractAbi))
+ contractAddr, _, _, _ := bind.DeployContract(opts, parsed, common.FromHex(contractBin), sim)
+ sim.Commit()
- if energy != params.TxEnergy {
- t.Errorf("expected 21000 energy cost for a transaction got %v", energy)
+ var cases = []struct {
+ name string
+ message gocore.CallMsg
+ expect uint64
+ expectError error
+ }{
+ {"plain transfer(valid)", gocore.CallMsg{
+ From: addr,
+ To: &addr,
+ Energy: 0,
+ EnergyPrice: big.NewInt(0),
+ Value: big.NewInt(1),
+ Data: nil,
+ }, params.TxEnergy, nil},
+
+ {"plain transfer(invalid)", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 0,
+ EnergyPrice: big.NewInt(0),
+ Value: big.NewInt(1),
+ Data: nil,
+ }, 0, errors.New("always failing transaction (execution reverted)")},
+
+ {"Revert", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 0,
+ EnergyPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("3ae24714"),
+ }, 0, errors.New("always failing transaction (execution reverted) (revert reason)")},
+
+ {"PureRevert", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 0,
+ EnergyPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("593b9361"),
+ }, 0, errors.New("always failing transaction (execution reverted)")},
+
+ {"OOG", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 100000,
+ EnergyPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("6c5fcd82"),
+ }, 0, errors.New("energy required exceeds allowance (100000)")},
+
+ {"Assert", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 100000,
+ EnergyPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("a842ede3"),
+ }, 0, errors.New("always failing transaction (execution reverted) (0x4b1f2ce30000000000000000000000000000000000000000000000000000000000000001)")},
+
+ {"Valid", gocore.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Energy: 100000,
+ EnergyPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("96be6e03"),
+ }, 21252, nil},
+ }
+ for _, c := range cases {
+ got, err := sim.EstimateEnergy(context.Background(), c.message)
+ if c.expectError != nil {
+ if err == nil {
+ t.Fatalf("Expect error, got nil")
+ }
+ if c.expectError.Error() != err.Error() {
+ t.Fatalf("Expect error, want %v, got %v", c.expectError, err)
+ }
+ continue
+ }
+ if got != c.expect {
+ t.Fatalf("Energy estimation mismatch, want %d, got %d", c.expect, got)
+ }
}
}
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 2c268682a..47f4150f6 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -171,12 +171,24 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
if err != nil {
return nil, err
}
+ // todo(error2215) check whether the method is payable or not,
+ // reject invalid transactions in the first place
return c.transact(opts, &c.address, input)
}
+// RawTransact initiates a transaction with the given raw calldata as the input.
+// It's usually used to initiate transactions for invoking the **Fallback** function.
+func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
+ // todo(error2215) check whether the method is payable or not,
+ // reject invalid transactions in the first place
+ return c.transact(opts, &c.address, calldata)
+}
+
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) {
+ // todo(error2215) check whether a payable fallback or receive function
+ // is defined or not, reject invalid transactions in the first place
return c.transact(opts, &c.address, nil)
}
@@ -252,7 +264,7 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
opts = new(FilterOpts)
}
// Append the event selector to the query parameters and construct the topic set
- query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+ query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := makeTopics(query...)
if err != nil {
@@ -301,7 +313,7 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
opts = new(WatchOpts)
}
// Append the event selector to the query parameters and construct the topic set
- query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+ query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := makeTopics(query...)
if err != nil {
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index 447a804d7..cf7b0fda2 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -77,6 +77,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
calls = make(map[string]*tmplMethod)
transacts = make(map[string]*tmplMethod)
events = make(map[string]*tmplEvent)
+ fallback *tmplMethod
+ receive *tmplMethod
// identifiers are used to detect duplicated identifier of function
// and event. For all calls, transacts and events, abigen will generate
@@ -92,7 +94,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
// Ensure there is no duplicated identifier
var identifiers = callIdentifiers
- if !original.Const {
+ if !original.IsConstant() {
identifiers = transactIdentifiers
}
if identifiers[normalizedName] {
@@ -121,7 +123,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
}
}
// Append the methods to the call or transact lists
- if original.Const {
+ if original.IsConstant() {
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
} else {
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
@@ -156,7 +158,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
// Append the event to the accumulator list
events[original.Name] = &tmplEvent{Original: original, Normalized: normalized}
}
-
+ // Add the two special functions (fallback and receive) if they exist
+ if cvmABI.HasFallback() {
+ fallback = &tmplMethod{Original: cvmABI.Fallback}
+ }
+ if cvmABI.HasReceive() {
+ receive = &tmplMethod{Original: cvmABI.Receive}
+ }
// There is no easy way to pass arbitrary java objects to the Go side.
if len(structs) > 0 && lang == LangJava {
return "", errors.New("java binding for tuple arguments is not supported yet")
@@ -169,6 +177,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
Constructor: cvmABI.Constructor,
Calls: calls,
Transacts: transacts,
+ Fallback: fallback,
+ Receive: receive,
Events: events,
Libraries: make(map[string]string),
}
@@ -619,11 +629,22 @@ func formatMethod(method abi.Method, structs map[string]*tmplStruct) string {
outputs[i] += fmt.Sprintf(" %v", output.Name)
}
}
- constant := ""
- if method.Const {
- constant = "constant "
+ // Extract the meaningful state mutability of the Solidity method.
+ // If it's the default value, never print it.
+ state := method.StateMutability
+ if state == "nonpayable" {
+ state = ""
+ }
+ if state != "" {
+ state = state + " "
+ }
+ identity := fmt.Sprintf("function %v", method.RawName)
+ if method.Type == abi.Fallback {
+ identity = "fallback"
+ } else if method.Type == abi.Receive {
+ identity = "receive"
}
- return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
+ return fmt.Sprintf("%s(%v) %sreturns(%v)", identity, strings.Join(inputs, ", "), state, strings.Join(outputs, ", "))
}
// formatEvent transforms raw event representation into a user friendly one.
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 2fd8dea8d..6627e6f88 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -183,7 +183,8 @@ var bindTests = []struct {
{"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]},
{"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]},
{"type":"event","name":"anonymous","anonymous":true,"inputs":[]},
- {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}
+ {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]},
+ {"type":"event","name":"unnamed","inputs":[{"name":"","type":"uint256","indexed": true},{"name":"","type":"uint256","indexed":true}]}
]
`},
`
@@ -233,6 +234,11 @@ var bindTests = []struct {
fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present
fmt.Println(res, str, dat, hash, err)
+
+ oit, err := e.FilterUnnamed(nil, []*big.Int{}, []*big.Int{})
+ arg0 := oit.Event.Arg0 // Make sure unnamed arguments are handled correctly
+ arg1 := oit.Event.Arg1 // Make sure unnamed arguments are handled correctly
+ fmt.Println(arg0, arg1)
}
// Run a tiny reflection test to ensure disallowed methods don't appear
if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok {
@@ -1598,6 +1604,103 @@ var bindTests = []struct {
nil,
nil,
},
+ // Test fallback separation introduced in v0.6.0
+ {
+ `NewFallbacks`,
+ `
+ pragma solidity >=0.6.0 <0.7.0;
+
+ contract NewFallbacks {
+ event Fallback(bytes data);
+ fallback() external {
+ bytes memory data;
+ assembly {
+ calldatacopy(data, 0, calldatasize())
+ }
+ emit Fallback(data);
+ }
+
+ event Received(address addr, uint value);
+ receive() external payable {
+ emit Received(msg.sender, msg.value);
+ }
+ }
+ `,
+ []string{"608060405234801561001057600080fd5b50610230806100206000396000f3fe608060405236610044577fb4764187bbbca84b57e9671514c33bdb82a80b7ae801dc5bbeab272a07868ce3333460405161003a9291906100e9565b60405180910390a1005b34801561005057600080fd5b50606036600082377fc5f892623b9cf327459605db591333292717e25ed9606e17f41a7a395784aaf8816040516100879190610112565b60405180910390a150005b61009b81610150565b82525050565b60006100ac82610134565b6100b6818561013f565b93506100c681856020860161018e565b6100cf816101c1565b840191505092915050565b6100e381610184565b82525050565b60006040820190506100fe6000830185610092565b61010b60208301846100da565b9392505050565b6000602082019050818103600083015261012c81846100a1565b905092915050565b600081519050919050565b600082825260208201905092915050565b600061015b82610162565b9050919050565b600075ffffffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60005b838110156101ac578082015181840152602081019050610191565b838111156101bb576000848401525b50505050565b6000601f19601f830116905091905056fea264697066735822122068573f19c872bcc9beb1e92de46f5bac58be7c913a517086ec850e7d387c81ff64736f6c63782a302e382e342d646576656c6f702e323032322e372e362b636f6d6d69742e30353336326564342e6d6f64005b"},
+ []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`},
+ `
+ "bytes"
+ "math/big"
+ "crypto/rand"
+
+ eddsa "github.com/core-coin/go-goldilocks"
+
+ "github.com/core-coin/go-core/accounts/abi/bind"
+ "github.com/core-coin/go-core/accounts/abi/bind/backends"
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/crypto"
+ `,
+ `
+ key, _ := crypto.GenerateKey(rand.Reader)
+ pub := eddsa.Ed448DerivePublicKey(*key)
+ addr := crypto.PubkeyToAddress(pub)
+
+ sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 1000000)
+ defer sim.Close()
+
+ opts := bind.NewKeyedTransactor(key)
+ _, _, c, err := DeployNewFallbacks(opts, sim)
+ if err != nil {
+ t.Fatalf("Failed to deploy contract: %v", err)
+ }
+ sim.Commit()
+
+ // Test receive function
+ opts.Value = big.NewInt(100)
+ c.Receive(opts)
+ sim.Commit()
+
+ var gotEvent bool
+ iter, _ := c.FilterReceived(nil)
+ defer iter.Close()
+ for iter.Next() {
+ if iter.Event.Addr != addr {
+ t.Fatal("Msg.sender mismatch")
+ }
+ if iter.Event.Value.Uint64() != 100 {
+ t.Fatal("Msg.value mismatch")
+ }
+ gotEvent = true
+ break
+ }
+ if !gotEvent {
+ t.Fatal("Expect to receive event emitted by receive")
+ }
+
+ // Test fallback function
+ opts.Value = nil
+ calldata := []byte{0x01, 0x02, 0x03}
+ c.Fallback(opts, calldata)
+ sim.Commit()
+
+ iter2, _ := c.FilterFallback(nil)
+ defer iter2.Close()
+ for iter2.Next() {
+ if !bytes.Equal(iter2.Event.Data, calldata) {
+ t.Fatal("calldata mismatch")
+ }
+ gotEvent = true
+ break
+ }
+ if !gotEvent {
+ t.Fatal("Expect to receive event emitted by fallback")
+ }
+ `,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
}
// Tests that packages generated by the binder can be successfully compiled and
@@ -1613,7 +1716,7 @@ func TestGolangBindings(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
- //defer os.RemoveAll(ws)
+ defer os.RemoveAll(ws)
pkg := filepath.Join(ws, "bindtest")
if err = os.MkdirAll(pkg, 0700); err != nil {
@@ -1676,404 +1779,3 @@ func TestGolangBindings(t *testing.T) {
t.Fatalf("failed to run binding test: %v\n%s", err, out)
}
}
-
-// Tests that java binding generated by the binder is exactly matched.
-func TestJavaBindings(t *testing.T) {
- var cases = []struct {
- name string
- contract string
- abi string
- bytecode string
- expected string
- }{
- {
- "test",
- `
- pragma experimental ABIEncoderV2;
- pragma solidity ^0.5.2;
-
- contract test {
- function setAddress(address a) public returns(address){}
- function setAddressList(address[] memory a_l) public returns(address[] memory){}
- function setAddressArray(address[2] memory a_a) public returns(address[2] memory){}
-
- function setUint8(uint8 u8) public returns(uint8){}
- function setUint16(uint16 u16) public returns(uint16){}
- function setUint32(uint32 u32) public returns(uint32){}
- function setUint64(uint64 u64) public returns(uint64){}
- function setUint256(uint256 u256) public returns(uint256){}
- function setUint256List(uint256[] memory u256_l) public returns(uint256[] memory){}
- function setUint256Array(uint256[2] memory u256_a) public returns(uint256[2] memory){}
-
- function setInt8(int8 i8) public returns(int8){}
- function setInt16(int16 i16) public returns(int16){}
- function setInt32(int32 i32) public returns(int32){}
- function setInt64(int64 i64) public returns(int64){}
- function setInt256(int256 i256) public returns(int256){}
- function setInt256List(int256[] memory i256_l) public returns(int256[] memory){}
- function setInt256Array(int256[2] memory i256_a) public returns(int256[2] memory){}
-
- function setBytes1(bytes1 b1) public returns(bytes1) {}
- function setBytes32(bytes32 b32) public returns(bytes32) {}
- function setBytes(bytes memory bs) public returns(bytes memory) {}
- function setBytesList(bytes[] memory bs_l) public returns(bytes[] memory) {}
- function setBytesArray(bytes[2] memory bs_a) public returns(bytes[2] memory) {}
-
- function setString(string memory s) public returns(string memory) {}
- function setStringList(string[] memory s_l) public returns(string[] memory) {}
- function setStringArray(string[2] memory s_a) public returns(string[2] memory) {}
-
- function setBool(bool b) public returns(bool) {}
- function setBoolList(bool[] memory b_l) public returns(bool[] memory) {}
- function setBoolArray(bool[2] memory b_a) public returns(bool[2] memory) {}
- }`,
- `[{"constant":false,"inputs":[{"name":"u16","type":"uint16"}],"name":"setUint16","outputs":[{"name":"","type":"uint16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_a","type":"bool[2]"}],"name":"setBoolArray","outputs":[{"name":"","type":"bool[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_a","type":"address[2]"}],"name":"setAddressArray","outputs":[{"name":"","type":"address[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_l","type":"bytes[]"}],"name":"setBytesList","outputs":[{"name":"","type":"bytes[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u8","type":"uint8"}],"name":"setUint8","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u32","type":"uint32"}],"name":"setUint32","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b","type":"bool"}],"name":"setBool","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_l","type":"int256[]"}],"name":"setInt256List","outputs":[{"name":"","type":"int256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_a","type":"uint256[2]"}],"name":"setUint256Array","outputs":[{"name":"","type":"uint256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_l","type":"bool[]"}],"name":"setBoolList","outputs":[{"name":"","type":"bool[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_a","type":"bytes[2]"}],"name":"setBytesArray","outputs":[{"name
":"","type":"bytes[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_l","type":"address[]"}],"name":"setAddressList","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_a","type":"int256[2]"}],"name":"setInt256Array","outputs":[{"name":"","type":"int256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_a","type":"string[2]"}],"name":"setStringArray","outputs":[{"name":"","type":"string[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s","type":"string"}],"name":"setString","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u64","type":"uint64"}],"name":"setUint64","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i16","type":"int16"}],"name":"setInt16","outputs":[{"name":"","type":"int16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i8","type":"int8"}],"name":"setInt8","outputs":[{"name":"","type":"int8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_l","type":"uint256[]"}],"name":"setUint256List","outputs":[{"name":"","type":"uint256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256","type":"int256"}],"name":"setInt256","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i32","type":"int32"}],"name":"setInt32","outputs":[{"name":"","type":"int32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inpu
ts":[{"name":"b32","type":"bytes32"}],"name":"setBytes32","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_l","type":"string[]"}],"name":"setStringList","outputs":[{"name":"","type":"string[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256","type":"uint256"}],"name":"setUint256","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs","type":"bytes"}],"name":"setBytes","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a","type":"address"}],"name":"setAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i64","type":"int64"}],"name":"setInt64","outputs":[{"name":"","type":"int64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b1","type":"bytes1"}],"name":"setBytes1","outputs":[{"name":"","type":"bytes1"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`,
- `608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35
b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b606
0919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b838110156
10bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b5
0505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b6
0006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b915
0602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b6
11b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b600
0602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b60006
0029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037`,
- `
-// This file is an automatically generated Java binding. Do not modify as any
-// change will likely be lost upon the next re-generation!
-
-package bindtest;
-
-import cc.coreblockchain.gocore.*;
-import java.util.*;
-
-
-
-public class Test {
- // ABI is the input ABI used to generate the binding from.
- public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable
\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"typ
e\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",\"outputs\":[{\"name\":\
"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]";
-
-
- // BYTECODE is the compiled bytecode used for deploying new contracts.
- public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b604
0516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565
b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526
020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b915080825260208301602083018583830111156
10f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b611
35c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611
743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b8
08452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b60006020820190506
11f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029
050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037";
-
- // deploy deploys a new Core contract, binding an instance of Test to it.
- public static Test deploy(TransactOpts auth, CoreClient client) throws Exception {
- Interfaces args = Gocore.newInterfaces(0);
- String bytecode = BYTECODE;
-
-
- return new Test(Gocore.deployContract(auth, ABI, Gocore.decodeFromHex(bytecode), client, args));
- }
-
- // Internal constructor used by contract deployment.
- private Test(BoundContract deployment) {
- this.Address = deployment.getAddress();
- this.Deployer = deployment.getDeployer();
- this.Contract = deployment;
- }
-
-
- // Core address where this contract is located at.
- public final Address Address;
-
- // Core transaction in which this contract was deployed (if known!).
- public final Transaction Deployer;
-
- // Contract instance bound to a blockchain address.
- private final BoundContract Contract;
-
- // Creates a new instance of Test, bound to a specific deployed contract.
- public Test(Address address, CoreClient client) throws Exception {
- this(Gocore.bindContract(address, ABI, client));
- }
-
-
-
-
- // setAddress is a paid mutator transaction binding the contract method 0x38b3d31e.
- //
- // Solidity: function setAddress(address a) returns(address)
- public Transaction setAddress(TransactOpts opts, Address a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setAddress(a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setAddress" , args);
- }
-
- // setAddressArray is a paid mutator transaction binding the contract method 0xd1b7ed97.
- //
- // Solidity: function setAddressArray(address[2] a_a) returns(address[2])
- public Transaction setAddressArray(TransactOpts opts, Addresses a_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setAddresses(a_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setAddressArray" , args);
- }
-
- // setAddressList is a paid mutator transaction binding the contract method 0x904faf03.
- //
- // Solidity: function setAddressList(address[] a_l) returns(address[])
- public Transaction setAddressList(TransactOpts opts, Addresses a_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setAddresses(a_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setAddressList" , args);
- }
-
- // setBool is a paid mutator transaction binding the contract method 0x4dd024a0.
- //
- // Solidity: function setBool(bool b) returns(bool)
- public Transaction setBool(TransactOpts opts, boolean b) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBool(b);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBool" , args);
- }
-
- // setBoolArray is a paid mutator transaction binding the contract method 0xd6c63e40.
- //
- // Solidity: function setBoolArray(bool[2] b_a) returns(bool[2])
- public Transaction setBoolArray(TransactOpts opts, Bools b_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBools(b_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBoolArray" , args);
- }
-
- // setBoolList is a paid mutator transaction binding the contract method 0xaffde4fb.
- //
- // Solidity: function setBoolList(bool[] b_l) returns(bool[])
- public Transaction setBoolList(TransactOpts opts, Bools b_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBools(b_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBoolList" , args);
- }
-
- // setBytes is a paid mutator transaction binding the contract method 0xa29012c8.
- //
- // Solidity: function setBytes(bytes bs) returns(bytes)
- public Transaction setBytes(TransactOpts opts, byte[] bs) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBinary(bs);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBytes" , args);
- }
-
- // setBytes1 is a paid mutator transaction binding the contract method 0xc04b5fb7.
- //
- // Solidity: function setBytes1(bytes1 b1) returns(bytes1)
- public Transaction setBytes1(TransactOpts opts, byte[] b1) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBinary(b1);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBytes1" , args);
- }
-
- // setBytes32 is a paid mutator transaction binding the contract method 0x73f74fb1.
- //
- // Solidity: function setBytes32(bytes32 b32) returns(bytes32)
- public Transaction setBytes32(TransactOpts opts, byte[] b32) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBinary(b32);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBytes32" , args);
- }
-
- // setBytesArray is a paid mutator transaction binding the contract method 0x28d422d6.
- //
- // Solidity: function setBytesArray(bytes[2] bs_a) returns(bytes[2])
- public Transaction setBytesArray(TransactOpts opts, Binaries bs_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBinaries(bs_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBytesArray" , args);
- }
-
- // setBytesList is a paid mutator transaction binding the contract method 0xd7d2a529.
- //
- // Solidity: function setBytesList(bytes[] bs_l) returns(bytes[])
- public Transaction setBytesList(TransactOpts opts, Binaries bs_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBinaries(bs_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setBytesList" , args);
- }
-
- // setInt16 is a paid mutator transaction binding the contract method 0xaf48b6f1.
- //
- // Solidity: function setInt16(int16 i16) returns(int16)
- public Transaction setInt16(TransactOpts opts, short i16) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setInt16(i16);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt16" , args);
- }
-
- // setInt256 is a paid mutator transaction binding the contract method 0xee27bc3c.
- //
- // Solidity: function setInt256(int256 i256) returns(int256)
- public Transaction setInt256(TransactOpts opts, BigInt i256) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInt(i256);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt256" , args);
- }
-
- // setInt256Array is a paid mutator transaction binding the contract method 0x1ff2e90c.
- //
- // Solidity: function setInt256Array(int256[2] i256_a) returns(int256[2])
- public Transaction setInt256Array(TransactOpts opts, BigInts i256_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInts(i256_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt256Array" , args);
- }
-
- // setInt256List is a paid mutator transaction binding the contract method 0x22b4da49.
- //
- // Solidity: function setInt256List(int256[] i256_l) returns(int256[])
- public Transaction setInt256List(TransactOpts opts, BigInts i256_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInts(i256_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt256List" , args);
- }
-
- // setInt32 is a paid mutator transaction binding the contract method 0x2d4e1848.
- //
- // Solidity: function setInt32(int32 i32) returns(int32)
- public Transaction setInt32(TransactOpts opts, int i32) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setInt32(i32);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt32" , args);
- }
-
- // setInt64 is a paid mutator transaction binding the contract method 0x4658e91d.
- //
- // Solidity: function setInt64(int64 i64) returns(int64)
- public Transaction setInt64(TransactOpts opts, long i64) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setInt64(i64);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt64" , args);
- }
-
- // setInt8 is a paid mutator transaction binding the contract method 0xcaf9aa96.
- //
- // Solidity: function setInt8(int8 i8) returns(int8)
- public Transaction setInt8(TransactOpts opts, byte i8) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setInt8(i8);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setInt8" , args);
- }
-
- // setString is a paid mutator transaction binding the contract method 0x0a8de7d6.
- //
- // Solidity: function setString(string s) returns(string)
- public Transaction setString(TransactOpts opts, String s) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setString(s);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setString" , args);
- }
-
- // setStringArray is a paid mutator transaction binding the contract method 0xda9f56a3.
- //
- // Solidity: function setStringArray(string[2] s_a) returns(string[2])
- public Transaction setStringArray(TransactOpts opts, Strings s_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setStrings(s_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setStringArray" , args);
- }
-
- // setStringList is a paid mutator transaction binding the contract method 0x04345454.
- //
- // Solidity: function setStringList(string[] s_l) returns(string[])
- public Transaction setStringList(TransactOpts opts, Strings s_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setStrings(s_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setStringList" , args);
- }
-
- // setUint16 is a paid mutator transaction binding the contract method 0x1a8f73ff.
- //
- // Solidity: function setUint16(uint16 u16) returns(uint16)
- public Transaction setUint16(TransactOpts opts, BigInt u16) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setUint16(u16);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint16" , args);
- }
-
- // setUint256 is a paid mutator transaction binding the contract method 0x60ff6c02.
- //
- // Solidity: function setUint256(uint256 u256) returns(uint256)
- public Transaction setUint256(TransactOpts opts, BigInt u256) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInt(u256);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint256" , args);
- }
-
- // setUint256Array is a paid mutator transaction binding the contract method 0x47122832.
- //
- // Solidity: function setUint256Array(uint256[2] u256_a) returns(uint256[2])
- public Transaction setUint256Array(TransactOpts opts, BigInts u256_a) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInts(u256_a);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint256Array" , args);
- }
-
- // setUint256List is a paid mutator transaction binding the contract method 0x8cb906c1.
- //
- // Solidity: function setUint256List(uint256[] u256_l) returns(uint256[])
- public Transaction setUint256List(TransactOpts opts, BigInts u256_l) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setBigInts(u256_l);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint256List" , args);
- }
-
- // setUint32 is a paid mutator transaction binding the contract method 0x1daf92c8.
- //
- // Solidity: function setUint32(uint32 u32) returns(uint32)
- public Transaction setUint32(TransactOpts opts, BigInt u32) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setUint32(u32);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint32" , args);
- }
-
- // setUint64 is a paid mutator transaction binding the contract method 0xcfd9f34b.
- //
- // Solidity: function setUint64(uint64 u64) returns(uint64)
- public Transaction setUint64(TransactOpts opts, BigInt u64) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setUint64(u64);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint64" , args);
- }
-
- // setUint8 is a paid mutator transaction binding the contract method 0x0e4a91fa.
- //
- // Solidity: function setUint8(uint8 u8) returns(uint8)
- public Transaction setUint8(TransactOpts opts, BigInt u8) throws Exception {
- Interfaces args = Gocore.newInterfaces(1);
- Interface arg0 = Gocore.newInterface();arg0.setUint8(u8);args.set(0,arg0);
-
- return this.Contract.transact(opts, "setUint8" , args);
- }
-
-}
-
-`,
- },
- }
- for i, c := range cases {
- binding, err := Bind([]string{c.name}, []string{c.abi}, []string{c.bytecode}, nil, "bindtest", LangJava, nil, nil)
- if err != nil {
- t.Fatalf("test %d: failed to generate binding: %v", i, err)
- }
- if binding != c.expected {
- t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected)
- }
- }
-}
diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go
index 6d88d6845..9aafe8f6c 100644
--- a/accounts/abi/bind/template.go
+++ b/accounts/abi/bind/template.go
@@ -35,6 +35,8 @@ type tmplContract struct {
Constructor abi.Method // Contract constructor for deploy parametrization
Calls map[string]*tmplMethod // Contract calls that only read state data
Transacts map[string]*tmplMethod // Contract calls that write state data
+ Fallback *tmplMethod // Additional special fallback function
+ Receive *tmplMethod // Additional special receive function
Events map[string]*tmplEvent // Contract events accessors
Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs
Library bool // Indicator whether the contract is a library
@@ -351,6 +353,49 @@ var (
}
{{end}}
+ {{if .Fallback}}
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{formatmethod .Fallback.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
+ return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
+ }
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{formatmethod .Fallback.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
+ }
+
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{formatmethod .Fallback.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
+ }
+ {{end}}
+ {{if .Receive}}
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{formatmethod .Receive.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
+ return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
+ }
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{formatmethod .Receive.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
+ }
+
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{formatmethod .Receive.Original $structs}}
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
+ }
+ {{end}}
+
{{range .Events}}
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
@@ -611,6 +656,22 @@ import java.util.*;
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
}
{{end}}
-}
+ {{if .Fallback}}
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{formatmethod .Fallback.Original $structs}}
+ public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
+ return this.Contract.rawTransact(opts, calldata);
+ }
+ {{end}}
+ {{if .Receive}}
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{formatmethod .Receive.Original $structs}}
+ public Transaction Receive(TransactOpts opts) throws Exception {
+ return this.Contract.rawTransact(opts, null);
+ }
+ {{end}}
+ }
{{end}}
`
diff --git a/accounts/abi/event.go b/accounts/abi/event.go
index 6cee51796..53b071874 100644
--- a/accounts/abi/event.go
+++ b/accounts/abi/event.go
@@ -42,36 +42,60 @@ type Event struct {
RawName string
Anonymous bool
Inputs Arguments
+
+ str string
+ // Sig contains the string signature according to the ABI spec.
+ // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
+ // Please note that "int" is substitute for its canonical representation "int256"
+ Sig string
+ // ID returns the canonical representation of the event's signature used by the
+ // abi definition to identify event names and types.
+ ID common.Hash
}
-func (e Event) String() string {
- inputs := make([]string, len(e.Inputs))
- for i, input := range e.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
+// NewEvent creates a new Event.
+// It sanitizes the input arguments to remove unnamed arguments.
+// It also precomputes the id, signature and string representation
+// of the event.
+func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event {
+ // sanitize inputs to remove inputs without names
+ // and precompute string and sig representation.
+ names := make([]string, len(inputs))
+ types := make([]string, len(inputs))
+ for i, input := range inputs {
+ if input.Name == "" {
+ inputs[i] = Argument{
+ Name: fmt.Sprintf("arg%d", i),
+ Indexed: input.Indexed,
+ Type: input.Type,
+ }
+ } else {
+ inputs[i] = input
+ }
+ // string representation
+ names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
- inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
+ names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
+ // sig representation
+ types[i] = input.Type.String()
}
- return fmt.Sprintf("event %v(%v)", e.RawName, strings.Join(inputs, ", "))
-}
-// Sig returns the event string signature according to the ABI spec.
-//
-// Example
-//
-// event foo(uint32 a, int b) = "foo(uint32,int256)"
-//
-// Please note that "int" is substitute for its canonical representation "int256"
-func (e Event) Sig() string {
- types := make([]string, len(e.Inputs))
- for i, input := range e.Inputs {
- types[i] = input.Type.String()
+ str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", "))
+ sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
+ id := common.BytesToHash(crypto.SHA3([]byte(sig)))
+
+ return Event{
+ Name: name,
+ RawName: rawName,
+ Anonymous: anonymous,
+ Inputs: inputs,
+ str: str,
+ Sig: sig,
+ ID: id,
}
- return fmt.Sprintf("%v(%v)", e.RawName, strings.Join(types, ","))
}
-// ID returns the canonical representation of the event's signature used by the
-// abi definition to identify event names and types.
-func (e Event) ID() common.Hash {
- return common.BytesToHash(crypto.SHA3([]byte(e.Sig())))
+func (e Event) String() string {
+ return e.str
}
diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go
index 9c0252ed0..a60598eaa 100644
--- a/accounts/abi/event_test.go
+++ b/accounts/abi/event_test.go
@@ -104,8 +104,8 @@ func TestEventId(t *testing.T) { //TODO: TEST
}
for name, event := range abi.Events {
- if event.ID() != test.expectations[name] {
- t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID())
+ if event.ID != test.expectations[name] {
+ t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID)
}
}
}
diff --git a/accounts/abi/method.go b/accounts/abi/method.go
index e915a6e91..241953cd4 100644
--- a/accounts/abi/method.go
+++ b/accounts/abi/method.go
@@ -23,6 +23,24 @@ import (
"github.com/core-coin/go-core/crypto"
)
+// FunctionType represents different types of functions a contract might have.
+type FunctionType int
+
+const (
+ // Constructor represents the constructor of the contract.
+ // The constructor function is called while deploying a contract.
+ Constructor FunctionType = iota
+ // Fallback represents the fallback function.
+ // This function is executed if no other function matches the given function
+ // signature and no receive function is specified.
+ Fallback
+ // Receive represents the receive function.
+ // This function is executed on plain Ether transfers.
+ Receive
+ // Function represents a normal function.
+ Function
+)
+
// Method represents a callable given a `Name` and whether the method is a constant.
// If the method is `Const` no transaction needs to be created for this
// particular Method call. It can easily be simulated using a local VM.
@@ -41,50 +59,110 @@ type Method struct {
// * foo(uint,uint)
// The method name of the first one will be resolved as foo while the second one
// will be resolved as foo0.
- Name string
- // RawName is the raw method name parsed from ABI.
- RawName string
- Const bool
+ Name string
+ RawName string // RawName is the raw method name parsed from ABI
+
+ // Type indicates whether the method is a
+ // special fallback introduced in solidity v0.6.0
+ Type FunctionType
+
+ // StateMutability indicates the mutability state of method,
+ // the default value is nonpayable. It can be empty if the abi
+ // is generated by legacy compiler.
+ StateMutability string
+
+ // Legacy indicators generated by compiler before v0.6.0
+ Constant bool
+ Payable bool
+
Inputs Arguments
Outputs Arguments
-}
-// Sig returns the methods string signature according to the ABI spec.
-//
-// Example
-//
-// function foo(uint32 a, int b) = "foo(uint32,int256)"
-//
-// Please note that "int" is substitute for its canonical representation "int256"
-func (method Method) Sig() string {
- types := make([]string, len(method.Inputs))
- for i, input := range method.Inputs {
- types[i] = input.Type.String()
- }
- return fmt.Sprintf("%v(%v)", method.RawName, strings.Join(types, ","))
+ str string
+ // Sig returns the methods string signature according to the ABI spec.
+ // e.g. function foo(uint32 a, int b) = "foo(uint32,int256)"
+ // Please note that "int" is substitute for its canonical representation "int256"
+ Sig string
+ // ID returns the canonical representation of the method's signature used by the
+ // abi definition to identify method names and types.
+ ID []byte
}
-func (method Method) String() string {
- inputs := make([]string, len(method.Inputs))
- for i, input := range method.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
+// NewMethod creates a new Method.
+// A method should always be created using NewMethod.
+// It also precomputes the sig representation and the string representation
+// of the method.
+func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method {
+ var (
+ types = make([]string, len(inputs))
+ inputNames = make([]string, len(inputs))
+ outputNames = make([]string, len(outputs))
+ )
+ for i, input := range inputs {
+ inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
+ types[i] = input.Type.String()
}
- outputs := make([]string, len(method.Outputs))
- for i, output := range method.Outputs {
- outputs[i] = output.Type.String()
+ for i, output := range outputs {
+ outputNames[i] = output.Type.String()
if len(output.Name) > 0 {
- outputs[i] += fmt.Sprintf(" %v", output.Name)
+ outputNames[i] += fmt.Sprintf(" %v", output.Name)
}
}
- constant := ""
- if method.Const {
- constant = "constant "
+ // calculate the signature and method id. Note only function
+ // has meaningful signature and id.
+ var (
+ sig string
+ id []byte
+ )
+ if funType == Function {
+ sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
+ id = crypto.SHA3([]byte(sig))[:4]
+ }
+ // Extract meaningful state mutability of solidity method.
+ // If it's default value, never print it.
+ state := mutability
+ if state == "nonpayable" {
+ state = ""
+ }
+ if state != "" {
+ state = state + " "
+ }
+ identity := fmt.Sprintf("function %v", rawName)
+ if funType == Fallback {
+ identity = "fallback"
+ } else if funType == Receive {
+ identity = "receive"
+ } else if funType == Constructor {
+ identity = "constructor"
+ }
+ str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
+
+ return Method{
+ Name: name,
+ RawName: rawName,
+ Type: funType,
+ StateMutability: mutability,
+ Constant: isConst,
+ Payable: isPayable,
+ Inputs: inputs,
+ Outputs: outputs,
+ str: str,
+ Sig: sig,
+ ID: id,
}
- return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
}
-// ID returns the canonical representation of the method's signature used by the
-// abi definition to identify method names and types.
-func (method Method) ID() []byte {
- return crypto.SHA3([]byte(method.Sig()))[:4]
+func (method Method) String() string {
+ return method.str
+}
+
+// IsConstant returns the indicator whether the method is read-only.
+func (method Method) IsConstant() bool {
+ return method.StateMutability == "view" || method.StateMutability == "pure" || method.Constant
+}
+
+// IsPayable returns the indicator whether the method can process
+// plain ether transfers.
+func (method Method) IsPayable() bool {
+ return method.StateMutability == "payable" || method.Payable
}
diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go
index d9bc3609e..cb19bd187 100644
--- a/accounts/abi/method_test.go
+++ b/accounts/abi/method_test.go
@@ -23,13 +23,15 @@ import (
const methoddata = `
[
- {"type": "function", "name": "balance", "constant": true },
- {"type": "function", "name": "send", "constant": false, "inputs": [{ "name": "amount", "type": "uint256" }]},
- {"type": "function", "name": "transfer", "constant": false, "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]},
+ {"type": "function", "name": "balance", "stateMutability": "view"},
+ {"type": "function", "name": "send", "inputs": [{ "name": "amount", "type": "uint256" }]},
+ {"type": "function", "name": "transfer", "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]},
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple"}],"name":"tuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[]"}],"name":"tupleSlice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
{"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5]"}],"name":"tupleArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
- {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}
+ {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},
+ {"stateMutability":"nonpayable","type":"fallback"},
+ {"stateMutability":"payable","type":"receive"}
]`
func TestMethodString(t *testing.T) {
@@ -39,7 +41,7 @@ func TestMethodString(t *testing.T) {
}{
{
method: "balance",
- expectation: "function balance() constant returns()",
+ expectation: "function balance() view returns()",
},
{
method: "send",
@@ -65,6 +67,14 @@ func TestMethodString(t *testing.T) {
method: "complexTuple",
expectation: "function complexTuple((uint256,uint256)[5][] a) returns()",
},
+ {
+ method: "fallback",
+ expectation: "fallback() returns()",
+ },
+ {
+ method: "receive",
+ expectation: "receive() payable returns()",
+ },
}
abi, err := JSON(strings.NewReader(methoddata))
@@ -73,7 +83,14 @@ func TestMethodString(t *testing.T) {
}
for _, test := range table {
- got := abi.Methods[test.method].String()
+ var got string
+ if test.method == "fallback" {
+ got = abi.Fallback.String()
+ } else if test.method == "receive" {
+ got = abi.Receive.String()
+ } else {
+ got = abi.Methods[test.method].String()
+ }
if got != test.expectation {
t.Errorf("expected string to be %s, got %s", test.expectation, got)
}
@@ -120,7 +137,7 @@ func TestMethodSig(t *testing.T) {
}
for _, test := range cases {
- got := abi.Methods[test.method].Sig()
+ got := abi.Methods[test.method].Sig
if got != test.expect {
t.Errorf("expected string to be %s, got %s", test.expect, got)
}
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 9b12e2f18..24acd00b9 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -634,7 +634,7 @@ func TestMethodPack(t *testing.T) {
t.Fatal(err)
}
- sig := abi.Methods["slice"].ID()
+ sig := abi.Methods["slice"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -648,7 +648,7 @@ func TestMethodPack(t *testing.T) {
}
var addrA, addrB = common.Address{1}, common.Address{2}
- sig = abi.Methods["sliceAddress"].ID()
+ sig = abi.Methods["sliceAddress"].ID
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
@@ -663,7 +663,7 @@ func TestMethodPack(t *testing.T) {
}
var addrC, addrD = common.Address{3}, common.Address{4}
- sig = abi.Methods["sliceMultiAddress"].ID()
+ sig = abi.Methods["sliceMultiAddress"].ID
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -681,7 +681,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["slice256"].ID()
+ sig = abi.Methods["slice256"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -695,7 +695,7 @@ func TestMethodPack(t *testing.T) {
}
a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
- sig = abi.Methods["nestedArray"].ID()
+ sig = abi.Methods["nestedArray"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -712,7 +712,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["nestedArray2"].ID()
+ sig = abi.Methods["nestedArray2"].ID
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
@@ -728,7 +728,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["nestedSlice"].ID()
+ sig = abi.Methods["nestedSlice"].ID
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 634311862..593d991b8 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -118,18 +118,16 @@ func requireAssignable(dst, src reflect.Value) error {
}
// requireUnpackKind verifies preconditions for unpacking `args` into `kind`
-func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
- args Arguments) error {
-
- switch k {
+func requireUnpackKind(v reflect.Value, minLength int, args Arguments) error {
+ switch v.Kind() {
case reflect.Struct:
case reflect.Slice, reflect.Array:
- if minLen := args.LengthNonIndexed(); v.Len() < minLen {
+ if v.Len() < minLength {
return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d",
- minLen, v.Len())
+ minLength, v.Len())
}
default:
- return fmt.Errorf("abi: cannot unmarshal tuple into %v", t)
+ return fmt.Errorf("abi: cannot unmarshal tuple into %v", v.Type())
}
return nil
}
@@ -156,9 +154,8 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
continue
}
// skip fields that have no abi:"" tag.
- var ok bool
- var tagName string
- if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+ tagName, ok := typ.Field(i).Tag.Lookup("abi")
+ if !ok {
continue
}
// check if tag is empty.
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 7aea78786..d72ffc4ae 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -443,7 +443,7 @@ var unpackTests = []unpackTest{
func TestUnpack(t *testing.T) {
for i, test := range unpackTests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
- def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
+ def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
abi, err := JSON(strings.NewReader(def))
if err != nil {
t.Fatalf("invalid ABI definition %s: %v", def, err)
@@ -522,7 +522,7 @@ type methodMultiOutput struct {
func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) {
const definition = `[
- { "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
+ { "name" : "multi", "type": "function", "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
var expected = methodMultiOutput{big.NewInt(1), "hello"}
abi, err := JSON(strings.NewReader(definition))
@@ -611,7 +611,7 @@ func TestMethodMultiReturn(t *testing.T) {
}
func TestMultiReturnWithArray(t *testing.T) {
- const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
+ const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
@@ -634,7 +634,7 @@ func TestMultiReturnWithArray(t *testing.T) {
}
func TestMultiReturnWithStringArray(t *testing.T) {
- const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
+ const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
@@ -668,7 +668,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
}
func TestMultiReturnWithStringSlice(t *testing.T) {
- const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
+ const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
@@ -704,7 +704,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
// values of nested static arrays count towards the size as well, and any element following
// after such nested array argument should be read with the correct offset,
// so that it does not read content from the previous array argument.
- const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
+ const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
@@ -741,15 +741,15 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
func TestUnmarshal(t *testing.T) {
const definition = `[
- { "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
- { "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] },
- { "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] },
- { "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] },
- { "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
- { "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] },
- { "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] },
- { "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
- { "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
+ { "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
+ { "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
+ { "name" : "bytes", "type": "function", "outputs": [ { "type": "bytes" } ] },
+ { "name" : "fixed", "type": "function", "outputs": [ { "type": "bytes32" } ] },
+ { "name" : "multi", "type": "function", "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
+ { "name" : "intArraySingle", "type": "function", "outputs": [ { "type": "uint256[3]" } ] },
+ { "name" : "addressSliceSingle", "type": "function", "outputs": [ { "type": "address[]" } ] },
+ { "name" : "addressSliceDouble", "type": "function", "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] },
+ { "name" : "mixedBytes", "type": "function", "stateMutability" : "view", "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
@@ -989,7 +989,7 @@ func TestUnmarshal(t *testing.T) {
}
func TestUnpackTuple(t *testing.T) {
- const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
+ const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
abi, err := JSON(strings.NewReader(simpleTuple))
if err != nil {
t.Fatal(err)
@@ -1018,7 +1018,7 @@ func TestUnpackTuple(t *testing.T) {
}
// Test nested tuple
- const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
+ const nestedTuple = `[{"name":"tuple","type":"function","outputs":[
{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
{"type":"uint256","name":"a"}
@@ -1140,7 +1140,7 @@ func TestOOMMaliciousInput(t *testing.T) {
},
}
for i, test := range oomTests {
- def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
+ def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
abi, err := JSON(strings.NewReader(def))
if err != nil {
t.Fatalf("invalid ABI definition %s: %v", def, err)
diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index d5f4f8b32..d90fcace7 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -130,7 +130,7 @@ func TestTimedUnlock(t *testing.T) {
}
// Signing with passphrase works
- if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
+ if err = ks.TimedUnlock(a1, pass, 200*time.Millisecond); err != nil {
t.Fatal(err)
}
@@ -141,7 +141,7 @@ func TestTimedUnlock(t *testing.T) {
}
// Signing fails again after automatic locking
- time.Sleep(250 * time.Millisecond)
+ time.Sleep(500 * time.Millisecond)
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err)
diff --git a/build/ci.go b/build/ci.go
index 150090e6e..5383c7bf0 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see .
+//go:build none
// +build none
/*
@@ -41,9 +42,6 @@ For all commands, -n prevents execution of external programs (dry run mode).
package main
import (
- "bufio"
- "bytes"
- "encoding/base64"
"flag"
"fmt"
"go/parser"
@@ -53,14 +51,11 @@ import (
"os"
"os/exec"
"path/filepath"
- "regexp"
"runtime"
"strings"
"time"
- "github.com/cespare/cp"
"github.com/core-coin/go-core/internal/build"
- "github.com/core-coin/go-core/params"
)
var (
@@ -80,63 +75,6 @@ var (
executablePath("rlpdump"),
executablePath("clef"),
}
-
- // A debian package is created for all executables listed here.
- debExecutables = []debExecutable{
- {
- BinaryName: "abigen",
- Description: "Source code generator to convert Core contract definitions into easy to use, compile-time type-safe Go packages.",
- },
- {
- BinaryName: "bootnode",
- Description: "Core bootnode.",
- },
- {
- BinaryName: "cvm",
- Description: "Developer utility version of the CVM (Core Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
- },
- {
- BinaryName: "gocore",
- Description: "Core CLI client.",
- },
- {
- BinaryName: "rlpdump",
- Description: "Developer utility tool that prints RLP structures.",
- },
- {
- BinaryName: "clef",
- Description: "Core account management tool.",
- },
- }
-
- // A debian package is created for all executables listed here.
- debCore = debPackage{
- Name: "go-core",
- Version: params.Version,
- Executables: debExecutables,
- }
-
- // Debian meta packages to build and push to Ubuntu PPA
- debPackages = []debPackage{
- debCore,
- }
-
- // Distros for which packages are created.
- // Note: vivid is unsupported because there is no golang-1.6 package for it.
- // Note: wily is unsupported because it was officially deprecated on Launchpad.
- // Note: yakkety is unsupported because it was officially deprecated on Launchpad.
- // Note: zesty is unsupported because it was officially deprecated on Launchpad.
- // Note: artful is unsupported because it was officially deprecated on Launchpad.
- // Note: cosmic is unsupported because it was officially deprecated on Launchpad.
- debDistroGoBoots = map[string]string{
- "xenial": "golang-go",
- "bionic": "golang-go",
- "focal": "golang-go",
- }
-
- debGoBootPaths = map[string]string{
- "golang-go": "/usr/local/go",
- }
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -164,16 +102,6 @@ func main() {
doTest(os.Args[2:])
case "lint":
doLint(os.Args[2:])
- case "archive":
- doArchive(os.Args[2:])
- case "debsrc":
- doDebianSource(os.Args[2:])
- case "nsis":
- doWindowsInstaller(os.Args[2:])
- case "aar":
- doAndroidArchive(os.Args[2:])
- case "xcode":
- doXCodeFramework(os.Args[2:])
case "xgo":
doXgo(os.Args[2:])
case "purge":
@@ -257,6 +185,7 @@ func doInstall(cmdline []string) {
func buildFlags(env build.Environment) (flags []string) {
var ld []string
if env.Commit != "" {
+ ld = append(ld, "-X", "main.gitTag="+env.Tag)
ld = append(ld, "-X", "main.gitCommit="+env.Commit)
ld = append(ld, "-X", "main.gitDate="+env.Date)
}
@@ -313,7 +242,7 @@ func doTest(cmdline []string) {
// Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load.
gotest := goTool("test", buildFlags(env)...)
- gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
+ gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "20m")
if *coverage {
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
}
@@ -359,175 +288,6 @@ func downloadLinter(cachedir string) string {
return filepath.Join(cachedir, base, "golangci-lint")
}
-// Release Packaging
-func doArchive(cmdline []string) {
- var (
- arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
- atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
- signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`)
- upload = flag.String("upload", "", `Destination to upload the archives (usually "gocorestore/builds")`)
- ext string
- )
- flag.CommandLine.Parse(cmdline)
- switch *atype {
- case "zip":
- ext = ".zip"
- case "tar":
- ext = ".tar.gz"
- default:
- log.Fatal("unknown archive type: ", atype)
- }
-
- var (
- env = build.Env()
-
- basegocore = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
- gocore = "gocore-" + basegocore + ext
- alltools = "gocore-alltools-" + basegocore + ext
- )
- maybeSkipArchive(env)
- if err := build.WriteArchive(gocore, gocoreArchiveFiles); err != nil {
- log.Fatal(err)
- }
- if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
- log.Fatal(err)
- }
- for _, archive := range []string{gocore, alltools} {
- if err := archiveUpload(archive, *upload, *signer); err != nil {
- log.Fatal(err)
- }
- }
-}
-
-func archiveBasename(arch string, archiveVersion string) string {
- platform := runtime.GOOS + "-" + arch
- if arch == "arm" {
- platform += os.Getenv("GOARM")
- }
- if arch == "android" {
- platform = "android-all"
- }
- if arch == "ios" {
- platform = "ios-all"
- }
- return platform + "-" + archiveVersion
-}
-
-func archiveUpload(archive string, blobstore string, signer string) error {
- // If signing was requested, generate the signature files
- if signer != "" {
- key := getenvBase64(signer)
- if err := build.PGPSignFile(archive, archive+".asc", string(key)); err != nil {
- return err
- }
- }
- // If uploading to Azure was requested, push the archive possibly with its signature
- if blobstore != "" {
- auth := build.AzureBlobstoreConfig{
- Account: strings.Split(blobstore, "/")[0],
- Token: os.Getenv("AZURE_BLOBSTORE_TOKEN"),
- Container: strings.SplitN(blobstore, "/", 2)[1],
- }
- if err := build.AzureBlobstoreUpload(archive, filepath.Base(archive), auth); err != nil {
- return err
- }
- if signer != "" {
- if err := build.AzureBlobstoreUpload(archive+".asc", filepath.Base(archive+".asc"), auth); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// skips archiving for some build configurations.
-func maybeSkipArchive(env build.Environment) {
- if env.IsPullRequest {
- log.Printf("skipping because this is a PR build")
- os.Exit(0)
- }
- if env.IsCronJob {
- log.Printf("skipping because this is a cron job")
- os.Exit(0)
- }
-}
-
-// Debian Packaging
-func doDebianSource(cmdline []string) {
- var (
- goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`)
- cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`)
- signer = flag.String("signer", "", `Signing key name, also used as package author`)
- upload = flag.String("upload", "", `Where to upload the source package (usually "core/core")`)
- sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "gocore-ci")`)
- workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
- now = time.Now()
- )
- flag.CommandLine.Parse(cmdline)
- *workdir = makeWorkdir(*workdir)
- env := build.Env()
- maybeSkipArchive(env)
-
- // Import the signing key.
- if key := getenvBase64("PPA_SIGNING_KEY"); len(key) > 0 {
- gpg := exec.Command("gpg", "--import")
- gpg.Stdin = bytes.NewReader(key)
- build.MustRun(gpg)
- }
-
- // Download and verify the Go source package.
- gobundle := downloadGoSources(*goversion, *cachedir)
-
- // Download all the dependencies needed to build the sources and run the ci script
- srcdepfetch := goTool("install", "-n", "./...")
- srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
- build.MustRun(srcdepfetch)
-
- cidepfetch := goTool("run", "./build/ci.go")
- cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
- cidepfetch.Run() // Command fails, don't care, we only need the deps to start it
-
- // Create Debian packages and upload them.
- for _, pkg := range debPackages {
- for distro, goboot := range debDistroGoBoots {
- // Prepare the debian package with the go-core sources.
- meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
- pkgdir := stageDebianSource(*workdir, meta)
-
- // Add Go source code
- if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil {
- log.Fatalf("Failed to extract Go sources: %v", err)
- }
- if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
- log.Fatalf("Failed to rename Go source folder: %v", err)
- }
- // Add all dependency modules in compressed form
- os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
- if err := cp.CopyAll(filepath.Join(pkgdir, ".mod", "cache", "download"), filepath.Join(*workdir, "modgopath", "pkg", "mod", "cache", "download")); err != nil {
- log.Fatalf("Failed to copy Go module dependencies: %v", err)
- }
- // Run the packaging and upload to the PPA
- debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz", "-nc")
- debuild.Dir = pkgdir
- build.MustRun(debuild)
-
- var (
- basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString())
- source = filepath.Join(*workdir, basename+".tar.xz")
- dsc = filepath.Join(*workdir, basename+".dsc")
- changes = filepath.Join(*workdir, basename+"_source.changes")
- buildinfo = filepath.Join(*workdir, basename+"_source.buildinfo")
- )
- if *signer != "" {
- build.MustRunCommand("debsign", changes)
- }
- if *upload != "" {
- ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes, buildinfo})
- }
- }
- }
-}
-
func downloadGoSources(version string, cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
file := fmt.Sprintf("go%s.src.tar.gz", version)
@@ -539,484 +299,7 @@ func downloadGoSources(version string, cachedir string) string {
return dst
}
-func ppaUpload(workdir, ppa, sshUser string, files []string) {
- p := strings.Split(ppa, "/")
- if len(p) != 2 {
- log.Fatal("-upload PPA name must contain single /")
- }
- if sshUser == "" {
- sshUser = p[0]
- }
- incomingDir := fmt.Sprintf("~%s/ubuntu/%s", p[0], p[1])
- // Create the SSH identity file if it doesn't exist.
- var idfile string
- if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
- idfile = filepath.Join(workdir, "sshkey")
- if _, err := os.Stat(idfile); os.IsNotExist(err) {
- ioutil.WriteFile(idfile, sshkey, 0600)
- }
- }
- // Upload
- dest := sshUser + "@ppa.launchpad.net"
- if err := build.UploadSFTP(idfile, dest, incomingDir, files); err != nil {
- log.Fatal(err)
- }
-}
-
-func getenvBase64(variable string) []byte {
- dec, err := base64.StdEncoding.DecodeString(os.Getenv(variable))
- if err != nil {
- log.Fatal("invalid base64 " + variable)
- }
- return []byte(dec)
-}
-
-func makeWorkdir(wdflag string) string {
- var err error
- if wdflag != "" {
- err = os.MkdirAll(wdflag, 0744)
- } else {
- wdflag, err = ioutil.TempDir("", "gocore-build-")
- }
- if err != nil {
- log.Fatal(err)
- }
- return wdflag
-}
-
-func isUnstableBuild(env build.Environment) bool {
- if env.Tag != "" {
- return false
- }
- return true
-}
-
-type debPackage struct {
- Name string // the name of the Debian package to produce, e.g. "core"
- Version string // the clean version of the debPackage, e.g. 1.8.12, without any metadata
- Executables []debExecutable // executables to be included in the package
-}
-
-type debMetadata struct {
- Env build.Environment
- GoBootPackage string
- GoBootPath string
-
- PackageName string
-
- // go-core version being built. Note that this
- // is not the debian package version. The package version
- // is constructed by VersionString.
- Version string
-
- Author string // "name ", also selects signing key
- Distro, Time string
- Executables []debExecutable
-}
-
-type debExecutable struct {
- PackageName string
- BinaryName string
- Description string
-}
-
-// Package returns the name of the package if present, or
-// fallbacks to BinaryName
-func (d debExecutable) Package() string {
- if d.PackageName != "" {
- return d.PackageName
- }
- return d.BinaryName
-}
-
-func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
- if author == "" {
- // No signing key, use default author.
- author = "Core Builds "
- }
- return debMetadata{
- GoBootPackage: goboot,
- GoBootPath: debGoBootPaths[goboot],
- PackageName: name,
- Env: env,
- Author: author,
- Distro: distro,
- Version: version,
- Time: t.Format(time.RFC1123Z),
- Executables: exes,
- }
-}
-
-// Name returns the name of the metapackage that depends
-// on all executable packages.
-func (meta debMetadata) Name() string {
- if isUnstableBuild(meta.Env) {
- return meta.PackageName + "-unstable"
- }
- return meta.PackageName
-}
-
-// VersionString returns the debian version of the packages.
-func (meta debMetadata) VersionString() string {
- vsn := meta.Version
- if meta.Env.Buildnum != "" {
- vsn += "+build" + meta.Env.Buildnum
- }
- if meta.Distro != "" {
- vsn += "+" + meta.Distro
- }
- return vsn
-}
-
-// ExeList returns the list of all executable packages.
-func (meta debMetadata) ExeList() string {
- names := make([]string, len(meta.Executables))
- for i, e := range meta.Executables {
- names[i] = meta.ExeName(e)
- }
- return strings.Join(names, ", ")
-}
-
-// ExeName returns the package name of an executable package.
-func (meta debMetadata) ExeName(exe debExecutable) string {
- if isUnstableBuild(meta.Env) {
- return exe.Package() + "-unstable"
- }
- return exe.Package()
-}
-
-// ExeConflicts returns the content of the Conflicts field
-// for executable packages.
-func (meta debMetadata) ExeConflicts(exe debExecutable) string {
- if isUnstableBuild(meta.Env) {
- // Set up the conflicts list so that the *-unstable packages
- // cannot be installed alongside the regular version.
- //
- // https://www.debian.org/doc/debian-policy/ch-relationships.html
- // is very explicit about Conflicts: and says that Breaks: should
- // be preferred and the conflicting files should be handled via
- // alternates. We might do this eventually but using a conflict is
- // easier now.
- return "core, " + exe.Package()
- }
- return ""
-}
-
-func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
- pkg := meta.Name() + "-" + meta.VersionString()
- pkgdir = filepath.Join(tmpdir, pkg)
- if err := os.Mkdir(pkgdir, 0755); err != nil {
- log.Fatal(err)
- }
- // Copy the source code.
- build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))
-
- // Put the debian build files in place.
- debian := filepath.Join(pkgdir, "debian")
- build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
- build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
- build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta)
- build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
- build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
- build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
- for _, exe := range meta.Executables {
- install := filepath.Join(debian, meta.ExeName(exe)+".install")
- docs := filepath.Join(debian, meta.ExeName(exe)+".docs")
- build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
- build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
- }
- return pkgdir
-}
-
-// Windows installer
-func doWindowsInstaller(cmdline []string) {
- // Parse the flags and make skip installer generation on PRs
- var (
- arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging")
- signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`)
- upload = flag.String("upload", "", `Destination to upload the archives (usually "gocorestore/builds")`)
- workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
- )
- flag.CommandLine.Parse(cmdline)
- *workdir = makeWorkdir(*workdir)
- env := build.Env()
- maybeSkipArchive(env)
-
- // Aggregate binaries that are included in the installer
- var (
- devTools []string
- allTools []string
- gocoreTool string
- )
- for _, file := range allToolsArchiveFiles {
- if file == "COPYING" { // license, copied later
- continue
- }
- allTools = append(allTools, filepath.Base(file))
- if filepath.Base(file) == "gocore.exe" {
- gocoreTool = file
- } else {
- devTools = append(devTools, file)
- }
- }
-
- // Render NSIS scripts: Installer NSIS contains two installer sections,
- // first section contains the gocore binary, second section holds the dev tools.
- templateData := map[string]interface{}{
- "License": "COPYING",
- "Gocore": gocoreTool,
- "DevTools": devTools,
- }
- build.Render("build/nsis.gocore.nsi", filepath.Join(*workdir, "gocore.nsi"), 0644, nil)
- build.Render("build/nsis.install.nsh", filepath.Join(*workdir, "install.nsh"), 0644, templateData)
- build.Render("build/nsis.uninstall.nsh", filepath.Join(*workdir, "uninstall.nsh"), 0644, allTools)
- build.Render("build/nsis.pathupdate.nsh", filepath.Join(*workdir, "PathUpdate.nsh"), 0644, nil)
- build.Render("build/nsis.envvarupdate.nsh", filepath.Join(*workdir, "EnvVarUpdate.nsh"), 0644, nil)
- if err := cp.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll"); err != nil {
- log.Fatal("Failed to copy SimpleFC.dll: %v", err)
- }
- if err := cp.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING"); err != nil {
- log.Fatal("Failed to copy copyright note: %v", err)
- }
- // Build the installer. This assumes that all the needed files have been previously
- // built (don't mix building and packaging to keep cross compilation complexity to a
- // minimum).
- version := strings.Split(params.Version, ".")
- if env.Commit != "" {
- version[2] += "-" + env.Commit[:8]
- }
- installer, _ := filepath.Abs("gocore-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
- build.MustRunCommand("makensis.exe",
- "/DOUTPUTFILE="+installer,
- "/DMAJORVERSION="+version[0],
- "/DMINORVERSION="+version[1],
- "/DBUILDVERSION="+version[2],
- "/DARCH="+*arch,
- filepath.Join(*workdir, "gocore.nsi"),
- )
- // Sign and publish installer.
- if err := archiveUpload(installer, *upload, *signer); err != nil {
- log.Fatal(err)
- }
-}
-
-// Android archives
-
-func doAndroidArchive(cmdline []string) {
- var (
- local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
- signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`)
- deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`)
- upload = flag.String("upload", "", `Destination to upload the archive (usually "gocorestore/builds")`)
- )
- flag.CommandLine.Parse(cmdline)
- env := build.Env()
-
- // Sanity check that the SDK and NDK are installed and set
- if os.Getenv("ANDROID_HOME") == "" {
- log.Fatal("Please ensure ANDROID_HOME points to your Android SDK")
- }
- // Build the Android archive and Maven resources
- build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
- build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "cc.coreblockchain", "-v", "github.com/core-coin/go-core/mobile"))
-
- if *local {
- // If we're building locally, copy bundle to build dir and skip Maven
- os.Rename("gocore.aar", filepath.Join(GOBIN, "gocore.aar"))
- return
- }
- meta := newMavenMetadata(env)
- build.Render("build/mvn.pom", meta.Package+".pom", 0755, meta)
-
- // Skip Maven deploy and Azure upload for PR builds
- maybeSkipArchive(env)
-
- // Sign and upload the archive to Azure
- archive := "gocore-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
- os.Rename("gocore.aar", archive)
-
- if err := archiveUpload(archive, *upload, *signer); err != nil {
- log.Fatal(err)
- }
- // Sign and upload all the artifacts to Maven Central
- os.Rename(archive, meta.Package+".aar")
- if *signer != "" && *deploy != "" {
- // Import the signing key into the local GPG instance
- key := getenvBase64(*signer)
- gpg := exec.Command("gpg", "--import")
- gpg.Stdin = bytes.NewReader(key)
- build.MustRun(gpg)
- keyID, err := build.PGPKeyID(string(key))
- if err != nil {
- log.Fatal(err)
- }
- // Upload the artifacts to Sonatype and/or Maven Central
- repo := *deploy + "/service/local/staging/deploy/maven2"
- if meta.Develop {
- repo = *deploy + "/content/repositories/snapshots"
- }
- build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
- "-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
- "-Dgpg.keyname="+keyID,
- "-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
- }
-}
-
-func gomobileTool(subcmd string, args ...string) *exec.Cmd {
- cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
- cmd.Args = append(cmd.Args, args...)
- cmd.Env = []string{
- "PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
- }
- for _, e := range os.Environ() {
- if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") {
- continue
- }
- cmd.Env = append(cmd.Env, e)
- }
- cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
- return cmd
-}
-
-type mavenMetadata struct {
- Version string
- Package string
- Develop bool
- Contributors []mavenContributor
-}
-
-type mavenContributor struct {
- Name string
- Email string
-}
-
-func newMavenMetadata(env build.Environment) mavenMetadata {
- // Collect the list of authors from the repo root
- contribs := []mavenContributor{}
- if authors, err := os.Open("AUTHORS"); err == nil {
- defer authors.Close()
-
- scanner := bufio.NewScanner(authors)
- for scanner.Scan() {
- // Skip any whitespace from the authors list
- line := strings.TrimSpace(scanner.Text())
- if line == "" || line[0] == '#' {
- continue
- }
- // Split the author and insert as a contributor
- re := regexp.MustCompile("([^<]+) <(.+)>")
- parts := re.FindStringSubmatch(line)
- if len(parts) == 3 {
- contribs = append(contribs, mavenContributor{Name: parts[1], Email: parts[2]})
- }
- }
- }
- // Render the version and package strings
- version := params.Version
- if isUnstableBuild(env) {
- version += "-SNAPSHOT"
- }
- return mavenMetadata{
- Version: version,
- Package: "gocore-" + version,
- Develop: isUnstableBuild(env),
- Contributors: contribs,
- }
-}
-
-// XCode frameworks
-
-func doXCodeFramework(cmdline []string) {
- var (
- local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
- signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`)
- deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`)
- upload = flag.String("upload", "", `Destination to upload the archives (usually "gocorestore/builds")`)
- )
- flag.CommandLine.Parse(cmdline)
- env := build.Env()
-
- // Build the iOS XCode framework
- build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
- build.MustRun(gomobileTool("init"))
- bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/core-coin/go-core/mobile")
-
- if *local {
- // If we're building locally, use the build folder and stop afterwards
- bind.Dir = GOBIN
- build.MustRun(bind)
- return
- }
- archive := "gocore-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
- if err := os.Mkdir(archive, os.ModePerm); err != nil {
- log.Fatal(err)
- }
- bind.Dir, _ = filepath.Abs(archive)
- build.MustRun(bind)
- build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive)
-
- // Skip CocoaPods deploy and Azure upload for PR builds
- maybeSkipArchive(env)
-
- // Sign and upload the framework to Azure
- if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil {
- log.Fatal(err)
- }
- // Prepare and upload a PodSpec to CocoaPods
- if *deploy != "" {
- meta := newPodMetadata(env, archive)
- build.Render("build/pod.podspec", "Gocore.podspec", 0755, meta)
- build.MustRunCommand("pod", *deploy, "push", "Gocore.podspec", "--allow-warnings", "--verbose")
- }
-}
-
-type podMetadata struct {
- Version string
- Commit string
- Archive string
- Contributors []podContributor
-}
-
-type podContributor struct {
- Name string
- Email string
-}
-
-func newPodMetadata(env build.Environment, archive string) podMetadata {
- // Collect the list of authors from the repo root
- contribs := []podContributor{}
- if authors, err := os.Open("AUTHORS"); err == nil {
- defer authors.Close()
-
- scanner := bufio.NewScanner(authors)
- for scanner.Scan() {
- // Skip any whitespace from the authors list
- line := strings.TrimSpace(scanner.Text())
- if line == "" || line[0] == '#' {
- continue
- }
- // Split the author and insert as a contributor
- re := regexp.MustCompile("([^<]+) <(.+)>")
- parts := re.FindStringSubmatch(line)
- if len(parts) == 3 {
- contribs = append(contribs, podContributor{Name: parts[1], Email: parts[2]})
- }
- }
- }
- version := params.Version
- if isUnstableBuild(env) {
- version += "-unstable." + env.Buildnum
- }
- return podMetadata{
- Archive: archive,
- Version: version,
- Commit: env.Commit,
- Contributors: contribs,
- }
-}
-
// Cross compilation
-
func doXgo(cmdline []string) {
var (
alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`)
diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go
index ce5c702a6..8dfa539db 100644
--- a/cmd/abigen/main.go
+++ b/cmd/abigen/main.go
@@ -36,6 +36,7 @@ import (
var (
// Git SHA1 commit hash of the release (set via linker flags)
+ gitTag = ""
gitCommit = ""
gitDate = ""
@@ -100,7 +101,7 @@ var (
)
func init() {
- app = utils.NewApp(gitCommit, gitDate, "core checkpoint helper tool")
+ app = utils.NewApp(gitTag, gitCommit, gitDate, "core checkpoint helper tool")
app.Flags = []cli.Flag{
abiFlag,
binFlag,
diff --git a/cmd/checkpoint-admin/main.go b/cmd/checkpoint-admin/main.go
index a1b76ba50..056619b53 100644
--- a/cmd/checkpoint-admin/main.go
+++ b/cmd/checkpoint-admin/main.go
@@ -30,6 +30,7 @@ import (
var (
// Git SHA1 commit hash of the release (set via linker flags)
+ gitTag = ""
gitCommit = ""
gitDate = ""
)
@@ -37,7 +38,7 @@ var (
var app *cli.App
func init() {
- app = utils.NewApp(gitCommit, gitDate, "core checkpoint helper tool")
+ app = utils.NewApp(gitTag, gitCommit, gitDate, "core checkpoint helper tool")
app.Commands = []cli.Command{
commandStatus,
commandDeploy,
diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index bf44c4b1e..00d135751 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -595,10 +595,16 @@ func signer(c *cli.Context) error {
if c.GlobalBool(utils.HTTPEnabledFlag.Name) {
vhosts := splitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name))
cors := splitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
+ srv := rpc.NewServer()
+ err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false)
+ if err != nil {
+ utils.Fatalf("Could not register API: %w", err)
+ }
+ handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
// start http server
- httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPEnabledFlag.Name), c.Int(rpcPortFlag.Name))
- listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
+ httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
+ listener, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
diff --git a/cmd/cvm/README.md b/cmd/cvm/README.md
new file mode 100644
index 000000000..50d044528
--- /dev/null
+++ b/cmd/cvm/README.md
@@ -0,0 +1,263 @@
+## CVM state transition tool
+
+The `cvm t8n` tool is a stateless state transition utility. It is a utility
+which can
+
+1. Take a prestate, including
+- Accounts,
+- Block context information,
+- Previous blockhashes (*optional)
+2. Apply a set of transactions,
+3. Apply a mining-reward (*optional),
+4. And generate a post-state, including
+- State root, transaction root, receipt root,
+- Information about rejected transactions,
+- Optionally: a full or partial post-state dump
+
+## Specification
+
+The idea is to specify the behaviour of this binary very _strictly_, so that other
+node implementors can build replicas based on their own state-machines, and the
+state generators can swap in a `gocore`-based implementation.
+
+### Command line params
+
+Command line params that have to be supported are
+```
+ --trace Output full trace logs to files .jsonl
+ --trace.nomemory Disable full memory dump in traces
+ --trace.nostack Disable stack output in traces
+ --output.alloc alloc Determines where to put the alloc of the post-state.
+ `stdout` - into the stdout output
+ `stderr` - into the stderr output
+ --output.result result Determines where to put the result (stateroot, txroot etc) of the post-state.
+ `stdout` - into the stdout output
+ `stderr` - into the stderr output
+ --state.fork value Name of ruleset to use.
+ --state.chainid value ChainID to use (default: 1)
+ --state.reward value Mining reward. Set to -1 to disable (default: 0)
+```
+
+### Error codes and output
+
+All logging should happen against the `stderr`.
+There are a few (not many) errors that can occur, those are defined below.
+
+#### CVM-based errors (`2` to `9`)
+
+- Other CVM error. Exit code `2`
+- Failed configuration: when a non-supported or invalid fork was specified. Exit code `3`.
+- Block history is not supplied, but needed for a `BLOCKHASH` operation. If `BLOCKHASH`
+  is invoked targeting a block whose history has not been provided, the program will
+  exit with code `4`.
+
+#### IO errors (`10`-`20`)
+
+- Invalid input json: the supplied data could not be unmarshalled.
+ The program will exit with code `10`
+- IO problems: failure to load or save files, the program will exit with code `11`
+
+## Examples
+### Basic usage
+
+Invoking it with the provided example files
+```
+./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json
+```
+Two resulting files:
+
+`alloc.json`:
+```json
+{
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
+ "balance": "0xfeed1a9d",
+ "nonce": "0x1"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "nonce": "0xac"
+ },
+ "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0xa410"
+ }
+}
+```
+`result.json`:
+```json
+{
+ "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
+ "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
+ "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "root": "0x",
+ "status": "0x1",
+ "cumulativeEnergyUsed": "0x5208",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "energyUsed": "0x5208",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "rejected": [
+ 1
+ ]
+}
+```
+
+We can make them spit out the data to e.g. `stdout` like this:
+```
+./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout
+```
+Output:
+```json
+{
+ "alloc": {
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
+ "balance": "0xfeed1a9d",
+ "nonce": "0x1"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "nonce": "0xac"
+ },
+ "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0xa410"
+ }
+ },
+ "result": {
+ "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
+ "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
+ "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "root": "0x",
+ "status": "0x1",
+ "cumulativeEnergyUsed": "0x5208",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "energyUsed": "0x5208",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "rejected": [
+ 1
+ ]
+ }
+}
+```
+
+## About Ommers
+
+Mining rewards and ommer rewards might need to be added. This is how those are applied:
+
+- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`.
+- For each ommer (mined by `0xbb`), with blocknumber `N-delta`
+ - (where `delta` is the difference between the current block and the ommer)
+ - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward`
+ - The account `0xaa` (block miner) is awarded `block_reward / 32`
+
+To make `state_t8n` apply these, the following inputs are required:
+
+- `state.reward`
+ - For cryptore, it is `5000000000000000000` `ore`,
+ - If this is not defined, mining rewards are not applied,
+ - A value of `0` is valid, and causes accounts to be 'touched'.
+- For each ommer, the tool needs to be given an `address` and a `delta`. This
+ is done via the `env`.
+
+Note: the tool does not verify that e.g. the normal uncle rules apply,
+and allows e.g. two uncles at the same height, or an invalid uncle-distance. This means
+that the tool allows for a negative uncle reward (distance > 8).
+
+Example:
+`./testdata/5/env.json`:
+```json
+{
+ "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "currentDifficulty": "0x20000",
+ "currentEnergyLimit": "0x750a163df65e8a",
+ "currentNumber": "1",
+ "currentTimestamp": "1000",
+ "ommers": [
+ {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" },
+ {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" }
+ ]
+}
+```
+When applying this, using a reward of `0x80`
+Output:
+```json
+{
+ "alloc": {
+ "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
+ "balance": "0x88"
+ },
+ "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
+ "balance": "0x70"
+ },
+ "0xcccccccccccccccccccccccccccccccccccccccc": {
+ "balance": "0x60"
+ }
+ }
+}
+```
+### Future CIPs
+
+It is also possible to experiment with future CIPs that are not yet defined in a hard fork.
+Example, putting CIP-1344 into Frontier:
+```
+./cvm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json
+```
+
+### Block history
+
+The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`.
+If a required blockhash is not provided, the exit code should be `4`:
+Example where blockhashes are provided:
+```
+./cvm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace
+```
+```
+cat trace-0.jsonl | grep BLOCKHASH -C2
+```
+```
+{"pc":0,"op":96,"energy":"0x5f58ef8","energyCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"depth":1,"refund":0,"opName":"PUSH1","error":""}
+{"pc":2,"op":64,"energy":"0x5f58ef5","energyCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"depth":1,"refund":0,"opName":"BLOCKHASH","error":""}
+{"pc":3,"op":0,"energy":"0x5f58ee1","energyCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"depth":1,"refund":0,"opName":"STOP","error":""}
+{"output":"","energyUsed":"0x17","time":155861}
+```
+
+In this example, the caller has not provided the required blockhash:
+```
+./cvm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace
+```
+```
+ERROR(4): getHash(3) invoked, blockhash for that block not provided
+```
+Error code: 4
+### Chaining
+
+Another thing that can be done, is to chain invocations:
+```
+./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./cvm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json
+INFO [06-29|11:52:04.934] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
+INFO [06-29|11:52:04.936] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
+INFO [06-29|11:52:04.936] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low"
+```
+What happened here, is that we first applied two identical transactions, so the second one was rejected.
+Then, taking the poststate alloc as the input for the next state, we tried again to include
+the same two transactions: this time, both failed due to too low nonce.
+
+In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the
+actual blocknumber (exposed to the CVM) would not increase.
\ No newline at end of file
diff --git a/cmd/cvm/main.go b/cmd/cvm/main.go
index 19cc91397..7248af61d 100644
--- a/cmd/cvm/main.go
+++ b/cmd/cvm/main.go
@@ -19,6 +19,7 @@ package main
import (
"fmt"
+ "github.com/core-coin/go-core/cmd/cvm/t8ntool"
"math/big"
"os"
@@ -26,11 +27,12 @@ import (
"gopkg.in/urfave/cli.v1"
)
+var gitTag = ""
var gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
var gitDate = ""
var (
- app = utils.NewApp(gitCommit, gitDate, "the cvm command line interface")
+ app = utils.NewApp(gitTag, gitCommit, gitDate, "the cvm command line interface")
DebugFlag = cli.BoolFlag{
Name: "debug",
@@ -119,6 +121,14 @@ var (
Name: "nostack",
Usage: "disable stack output",
}
+ DisableStorageFlag = cli.BoolFlag{
+ Name: "nostorage",
+ Usage: "disable storage output",
+ }
+ DisableReturnDataFlag = cli.BoolFlag{
+ Name: "noreturndata",
+ Usage: "disable return data output",
+ }
CVMInterpreterFlag = cli.StringFlag{
Name: "vm.cvm",
Usage: "External CVM configuration (default = built-in interpreter)",
@@ -126,6 +136,27 @@ var (
}
)
+var stateTransitionCommand = cli.Command{
+ Name: "transition",
+ Aliases: []string{"t8n"},
+ Usage: "executes a full state transition",
+ Action: t8ntool.Main,
+ Flags: []cli.Flag{
+ t8ntool.TraceFlag,
+ t8ntool.TraceDisableMemoryFlag,
+ t8ntool.TraceDisableStackFlag,
+ t8ntool.TraceDisableReturnDataFlag,
+ t8ntool.OutputAllocFlag,
+ t8ntool.OutputResultFlag,
+ t8ntool.InputAllocFlag,
+ t8ntool.InputEnvFlag,
+ t8ntool.InputTxsFlag,
+ t8ntool.NetworkIDFlag,
+ t8ntool.RewardFlag,
+ t8ntool.VerbosityFlag,
+ },
+}
+
func init() {
app.Flags = []cli.Flag{
BenchFlag,
@@ -149,6 +180,8 @@ func init() {
ReceiverFlag,
DisableMemoryFlag,
DisableStackFlag,
+ DisableStorageFlag,
+ DisableReturnDataFlag,
CVMInterpreterFlag,
}
app.Commands = []cli.Command{
@@ -156,13 +189,18 @@ func init() {
disasmCommand,
runCommand,
stateTestCommand,
+ stateTransitionCommand,
}
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func main() {
if err := app.Run(os.Args); err != nil {
+ code := 1
+ if ec, ok := err.(*t8ntool.NumberedError); ok {
+ code = ec.Code()
+ }
fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
+ os.Exit(code)
}
}
diff --git a/cmd/cvm/poststate.json b/cmd/cvm/poststate.json
new file mode 100644
index 000000000..f1ce9e294
--- /dev/null
+++ b/cmd/cvm/poststate.json
@@ -0,0 +1,23 @@
+{
+ "root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23",
+ "accounts": {
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
+ "balance": "4276951709",
+ "nonce": 1,
+ "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "6916764286133345652",
+ "nonce": 172,
+ "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+ },
+ "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "42500",
+ "nonce": 0,
+ "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/runner.go b/cmd/cvm/runner.go
index 6b560082e..c5675b448 100644
--- a/cmd/cvm/runner.go
+++ b/cmd/cvm/runner.go
@@ -108,9 +108,11 @@ func runCmd(ctx *cli.Context) error {
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &vm.LogConfig{
- DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
- DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
- Debug: ctx.GlobalBool(DebugFlag.Name),
+ DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
+ DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
+ DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
+ DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
+ Debug: ctx.GlobalBool(DebugFlag.Name),
}
var (
diff --git a/cmd/cvm/staterunner.go b/cmd/cvm/staterunner.go
index 369533017..ad3424432 100644
--- a/cmd/cvm/staterunner.go
+++ b/cmd/cvm/staterunner.go
@@ -59,8 +59,10 @@ func stateTestCmd(ctx *cli.Context) error {
// Configure the CVM logger
config := &vm.LogConfig{
- DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
- DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
+ DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
+ DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
+ DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
+ DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
}
var (
tracer vm.Tracer
diff --git a/cmd/cvm/t8ntool/execution.go b/cmd/cvm/t8ntool/execution.go
new file mode 100644
index 000000000..acfb1032d
--- /dev/null
+++ b/cmd/cvm/t8ntool/execution.go
@@ -0,0 +1,243 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package t8ntool
+
+import (
+ "fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/common/math"
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/state"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/core/vm"
+ "github.com/core-coin/go-core/crypto"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/params"
+ "github.com/core-coin/go-core/rlp"
+ "github.com/core-coin/go-core/trie"
+ "github.com/core-coin/go-core/xcbdb"
+ "golang.org/x/crypto/sha3"
+ "math/big"
+ "os"
+)
+
+type Prestate struct {
+ Env stEnv `json:"env"`
+ Pre core.GenesisAlloc `json:"pre"`
+}
+
+// ExecutionResult contains the execution status after running a state test, any
+// error that might have occurred and a dump of the final state if requested.
+type ExecutionResult struct {
+ StateRoot common.Hash `json:"stateRoot"`
+ TxRoot common.Hash `json:"txRoot"`
+ ReceiptRoot common.Hash `json:"receiptRoot"`
+ LogsHash common.Hash `json:"logsHash"`
+ Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
+ Receipts types.Receipts `json:"receipts"`
+ Rejected []int `json:"rejected,omitempty"`
+}
+
+type ommer struct {
+ Delta uint64 `json:"delta"`
+ Address common.Address `json:"address"`
+}
+
+//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
+type stEnv struct {
+ Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"`
+ EnergyLimit uint64 `json:"currentEnergyLimit" gencodec:"required"`
+ Number uint64 `json:"currentNumber" gencodec:"required"`
+ Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+}
+
+type stEnvMarshaling struct {
+ Coinbase common.UnprefixedAddress
+ Difficulty *math.HexOrDecimal256
+ EnergyLimit math.HexOrDecimal64
+ Number math.HexOrDecimal64
+ Timestamp math.HexOrDecimal64
+}
+
+// Apply applies a set of transactions to a pre-state
+func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
+ txs types.Transactions, miningReward int64,
+ getTracerFn func(txIndex int) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
+
+ // Capture errors for BLOCKHASH operation, if we haven't been supplied the
+ // required blockhashes
+ var hashError error
+ getHash := func(num uint64) common.Hash {
+ if pre.Env.BlockHashes == nil {
+ hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num)
+ return common.Hash{}
+ }
+ h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)]
+ if !ok {
+ hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num)
+ }
+ return h
+ }
+ var (
+ statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre)
+ signer = types.MakeSigner(chainConfig.NetworkID)
+ energypool = new(core.EnergyPool)
+ blockHash = common.Hash{0x13, 0x37}
+ rejectedTxs []int
+ includedTxs types.Transactions
+ energyUsed = uint64(0)
+ receipts = make(types.Receipts, 0)
+ txIndex = 0
+ )
+ energypool.AddEnergy(pre.Env.EnergyLimit)
+ vmContext := vm.Context{
+ CanTransfer: core.CanTransfer,
+ Transfer: core.Transfer,
+ Coinbase: pre.Env.Coinbase,
+ BlockNumber: new(big.Int).SetUint64(pre.Env.Number),
+ Time: new(big.Int).SetUint64(pre.Env.Timestamp),
+ Difficulty: pre.Env.Difficulty,
+ EnergyLimit: pre.Env.EnergyLimit,
+ GetHash: getHash,
+ // EnergyPrice and Origin needs to be set per transaction
+ }
+
+ for i, tx := range txs {
+ msg, err := tx.AsMessage(signer)
+ if err != nil {
+ log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
+ rejectedTxs = append(rejectedTxs, i)
+ continue
+ }
+ tracer, err := getTracerFn(txIndex)
+ if err != nil {
+ return nil, nil, err
+ }
+ vmConfig.Tracer = tracer
+ vmConfig.Debug = (tracer != nil)
+ statedb.Prepare(tx.Hash(), blockHash, txIndex)
+ vmContext.EnergyPrice = msg.EnergyPrice()
+ vmContext.Origin = msg.From()
+
+ cvm := vm.NewCVM(vmContext, statedb, chainConfig, vmConfig)
+ snapshot := statedb.Snapshot()
+ // (ret []byte, usedEnergy uint64, failed bool, err error)
+ msgResult, err := core.ApplyMessage(cvm, msg, energypool)
+ if err != nil {
+ statedb.RevertToSnapshot(snapshot)
+ log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err)
+ rejectedTxs = append(rejectedTxs, i)
+ continue
+ }
+ includedTxs = append(includedTxs, tx)
+ if hashError != nil {
+ return nil, nil, NewError(ErrorMissingBlockhash, hashError)
+ }
+ energyUsed += msgResult.UsedEnergy
+ // Create a new receipt for the transaction, storing the intermediate root and energy used by the tx
+ {
+ var root []byte
+ root = statedb.IntermediateRoot(true).Bytes()
+
+ receipt := types.NewReceipt(root, msgResult.Failed(), energyUsed)
+ receipt.TxHash = tx.Hash()
+ receipt.EnergyUsed = msgResult.UsedEnergy
+ // if the transaction created a contract, store the creation address in the receipt.
+ if msg.To() == nil {
+ receipt.ContractAddress = crypto.CreateAddress(cvm.Context.Origin, tx.Nonce())
+ }
+ // Set the receipt logs and create a bloom for filtering
+ receipt.Logs = statedb.GetLogs(tx.Hash())
+ receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
+ // These three are non-consensus fields
+ //receipt.BlockHash
+ //receipt.BlockNumber =
+ receipt.TransactionIndex = uint(txIndex)
+ receipts = append(receipts, receipt)
+ }
+ txIndex++
+ }
+ statedb.IntermediateRoot(true)
+ // Add mining reward?
+ if miningReward > 0 {
+ // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
+ // where
+ // - the coinbase suicided, or
+ // - there are only 'bad' transactions, which aren't executed. In those cases,
+ // the coinbase gets no txfee, so isn't created, and thus needs to be touched
+ var (
+ blockReward = big.NewInt(miningReward)
+ minerReward = new(big.Int).Set(blockReward)
+ perOmmer = new(big.Int).Div(blockReward, big.NewInt(32))
+ )
+ for _, ommer := range pre.Env.Ommers {
+ // Add 1/32th for each ommer included
+ minerReward.Add(minerReward, perOmmer)
+ // Add (8-delta)/8
+ reward := big.NewInt(8)
+ reward.Sub(reward, big.NewInt(0).SetUint64(ommer.Delta))
+ reward.Mul(reward, blockReward)
+ reward.Div(reward, big.NewInt(8))
+ statedb.AddBalance(ommer.Address, reward)
+ }
+ statedb.AddBalance(pre.Env.Coinbase, minerReward)
+ }
+ // Commit block
+ root, err := statedb.Commit(true)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
+ return nil, nil, NewError(ErrorCVM, fmt.Errorf("could not commit state: %v", err))
+ }
+ execRs := &ExecutionResult{
+ StateRoot: root,
+ TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)),
+ ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)),
+ Bloom: types.CreateBloom(receipts),
+ LogsHash: rlpHash(statedb.Logs()),
+ Receipts: receipts,
+ Rejected: rejectedTxs,
+ }
+ return statedb, execRs, nil
+}
+
+func MakePreState(db xcbdb.Database, accounts core.GenesisAlloc) *state.StateDB {
+ sdb := state.NewDatabase(db)
+ statedb, _ := state.New(common.Hash{}, sdb, nil)
+ for addr, a := range accounts {
+ statedb.SetCode(addr, a.Code)
+ statedb.SetNonce(addr, a.Nonce)
+ statedb.SetBalance(addr, a.Balance)
+ for k, v := range a.Storage {
+ statedb.SetState(addr, k, v)
+ }
+ }
+ // Commit and re-open to start with a clean state.
+ root, _ := statedb.Commit(false)
+ statedb, _ = state.New(root, sdb, nil)
+ return statedb
+}
+
+func rlpHash(x interface{}) (h common.Hash) {
+ hw := sha3.New256()
+ rlp.Encode(hw, x)
+ hw.Sum(h[:0])
+ return h
+}
diff --git a/cmd/cvm/t8ntool/flags.go b/cmd/cvm/t8ntool/flags.go
new file mode 100644
index 000000000..a2964d557
--- /dev/null
+++ b/cmd/cvm/t8ntool/flags.go
@@ -0,0 +1,86 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package t8ntool
+
+import (
+ "gopkg.in/urfave/cli.v1"
+)
+
+var (
+ TraceFlag = cli.BoolFlag{
+ Name: "trace",
+ Usage: "Output full trace logs to files .jsonl",
+ }
+ TraceDisableMemoryFlag = cli.BoolFlag{
+ Name: "trace.nomemory",
+ Usage: "Disable full memory dump in traces",
+ }
+ TraceDisableStackFlag = cli.BoolFlag{
+ Name: "trace.nostack",
+ Usage: "Disable stack output in traces",
+ }
+ TraceDisableReturnDataFlag = cli.BoolFlag{
+ Name: "trace.noreturndata",
+ Usage: "Disable return data output in traces",
+ }
+ OutputAllocFlag = cli.StringFlag{
+ Name: "output.alloc",
+ Usage: "Determines where to put the `alloc` of the post-state.\n" +
+ "\t`stdout` - into the stdout output\n" +
+ "\t`stderr` - into the stderr output\n" +
+ "\t - into the file ",
+ Value: "alloc.json",
+ }
+ OutputResultFlag = cli.StringFlag{
+ Name: "output.result",
+ Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" +
+ "\t`stdout` - into the stdout output\n" +
+ "\t`stderr` - into the stderr output\n" +
+ "\t - into the file ",
+ Value: "result.json",
+ }
+ InputAllocFlag = cli.StringFlag{
+ Name: "input.alloc",
+ Usage: "`stdin` or file name of where to find the prestate alloc to use.",
+ Value: "alloc.json",
+ }
+ InputEnvFlag = cli.StringFlag{
+ Name: "input.env",
+ Usage: "`stdin` or file name of where to find the prestate env to use.",
+ Value: "env.json",
+ }
+ InputTxsFlag = cli.StringFlag{
+ Name: "input.txs",
+ Usage: "`stdin` or file name of where to find the transactions to apply.",
+ Value: "txs.json",
+ }
+ RewardFlag = cli.Int64Flag{
+ Name: "state.reward",
+ Usage: "Mining reward. Set to -1 to disable",
+ Value: 0,
+ }
+ NetworkIDFlag = cli.Int64Flag{
+ Name: "state.networkid",
+ Usage: "NetworkID to use",
+ Value: 1,
+ }
+ VerbosityFlag = cli.IntFlag{
+ Name: "verbosity",
+ Usage: "sets the verbosity level",
+ Value: 3,
+ }
+)
diff --git a/cmd/cvm/t8ntool/gen_stenv.go b/cmd/cvm/t8ntool/gen_stenv.go
new file mode 100644
index 000000000..69c538f4f
--- /dev/null
+++ b/cmd/cvm/t8ntool/gen_stenv.go
@@ -0,0 +1,80 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package t8ntool
+
+import (
+ "encoding/json"
+ "errors"
+ "math/big"
+
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/common/math"
+)
+
+var _ = (*stEnvMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (s stEnv) MarshalJSON() ([]byte, error) {
+ type stEnv struct {
+ Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ EnergyLimit math.HexOrDecimal64 `json:"currentEnergyLimit" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ }
+ var enc stEnv
+ enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
+ enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.EnergyLimit = math.HexOrDecimal64(s.EnergyLimit)
+ enc.Number = math.HexOrDecimal64(s.Number)
+ enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
+ enc.BlockHashes = s.BlockHashes
+ enc.Ommers = s.Ommers
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (s *stEnv) UnmarshalJSON(input []byte) error {
+ type stEnv struct {
+ Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ EnergyLimit *math.HexOrDecimal64 `json:"currentEnergyLimit" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ }
+ var dec stEnv
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.Coinbase == nil {
+ return errors.New("missing required field 'currentCoinbase' for stEnv")
+ }
+ s.Coinbase = common.Address(*dec.Coinbase)
+ if dec.Difficulty == nil {
+ return errors.New("missing required field 'currentDifficulty' for stEnv")
+ }
+ s.Difficulty = (*big.Int)(dec.Difficulty)
+ if dec.EnergyLimit == nil {
+ return errors.New("missing required field 'currentEnergyLimit' for stEnv")
+ }
+ s.EnergyLimit = uint64(*dec.EnergyLimit)
+ if dec.Number == nil {
+ return errors.New("missing required field 'currentNumber' for stEnv")
+ }
+ s.Number = uint64(*dec.Number)
+ if dec.Timestamp == nil {
+ return errors.New("missing required field 'currentTimestamp' for stEnv")
+ }
+ s.Timestamp = uint64(*dec.Timestamp)
+ if dec.BlockHashes != nil {
+ s.BlockHashes = dec.BlockHashes
+ }
+ if dec.Ommers != nil {
+ s.Ommers = dec.Ommers
+ }
+ return nil
+}
diff --git a/cmd/cvm/t8ntool/transition.go b/cmd/cvm/t8ntool/transition.go
new file mode 100644
index 000000000..b9fb407dd
--- /dev/null
+++ b/cmd/cvm/t8ntool/transition.go
@@ -0,0 +1,275 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package t8ntool
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/core/state"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/core/vm"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/params"
+ "github.com/core-coin/go-core/tests"
+ "gopkg.in/urfave/cli.v1"
+ "io/ioutil"
+ "math/big"
+ "os"
+)
+
+const (
+ ErrorCVM = 2
+ ErrorVMConfig = 3
+ ErrorMissingBlockhash = 4
+
+ ErrorJson = 10
+ ErrorIO = 11
+
+ stdinSelector = "stdin"
+)
+
+type NumberedError struct {
+ errorCode int
+ err error
+}
+
+func NewError(errorCode int, err error) *NumberedError {
+ return &NumberedError{errorCode, err}
+}
+
+func (n *NumberedError) Error() string {
+ return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error())
+}
+
+func (n *NumberedError) Code() int {
+ return n.errorCode
+}
+
+type input struct {
+ Alloc core.GenesisAlloc `json:"alloc,omitempty"`
+ Env *stEnv `json:"env,omitempty"`
+ Txs types.Transactions `json:"txs,omitempty"`
+}
+
+func Main(ctx *cli.Context) error {
+ // Configure the go-core logger
+ glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
+ glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
+ log.Root().SetHandler(glogger)
+
+ var (
+ err error
+ tracer vm.Tracer
+ )
+ var getTracer func(txIndex int) (vm.Tracer, error)
+
+ if ctx.Bool(TraceFlag.Name) {
+ // Configure the CVM logger
+ logConfig := &vm.LogConfig{
+ DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
+ DisableMemory: ctx.Bool(TraceDisableMemoryFlag.Name),
+ DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name),
+ Debug: true,
+ }
+ var prevFile *os.File
+ // This one closes the last file
+ defer func() {
+ if prevFile != nil {
+ prevFile.Close()
+ }
+ }()
+ getTracer = func(txIndex int) (vm.Tracer, error) {
+ if prevFile != nil {
+ prevFile.Close()
+ }
+ traceFile, err := os.Create(fmt.Sprintf("trace-%d.jsonl", txIndex))
+ if err != nil {
+ return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
+ }
+ prevFile = traceFile
+ return vm.NewJSONLogger(logConfig, traceFile), nil
+ }
+ } else {
+ getTracer = func(txIndex int) (tracer vm.Tracer, err error) {
+ return nil, nil
+ }
+ }
+ // We need to load three things: alloc, env and transactions. May be either in
+ // stdin input or in files.
+ // Check if anything needs to be read from stdin
+ var (
+ prestate Prestate
+ txs types.Transactions // txs to apply
+ allocStr = ctx.String(InputAllocFlag.Name)
+
+ envStr = ctx.String(InputEnvFlag.Name)
+ txStr = ctx.String(InputTxsFlag.Name)
+ inputData = &input{}
+ )
+
+ if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
+ decoder := json.NewDecoder(os.Stdin)
+ decoder.Decode(inputData)
+ }
+ if allocStr != stdinSelector {
+ inFile, err := os.Open(allocStr)
+ if err != nil {
+ return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err))
+ }
+ defer inFile.Close()
+ decoder := json.NewDecoder(inFile)
+ if err := decoder.Decode(&inputData.Alloc); err != nil {
+ return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err))
+ }
+ }
+
+ if envStr != stdinSelector {
+ inFile, err := os.Open(envStr)
+ if err != nil {
+ return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err))
+ }
+ defer inFile.Close()
+ decoder := json.NewDecoder(inFile)
+ var env stEnv
+ if err := decoder.Decode(&env); err != nil {
+ return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err))
+ }
+ inputData.Env = &env
+ }
+
+ if txStr != stdinSelector {
+ inFile, err := os.Open(txStr)
+ if err != nil {
+ return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
+ }
+ defer inFile.Close()
+ decoder := json.NewDecoder(inFile)
+ var txs types.Transactions
+ if err := decoder.Decode(&txs); err != nil {
+ return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err))
+ }
+ inputData.Txs = txs
+ }
+
+ prestate.Pre = inputData.Alloc
+ prestate.Env = *inputData.Env
+ txs = inputData.Txs
+
+ // Iterate over all the tests, run them and aggregate the results
+ vmConfig := vm.Config{
+ Tracer: tracer,
+ Debug: (tracer != nil),
+ }
+ // Construct the chainconfig
+ var chainConfig *params.ChainConfig
+ if cConf, _, err := tests.GetChainConfig("Mainnet"); err != nil {
+ return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err))
+ } else {
+ chainConfig = cConf
+ }
+ // Set the chain id
+ chainConfig.NetworkID = big.NewInt(ctx.Int64(NetworkIDFlag.Name))
+
+ // Run the test and aggregate the result
+ state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
+ if err != nil {
+ return err
+ }
+ // Dump the execution result
+ //postAlloc := state.DumpGenesisFormat(false, false, false)
+ collector := make(Alloc)
+ state.DumpToCollector(collector, false, false, false, nil, -1)
+ return dispatchOutput(ctx, result, collector)
+
+}
+
+type Alloc map[common.Address]core.GenesisAccount
+
+func (g Alloc) OnRoot(common.Hash) {}
+
+func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
+ balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10)
+ var storage map[common.Hash]common.Hash
+ if dumpAccount.Storage != nil {
+ storage = make(map[common.Hash]common.Hash)
+ for k, v := range dumpAccount.Storage {
+ storage[k] = common.HexToHash(v)
+ }
+ }
+ genesisAccount := core.GenesisAccount{
+ Code: common.FromHex(dumpAccount.Code),
+ Storage: storage,
+ Balance: balance,
+ Nonce: dumpAccount.Nonce,
+ }
+ g[addr] = genesisAccount
+}
+
+// saveFile marshals the object to the given file
+func saveFile(filename string, data interface{}) error {
+ b, err := json.MarshalIndent(data, "", " ")
+ if err != nil {
+ return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
+ }
+ if err = ioutil.WriteFile(filename, b, 0644); err != nil {
+ return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err))
+ }
+ return nil
+}
+
+// dispatchOutput writes the output data to either stderr or stdout, or to the specified
+// files
+func dispatchOutput(ctx *cli.Context, result *ExecutionResult, alloc Alloc) error {
+ stdOutObject := make(map[string]interface{})
+ stdErrObject := make(map[string]interface{})
+ dispatch := func(fName, name string, obj interface{}) error {
+ switch fName {
+ case "stdout":
+ stdOutObject[name] = obj
+ case "stderr":
+ stdErrObject[name] = obj
+ default: // save to file
+ if err := saveFile(fName, obj); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if err := dispatch(ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil {
+ return err
+ }
+ if err := dispatch(ctx.String(OutputResultFlag.Name), "result", result); err != nil {
+ return err
+ }
+ if len(stdOutObject) > 0 {
+ b, err := json.MarshalIndent(stdOutObject, "", " ")
+ if err != nil {
+ return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
+ }
+ os.Stdout.Write(b)
+ }
+ if len(stdErrObject) > 0 {
+ b, err := json.MarshalIndent(stdErrObject, "", " ")
+ if err != nil {
+ return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
+ }
+ os.Stderr.Write(b)
+ }
+ return nil
+}
diff --git a/cmd/cvm/testdata/1/alloc.json b/cmd/cvm/testdata/1/alloc.json
new file mode 100644
index 000000000..cef1a25ff
--- /dev/null
+++ b/cmd/cvm/testdata/1/alloc.json
@@ -0,0 +1,12 @@
+{
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "code": "0x",
+ "nonce": "0xac",
+ "storage": {}
+ },
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
+ "balance": "0xfeedbead",
+ "nonce" : "0x00"
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/1/env.json b/cmd/cvm/testdata/1/env.json
new file mode 100644
index 000000000..dd60abd20
--- /dev/null
+++ b/cmd/cvm/testdata/1/env.json
@@ -0,0 +1,7 @@
+{
+ "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
+ "currentDifficulty": "0x20000",
+ "currentGasLimit": "0x750a163df65e8a",
+ "currentNumber": "1",
+ "currentTimestamp": "1000"
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/1/txs.json b/cmd/cvm/testdata/1/txs.json
new file mode 100644
index 000000000..6d7c1e1c7
--- /dev/null
+++ b/cmd/cvm/testdata/1/txs.json
@@ -0,0 +1,26 @@
+[
+ {
+ "gas": "0x5208",
+ "gasPrice": "0x2",
+ "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
+ "input": "0x",
+ "nonce": "0x0",
+ "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb",
+ "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600",
+ "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192",
+ "v": "0x1b",
+ "value": "0x1"
+ },
+ {
+ "gas": "0x5208",
+ "gasPrice": "0x2",
+ "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
+ "input": "0x",
+ "nonce": "0x0",
+ "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb",
+ "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600",
+ "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192",
+ "v": "0x1b",
+ "value": "0x1"
+ }
+]
\ No newline at end of file
diff --git a/cmd/cvm/testdata/2/alloc.json b/cmd/cvm/testdata/2/alloc.json
new file mode 100644
index 000000000..a9720afc9
--- /dev/null
+++ b/cmd/cvm/testdata/2/alloc.json
@@ -0,0 +1,16 @@
+{
+ "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x6001600053600160006001f0ff00",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/2/env.json b/cmd/cvm/testdata/2/env.json
new file mode 100644
index 000000000..ebadd3f06
--- /dev/null
+++ b/cmd/cvm/testdata/2/env.json
@@ -0,0 +1,7 @@
+{
+ "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
+ "currentDifficulty" : "0x020000",
+ "currentGasLimit" : "0x3b9aca00",
+ "currentNumber" : "0x01",
+ "currentTimestamp" : "0x03e8"
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/2/readme.md b/cmd/cvm/testdata/2/readme.md
new file mode 100644
index 000000000..ffbe3f100
--- /dev/null
+++ b/cmd/cvm/testdata/2/readme.md
@@ -0,0 +1 @@
+These files exemplify a selfdestruct to the `0`-address.
diff --git a/cmd/cvm/testdata/2/txs.json b/cmd/cvm/testdata/2/txs.json
new file mode 100644
index 000000000..304445858
--- /dev/null
+++ b/cmd/cvm/testdata/2/txs.json
@@ -0,0 +1,14 @@
+[
+ {
+ "input" : "0x",
+ "gas" : "0x5f5e100",
+ "gasPrice" : "0x1",
+ "nonce" : "0x0",
+ "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x186a0",
+ "v" : "0x1b",
+ "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b",
+ "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28",
+ "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81"
+ }
+]
\ No newline at end of file
diff --git a/cmd/cvm/testdata/3/alloc.json b/cmd/cvm/testdata/3/alloc.json
new file mode 100644
index 000000000..dca318ee5
--- /dev/null
+++ b/cmd/cvm/testdata/3/alloc.json
@@ -0,0 +1,16 @@
+{
+ "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x600140",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/3/env.json b/cmd/cvm/testdata/3/env.json
new file mode 100644
index 000000000..e283eff46
--- /dev/null
+++ b/cmd/cvm/testdata/3/env.json
@@ -0,0 +1,8 @@
+{
+ "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
+ "currentDifficulty" : "0x020000",
+ "currentGasLimit" : "0x3b9aca00",
+ "currentNumber" : "0x05",
+ "currentTimestamp" : "0x03e8",
+ "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"}
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/3/readme.md b/cmd/cvm/testdata/3/readme.md
new file mode 100644
index 000000000..e742cd706
--- /dev/null
+++ b/cmd/cvm/testdata/3/readme.md
@@ -0,0 +1,2 @@
+These files exemplify a transition where a transaction (executed on block 5) requests
+the blockhash for block `1`.
\ No newline at end of file
diff --git a/cmd/cvm/testdata/3/txs.json b/cmd/cvm/testdata/3/txs.json
new file mode 100644
index 000000000..304445858
--- /dev/null
+++ b/cmd/cvm/testdata/3/txs.json
@@ -0,0 +1,14 @@
+[
+ {
+ "input" : "0x",
+ "gas" : "0x5f5e100",
+ "gasPrice" : "0x1",
+ "nonce" : "0x0",
+ "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x186a0",
+ "v" : "0x1b",
+ "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b",
+ "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28",
+ "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81"
+ }
+]
\ No newline at end of file
diff --git a/cmd/cvm/testdata/4/alloc.json b/cmd/cvm/testdata/4/alloc.json
new file mode 100644
index 000000000..fadf2bdc4
--- /dev/null
+++ b/cmd/cvm/testdata/4/alloc.json
@@ -0,0 +1,16 @@
+{
+ "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x600340",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/4/env.json b/cmd/cvm/testdata/4/env.json
new file mode 100644
index 000000000..e283eff46
--- /dev/null
+++ b/cmd/cvm/testdata/4/env.json
@@ -0,0 +1,8 @@
+{
+ "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
+ "currentDifficulty" : "0x020000",
+ "currentGasLimit" : "0x3b9aca00",
+ "currentNumber" : "0x05",
+ "currentTimestamp" : "0x03e8",
+ "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"}
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/4/readme.md b/cmd/cvm/testdata/4/readme.md
new file mode 100644
index 000000000..7afb4e632
--- /dev/null
+++ b/cmd/cvm/testdata/4/readme.md
@@ -0,0 +1,3 @@
+These files exemplify a transition where a transaction (executed on block 5) requests
+the blockhash for block `4`, but where the hash for that block is missing.
+It's expected that executing these should cause `exit` with errorcode `4`.
\ No newline at end of file
diff --git a/cmd/cvm/testdata/4/txs.json b/cmd/cvm/testdata/4/txs.json
new file mode 100644
index 000000000..304445858
--- /dev/null
+++ b/cmd/cvm/testdata/4/txs.json
@@ -0,0 +1,14 @@
+[
+ {
+ "input" : "0x",
+ "gas" : "0x5f5e100",
+ "gasPrice" : "0x1",
+ "nonce" : "0x0",
+ "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x186a0",
+ "v" : "0x1b",
+ "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b",
+ "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28",
+ "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81"
+ }
+]
\ No newline at end of file
diff --git a/cmd/cvm/testdata/5/alloc.json b/cmd/cvm/testdata/5/alloc.json
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/cmd/cvm/testdata/5/alloc.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/5/env.json b/cmd/cvm/testdata/5/env.json
new file mode 100644
index 000000000..1085f63e6
--- /dev/null
+++ b/cmd/cvm/testdata/5/env.json
@@ -0,0 +1,11 @@
+{
+ "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "currentDifficulty": "0x20000",
+ "currentGasLimit": "0x750a163df65e8a",
+ "currentNumber": "1",
+ "currentTimestamp": "1000",
+ "ommers": [
+ {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" },
+ {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" }
+ ]
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/5/readme.md b/cmd/cvm/testdata/5/readme.md
new file mode 100644
index 000000000..e2b608fac
--- /dev/null
+++ b/cmd/cvm/testdata/5/readme.md
@@ -0,0 +1 @@
+These files exemplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
\ No newline at end of file
diff --git a/cmd/cvm/testdata/5/txs.json b/cmd/cvm/testdata/5/txs.json
new file mode 100644
index 000000000..0637a088a
--- /dev/null
+++ b/cmd/cvm/testdata/5/txs.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/cvm/testdata/7/alloc.json b/cmd/cvm/testdata/7/alloc.json
new file mode 100644
index 000000000..cef1a25ff
--- /dev/null
+++ b/cmd/cvm/testdata/7/alloc.json
@@ -0,0 +1,12 @@
+{
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "code": "0x",
+ "nonce": "0xac",
+ "storage": {}
+ },
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
+ "balance": "0xfeedbead",
+ "nonce" : "0x00"
+ }
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/7/env.json b/cmd/cvm/testdata/7/env.json
new file mode 100644
index 000000000..8fd9bc041
--- /dev/null
+++ b/cmd/cvm/testdata/7/env.json
@@ -0,0 +1,7 @@
+{
+ "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
+ "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000",
+ "currentGasLimit": "0x750a163df65e8a",
+ "currentNumber": "5",
+ "currentTimestamp": "1000"
+}
\ No newline at end of file
diff --git a/cmd/cvm/testdata/7/readme.md b/cmd/cvm/testdata/7/readme.md
new file mode 100644
index 000000000..62d998118
--- /dev/null
+++ b/cmd/cvm/testdata/7/readme.md
@@ -0,0 +1,7 @@
+This is a test for HomesteadToDao, checking if the
+DAO-transition works
+
+Example:
+```
+./cvm t8n --input.alloc=./testdata/7/alloc.json --input.txs=./testdata/7/txs.json --input.env=./testdata/7/env.json --output.alloc=stdout --state.fork=HomesteadToDaoAt5
+```
\ No newline at end of file
diff --git a/cmd/cvm/testdata/7/txs.json b/cmd/cvm/testdata/7/txs.json
new file mode 100644
index 000000000..0637a088a
--- /dev/null
+++ b/cmd/cvm/testdata/7/txs.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/cmd/cvm/transition-test.sh b/cmd/cvm/transition-test.sh
new file mode 100644
index 000000000..3580918b0
--- /dev/null
+++ b/cmd/cvm/transition-test.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+ticks="\`\`\`"
+
+function showjson(){
+ echo "\`$1\`:"
+ echo "${ticks}json"
+ cat $1
+ echo ""
+ echo "$ticks"
+}
+function demo(){
+ echo "$ticks"
+ echo "$1"
+ echo "$ticks"
+ echo ""
+}
+function tick(){
+ echo "$ticks"
+}
+
+cat << EOF
+## CVM state transition tool
+The \`cvm t8n\` tool is a stateless state transition utility. It is a utility
+which can
+1. Take a prestate, including
+ - Accounts,
+ - Block context information,
+ - Previous blockshashes (*optional)
+2. Apply a set of transactions,
+3. Apply a mining-reward (*optional),
+4. And generate a post-state, including
+ - State root, transaction root, receipt root,
+ - Information about rejected transactions,
+ - Optionally: a full or partial post-state dump
+## Specification
+The idea is to specify the behaviour of this binary very _strictly_, so that other
+node implementors can build replicas based on their own state-machines, and the
+state generators can swap between a \`geth\`-based implementation and a \`parityvm\`-based
+implementation.
+### Command line params
+Command line params that have to be supported are
+$(tick)
+` ./cvm t8n -h | grep "trace\|output\|state\."`
+$(tick)
+### Error codes and output
+All logging should happen against the \`stderr\`.
+There are a few (not many) errors that can occur, those are defined below.
+#### CVM-based errors (\`2\` to \`9\`)
+- Other CVM error. Exit code \`2\`
+- Failed configuration: when a non-supported or invalid fork was specified. Exit code \`3\`.
+- Block history is not supplied, but needed for a \`BLOCKHASH\` operation. If \`BLOCKHASH\`
+ is invoked targeting a block which history has not been provided for, the program will
+ exit with code \`4\`.
+#### IO errors (\`10\`-\`20\`)
+- Invalid input json: the supplied data could not be marshalled.
+ The program will exit with code \`10\`
+- IO problems: failure to load or save files, the program will exit with code \`11\`
+EOF
+
+# This should exit with 3
+./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json 2>/dev/null
+if [ $? != 3 ]; then
+ echo "Failed, exitcode should be 3"
+fi
+cat << EOF
+## Examples
+### Basic usage
+Invoking it with the provided example files
+EOF
+cmd="./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json"
+tick;echo "$cmd"; tick
+$cmd 2>/dev/null
+echo "Two resulting files:"
+echo ""
+showjson alloc.json
+showjson result.json
+echo ""
+
+echo "We can make them spit out the data to e.g. \`stdout\` like this:"
+cmd="./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout"
+tick;echo "$cmd"; tick
+output=`$cmd 2>/dev/null`
+echo "Output:"
+echo "${ticks}json"
+echo "$output"
+echo "$ticks"
+
+cat << EOF
+## About Ommers
+Mining rewards and ommer rewards might need to be added. This is how those are applied:
+- \`block_reward\` is the block mining reward for the miner (\`0xaa\`), of a block at height \`N\`.
+- For each ommer (mined by \`0xbb\`), with blocknumber \`N-delta\`
+ - (where \`delta\` is the difference between the current block and the ommer)
+ - The account \`0xbb\` (ommer miner) is awarded \`(8-delta)/ 8 * block_reward\`
+ - The account \`0xaa\` (block miner) is awarded \`block_reward / 32\`
+To make \`state_t8n\` apply these, the following inputs are required:
+- \`state.reward\`
+ - For ethash, it is \`5000000000000000000\` \`wei\`,
+ - If this is not defined, mining rewards are not applied,
+ - A value of \`0\` is valid, and causes accounts to be 'touched'.
+- For each ommer, the tool needs to be given an \`address\` and a \`delta\`. This
+ is done via the \`env\`.
+Note: the tool does not verify that e.g. the normal uncle rules apply,
+and allows e.g. two uncles at the same height, or the uncle-distance. This means that
+the tool allows for negative uncle reward (distance > 8)
+Example:
+EOF
+
+showjson ./testdata/5/env.json
+
+echo "When applying this, using a reward of \`0x80\`"
+cmd="./cvm t8n --input.alloc=./testdata/5/alloc.json --input.txs=./testdata/5/txs.json --input.env=./testdata/5/env.json --output.alloc=stdout --state.reward=0x80"
+output=`$cmd 2>/dev/null`
+echo "Output:"
+echo "${ticks}json"
+echo "$output"
+echo "$ticks"
+
+
+echo "### Block history"
+echo ""
+echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`."
+echo "If a required blockhash is not provided, the exit code should be \`4\`:"
+echo "Example where blockhashes are provided: "
+cmd="./cvm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace"
+tick && echo $cmd && tick
+$cmd 2>&1 >/dev/null
+cmd="cat trace-0.jsonl | grep BLOCKHASH -C2"
+tick && echo $cmd && tick
+echo "$ticks"
+cat trace-0.jsonl | grep BLOCKHASH -C2
+echo "$ticks"
+echo ""
+
+echo "In this example, the caller has not provided the required blockhash:"
+cmd="./cvm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace"
+tick && echo $cmd && tick
+tick
+$cmd
+errc=$?
+tick
+echo "Error code: $errc"
+
+
+echo "### Chaining"
+echo ""
+echo "Another thing that can be done, is to chain invocations:"
+cmd1="./cvm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout"
+cmd2="./cvm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json"
+echo "$ticks"
+echo "$cmd1 | $cmd2"
+output=$($cmd1 | $cmd2 )
+echo $output
+echo "$ticks"
+echo "What happened here, is that we first applied two identical transactions, so the second one was rejected. "
+echo "Then, taking the poststate alloc as the input for the next state, we tried again to include"
+echo "the same two transactions: this time, both failed due to too low nonce."
+echo ""
+echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the"
+echo "actual blocknumber (exposed to the CVM) would not increase."
+echo ""
diff --git a/cmd/devp2p/keycmd.go b/cmd/devp2p/keycmd.go
index f075378f6..2c6aaaed6 100644
--- a/cmd/devp2p/keycmd.go
+++ b/cmd/devp2p/keycmd.go
@@ -1,18 +1,18 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of go-ethereum.
+// Copyright 2020 The go-core Authors
+// This file is part of go-core.
//
-// go-ethereum is free software: you can redistribute it and/or modify
+// go-core is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
-// go-ethereum is distributed in the hope that it will be useful,
+// go-core is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see .
+// along with go-core. If not, see .
package main
diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go
index 5c3845222..6b0c9e945 100644
--- a/cmd/devp2p/main.go
+++ b/cmd/devp2p/main.go
@@ -29,13 +29,14 @@ import (
)
var (
- // Git information set by linker when building with ci.go.
+ // Git information set by linker when building with ci.go
+ gitTag string
gitCommit string
gitDate string
app = &cli.App{
Name: filepath.Base(os.Args[0]),
Usage: "go-core devp2p tool",
- Version: params.VersionWithCommit(gitCommit, gitDate),
+ Version: params.VersionWithTag(gitTag, gitCommit, gitDate),
Writer: os.Stdout,
HideVersion: true,
}
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index e831f6626..acb8c2f85 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -89,6 +89,7 @@ var (
)
var (
+ gitTag = ""
gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
gitDate = "" // Git commit date YYYYMMDD of the release (set via linker flags)
)
@@ -221,7 +222,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
// Assemble the raw devp2p protocol stack
stack, err := node.New(&node.Config{
Name: "gocore",
- Version: params.VersionWithCommit(gitCommit, gitDate),
+ Version: params.VersionWithTag("faucet", gitCommit, gitDate),
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
P2P: p2p.Config{
NAT: nat.Any(),
@@ -235,26 +236,20 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
if err != nil {
return nil, err
}
- // Assemble the Core light client protocol
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- cfg := xcb.DefaultConfig
- cfg.SyncMode = downloader.LightSync
- cfg.NetworkId = network
- cfg.Genesis = genesis
- return les.New(ctx, &cfg)
- }); err != nil {
- return nil, err
- }
- common.DefaultNetworkID = common.NetworkID(network)
+ // Assemble the Core light client protocol
+ cfg := xcb.DefaultConfig
+ cfg.SyncMode = downloader.LightSync
+ cfg.NetworkId = network
+ cfg.Genesis = genesis
+ lesBackend, err := les.New(stack, &cfg)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to register the Core service: %w", err)
+ }
// Assemble the xcbstats monitoring and reporting service'
if stats != "" {
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- var serv *les.LightCore
- ctx.Service(&serv)
- return xcbstats.New(stats, nil, serv)
- }); err != nil {
+ if err := xcbstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
return nil, err
}
}
@@ -271,7 +266,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
// Attach to the client and retrieve and interesting metadatas
api, err := stack.Attach()
if err != nil {
- stack.Stop()
+ stack.Close()
return nil, err
}
client := xcbclient.NewClient(api)
diff --git a/cmd/faucet/website.go b/cmd/faucet/website.go
index 2bd17c040..edc9b0e04 100644
--- a/cmd/faucet/website.go
+++ b/cmd/faucet/website.go
@@ -84,7 +84,7 @@ func faucetHtml() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "faucet.html", size: 10361, mode: os.FileMode(0664), modTime: time.Unix(1609843324, 0)}
+ info := bindataFileInfo{name: "faucet.html", size: 10361, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4b, 0xf1, 0x71, 0x66, 0x62, 0x76, 0x8e, 0xce, 0xe7, 0xde, 0x64, 0xa4, 0x3f, 0xa0, 0xbb, 0xf9, 0xa6, 0xec, 0x3c, 0x54, 0x81, 0xcb, 0x7b, 0x4d, 0xce, 0xbd, 0x54, 0x12, 0x0, 0xf, 0x3b, 0xfa}}
return a, nil
}
diff --git a/cmd/gocore/chaincmd.go b/cmd/gocore/chaincmd.go
index 3874a77c6..9fa85330f 100644
--- a/cmd/gocore/chaincmd.go
+++ b/cmd/gocore/chaincmd.go
@@ -94,6 +94,7 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag,
utils.NetworkIdFlag,
+ utils.TxLookupLimitFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -163,6 +164,7 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
utils.SyncModeFlag,
utils.FakePoWFlag,
utils.DevinFlag,
+ utils.TxLookupLimitFlag,
utils.NetworkIdFlag,
},
Category: "BLOCKCHAIN COMMANDS",
@@ -234,7 +236,7 @@ func initGenesis(ctx *cli.Context) error {
defer file.Close()
// Open an initialise both full and light databases
- stack := makeFullNode(ctx)
+ stack, _ := makeConfigNode(ctx)
defer stack.Close()
genesis := new(core.Genesis)
@@ -276,10 +278,10 @@ func importChain(ctx *cli.Context) error {
utils.SetupMetrics(ctx)
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(3 * time.Second)
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
- chain, db := utils.MakeChain(ctx, stack)
+ chain, db := utils.MakeChain(ctx, stack, false)
defer db.Close()
// Start periodically gathering memory profiles
@@ -366,10 +368,10 @@ func exportChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
- chain, _ := utils.MakeChain(ctx, stack)
+ chain, _ := utils.MakeChain(ctx, stack, true)
start := time.Now()
var err error
@@ -401,7 +403,7 @@ func importPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack)
@@ -419,7 +421,7 @@ func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack)
@@ -441,10 +443,10 @@ func copyDb(ctx *cli.Context) error {
utils.Fatalf("Source ancient chain directory path argument missing")
}
// Initialize a new chain for the running node to sync into
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
- chain, chainDb := utils.MakeChain(ctx, stack)
+ chain, chainDb := utils.MakeChain(ctx, stack, false)
syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
var syncBloom *trie.SyncBloom
@@ -549,10 +551,10 @@ func confirmAndRemoveDB(database string, kind string) {
}
func dump(ctx *cli.Context) error {
- stack := makeFullNode(ctx)
+ stack, _ := makeFullNode(ctx)
defer stack.Close()
- chain, chainDb := utils.MakeChain(ctx, stack)
+ chain, chainDb := utils.MakeChain(ctx, stack, true)
defer chainDb.Close()
for _, arg := range ctx.Args() {
var block *types.Block
@@ -591,7 +593,7 @@ func inspect(ctx *cli.Context) error {
node, _ := makeConfigNode(ctx)
defer node.Close()
- _, chainDb := utils.MakeChain(ctx, node)
+ _, chainDb := utils.MakeChain(ctx, node, true)
defer chainDb.Close()
return rawdb.InspectDatabase(chainDb)
diff --git a/cmd/gocore/config.go b/cmd/gocore/config.go
index ea9162072..f1b09158a 100644
--- a/cmd/gocore/config.go
+++ b/cmd/gocore/config.go
@@ -20,6 +20,7 @@ import (
"bufio"
"errors"
"fmt"
+ "github.com/core-coin/go-core/internal/xcbapi"
"os"
"reflect"
"unicode"
@@ -95,7 +96,7 @@ func loadConfig(file string, cfg *gocoreConfig) error {
func defaultNodeConfig() node.Config {
cfg := node.DefaultConfig
cfg.Name = clientIdentifier
- cfg.Version = params.VersionWithCommit(gitCommit, gitDate)
+ cfg.Version = params.VersionWithTag(gitTag, gitCommit, gitDate)
cfg.HTTPModules = append(cfg.HTTPModules, "xcb")
cfg.WSModules = append(cfg.WSModules, "xcb")
cfg.IPCPath = "gocore.ipc"
@@ -134,19 +135,20 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gocoreConfig) {
return stack, cfg
}
-func makeFullNode(ctx *cli.Context) *node.Node {
+func makeFullNode(ctx *cli.Context) (*node.Node, xcbapi.Backend) {
stack, cfg := makeConfigNode(ctx)
- utils.RegisterXcbService(stack, &cfg.Xcb)
+
+ backend := utils.RegisterXcbService(stack, &cfg.Xcb)
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
- utils.RegisterGraphQLService(stack, cfg.Node.GraphQLEndpoint(), cfg.Node.GraphQLCors, cfg.Node.GraphQLVirtualHosts, cfg.Node.HTTPTimeouts)
+ utils.RegisterGraphQLService(stack, backend, cfg.Node)
}
// Add the Core Stats daemon if requested.
if cfg.Xcbstats.URL != "" {
- utils.RegisterXcbStatsService(stack, cfg.Xcbstats.URL)
+ utils.RegisterXcbStatsService(stack, backend, cfg.Xcbstats.URL)
}
- return stack
+ return stack, backend
}
// dumpConfig is the dumpconfig command.
diff --git a/cmd/gocore/consolecmd.go b/cmd/gocore/consolecmd.go
index 146659166..377a56800 100644
--- a/cmd/gocore/consolecmd.go
+++ b/cmd/gocore/consolecmd.go
@@ -78,12 +78,12 @@ JavaScript API. See https://developer.coreblockchain.cc/JavaScript-Console`,
func localConsole(ctx *cli.Context) error {
// Create and start the node based on the CLI flags
prepare(ctx)
- node := makeFullNode(ctx)
- startNode(ctx, node)
- defer node.Close()
+ stack, backend := makeFullNode(ctx)
+ startNode(ctx, stack, backend)
+ defer stack.Close()
// Attach to the newly started node and start the JavaScript console
- client, err := node.Attach()
+ client, err := stack.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc gocore: %v", err)
}
@@ -177,12 +177,12 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
// everything down.
func ephemeralConsole(ctx *cli.Context) error {
// Create and start the node based on the CLI flags
- node := makeFullNode(ctx)
- startNode(ctx, node)
- defer node.Close()
+ stack, backend := makeFullNode(ctx)
+ startNode(ctx, stack, backend)
+ defer stack.Close()
// Attach to the newly started node and start the JavaScript console
- client, err := node.Attach()
+ client, err := stack.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc gocore: %v", err)
}
diff --git a/cmd/gocore/consolecmd_test.go b/cmd/gocore/consolecmd_test.go
index 0b10e9431..a41e8dd78 100644
--- a/cmd/gocore/consolecmd_test.go
+++ b/cmd/gocore/consolecmd_test.go
@@ -50,7 +50,7 @@ func TestConsoleWelcome(t *testing.T) {
gocore.SetTemplateFunc("goos", func() string { return runtime.GOOS })
gocore.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
gocore.SetTemplateFunc("gover", runtime.Version)
- gocore.SetTemplateFunc("gocorever", func() string { return params.VersionWithCommit("", "") })
+ gocore.SetTemplateFunc("gocorever", func() string { return params.VersionWithTag("", "", "") })
gocore.SetTemplateFunc("niltime", func() string {
return time.Unix(1651833293, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
@@ -60,7 +60,7 @@ func TestConsoleWelcome(t *testing.T) {
gocore.Expect(`
Welcome to the Gocore JavaScript console!
-instance: Gocore/v{{gocorever}}/{{goos}}-{{goarch}}/{{gover}}
+instance: Gocore/{{goos}}-{{goarch}}/{{gover}}
coinbase: {{.Corebase}}
at block: 0 ({{niltime}})
datadir: {{.Datadir}}
@@ -143,7 +143,7 @@ func testAttachWelcome(t *testing.T, gocore *testgocore, endpoint, apis string)
attach.SetTemplateFunc("goos", func() string { return runtime.GOOS })
attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH })
attach.SetTemplateFunc("gover", runtime.Version)
- attach.SetTemplateFunc("gocorever", func() string { return params.VersionWithCommit("", "") })
+ attach.SetTemplateFunc("gocorever", func() string { return params.VersionWithTag("", "", "") })
attach.SetTemplateFunc("corebase", func() string { return gocore.Corebase })
attach.SetTemplateFunc("niltime", func() string {
return time.Unix(1651833293, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
@@ -156,7 +156,7 @@ func testAttachWelcome(t *testing.T, gocore *testgocore, endpoint, apis string)
attach.Expect(`
Welcome to the Gocore JavaScript console!
-instance: Gocore/v{{gocorever}}/{{goos}}-{{goarch}}/{{gover}}
+instance: Gocore/{{goos}}-{{goarch}}/{{gover}}
coinbase: {{corebase}}
at block: 0 ({{niltime}}){{if ipc}}
datadir: {{datadir}}{{end}}
diff --git a/cmd/gocore/main.go b/cmd/gocore/main.go
index fc7c66861..d040cfb83 100644
--- a/cmd/gocore/main.go
+++ b/cmd/gocore/main.go
@@ -20,6 +20,7 @@ package main
import (
"fmt"
"github.com/core-coin/go-core/core/led"
+ "github.com/core-coin/go-core/internal/xcbapi"
"math"
"os"
"runtime"
@@ -35,7 +36,6 @@ import (
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/console"
"github.com/core-coin/go-core/internal/debug"
- "github.com/core-coin/go-core/les"
"github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/metrics"
"github.com/core-coin/go-core/node"
@@ -54,8 +54,9 @@ var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
gitDate = ""
+ gitTag = ""
// The app that holds all commands and flags.
- app = utils.NewApp(gitCommit, gitDate, "the go-core command line interface")
+ app = utils.NewApp(gitTag, gitCommit, gitDate, "the go-core command line interface")
// flags that configure the node
nodeFlags = []cli.Flag{
utils.IdentityFlag,
@@ -88,6 +89,7 @@ var (
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.LightServeFlag,
+ utils.TxLookupLimitFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
@@ -157,6 +159,8 @@ var (
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag,
+ utils.AuthPortFlag,
+ utils.JWTSecretFlag,
utils.HTTPVirtualHostsFlag,
utils.LegacyRPCEnabledFlag,
utils.LegacyRPCListenAddrFlag,
@@ -164,11 +168,10 @@ var (
utils.LegacyRPCCORSDomainFlag,
utils.LegacyRPCVirtualHostsFlag,
utils.GraphQLEnabledFlag,
- utils.GraphQLListenAddrFlag,
- utils.GraphQLPortFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
utils.HTTPApiFlag,
+ utils.HTTPPathPrefixFlag,
utils.LegacyRPCApiFlag,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
@@ -178,11 +181,13 @@ var (
utils.WSApiFlag,
utils.LegacyWSApiFlag,
utils.WSAllowedOriginsFlag,
+ utils.WSPathPrefixFlag,
utils.LegacyWSAllowedOriginsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.InsecureUnlockAllowedFlag,
utils.RPCGlobalEnergyCap,
+ utils.RPCGlobalTxFeeCap,
}
metricsFlags = []cli.Flag{
@@ -306,17 +311,18 @@ func gocore(ctx *cli.Context) error {
return fmt.Errorf("invalid command: %q", args[0])
}
prepare(ctx)
- node := makeFullNode(ctx)
- defer node.Close()
- startNode(ctx, node)
- node.Wait()
+ stack, backend := makeFullNode(ctx)
+ defer stack.Close()
+
+ startNode(ctx, stack, backend)
+ stack.Wait()
return nil
}
// startNode boots up the system node and all registered protocols, after which
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
// miner.
-func startNode(ctx *cli.Context, stack *node.Node) {
+func startNode(ctx *cli.Context, stack *node.Node, backend xcbapi.Backend) {
debug.Memsize.Add("node", stack)
// Start up the node itself
@@ -336,25 +342,6 @@ func startNode(ctx *cli.Context, stack *node.Node) {
}
xcbClient := xcbclient.NewClient(rpcClient)
- // Set contract backend for core service if local node
- // is serving LES requests.
- if ctx.GlobalInt(utils.LightServeFlag.Name) > 0 {
- var xcbService *xcb.Core
- if err := stack.Service(&xcbService); err != nil {
- utils.Fatalf("Failed to retrieve core service: %v", err)
- }
- xcbService.SetContractBackend(xcbClient)
- }
- // Set contract backend for les service if local node is
- // running as a light client.
- if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
- var lesService *les.LightCore
- if err := stack.Service(&lesService); err != nil {
- utils.Fatalf("Failed to retrieve light core service: %v", err)
- }
- lesService.SetContractBackend(xcbClient)
- }
-
go func() {
// Open any wallets already attached
for _, wallet := range stack.AccountManager().Wallets() {
@@ -406,7 +393,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if timestamp := time.Unix(int64(done.Latest.Time), 0); time.Since(timestamp) < 10*time.Minute {
log.Info("Synchronisation completed", "latestnum", done.Latest.Number, "latesthash", done.Latest.Hash(),
"age", common.PrettyAge(timestamp))
- stack.Stop()
+ stack.Close()
}
}
}()
@@ -418,8 +405,8 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining")
}
- var core *xcb.Core
- if err := stack.Service(&core); err != nil {
+ xcbBackend, ok := backend.(*xcb.XcbAPIBackend)
+ if !ok {
- utils.Fatalf("Core service not running: %v", err)
+ utils.Fatalf("Core service not running")
}
// Set the energy price to the limits from the CLI and start mining
@@ -427,15 +414,15 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if ctx.GlobalIsSet(utils.LegacyMinerEnergyPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerEnergyPriceFlag.Name) {
energyprice = utils.GlobalBig(ctx, utils.LegacyMinerEnergyPriceFlag.Name)
}
- core.TxPool().SetEnergyPrice(energyprice)
-
+ xcbBackend.TxPool().SetEnergyPrice(energyprice)
+ // start mining
threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name)
if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name)
log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads")
}
- if err := core.StartMining(threads); err != nil {
+ if err := xcbBackend.StartMining(threads); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
}
diff --git a/cmd/gocore/retestxcb.go b/cmd/gocore/retestxcb.go
index 10848d3e2..c52305ef1 100644
--- a/cmd/gocore/retestxcb.go
+++ b/cmd/gocore/retestxcb.go
@@ -188,11 +188,11 @@ func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) {
return e.inner.Author(header)
}
-func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
return e.inner.VerifyHeader(chain, header, seal)
}
-func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
return e.inner.VerifyHeaders(chain, headers, seals)
}
@@ -200,11 +200,11 @@ func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types.
return e.inner.VerifyUncles(chain, block)
}
-func (e *NoRewardEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return e.inner.VerifySeal(chain, header)
}
-func (e *NoRewardEngine) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
return e.inner.Prepare(chain, header)
}
@@ -217,7 +217,7 @@ func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *st
state.AddBalance(header.Coinbase, reward)
}
-func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
+func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
uncles []*types.Header) {
if e.rewardsOn {
e.inner.Finalize(chain, header, statedb, txs, uncles)
@@ -227,7 +227,7 @@ func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Hea
}
}
-func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
+func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
if e.rewardsOn {
return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts)
@@ -240,7 +240,7 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header
}
}
-func (e *NoRewardEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
return e.inner.Seal(chain, block, results, stop)
}
@@ -248,11 +248,11 @@ func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash {
return e.inner.SealHash(header)
}
-func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return e.inner.CalcDifficulty(chain, time, parent)
}
-func (e *NoRewardEngine) APIs(chain consensus.ChainReader) []rpc.API {
+func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return e.inner.APIs(chain)
}
@@ -330,7 +330,7 @@ func (api *RetestxcbAPI) SetChainParams(ctx context.Context, chainParams ChainPa
}
engine := &NoRewardEngine{inner: inner, rewardsOn: chainParams.SealEngine != "NoReward"}
- blockchain, err := core.NewBlockChain(xcbDb, nil, chainConfig, engine, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(xcbDb, nil, chainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
return false, err
}
@@ -592,7 +592,7 @@ func (api *RetestxcbAPI) AccountRange(ctx context.Context,
context := core.NewCVMContext(msg, block.Header(), api.blockchain, nil)
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewCVM(context, statedb, api.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
return AccountRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
@@ -701,7 +701,7 @@ func (api *RetestxcbAPI) StorageRangeAt(ctx context.Context,
context := core.NewCVMContext(msg, block.Header(), api.blockchain, nil)
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewCVM(context, statedb, api.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
return StorageRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
@@ -749,7 +749,7 @@ func (api *RetestxcbAPI) StorageRangeAt(ctx context.Context,
}
func (api *RetestxcbAPI) ClientVersion(ctx context.Context) (string, error) {
- return "Gocore-" + params.VersionWithCommit(gitCommit, gitDate), nil
+ return "Gocore-" + params.VersionWithTag(gitTag, gitCommit, gitDate), nil
}
// splitAndTrim splits input separated by a comma
@@ -801,15 +801,22 @@ func retestxcb(ctx *cli.Context) error {
}
vhosts := splitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name))
cors := splitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name))
+ // register apis and create handler stack
+ srv := rpc.NewServer()
+ err := node.RegisterApisFromWhitelist(rpcAPI, []string{"test", "xcb", "debug", "web3"}, srv, false)
+ if err != nil {
+ utils.Fatalf("Could not register RPC apis: %v", err)
+ }
+ handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
// start http server
- var RetestxcbHTTPTimeouts = rpc.HTTPTimeouts{
+ var RetestxcbHTTPTimeouts = rpc.HTTPTimeouts{
ReadTimeout: 120 * time.Second,
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
}
httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.HTTPListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name))
- listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"test", "xcb", "debug", "web3"}, cors, vhosts, RetestxcbHTTPTimeouts)
+ listener, err := node.StartHTTPEndpoint(httpEndpoint, RetestxcbHTTPTimeouts, handler)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
diff --git a/cmd/gocore/usage.go b/cmd/gocore/usage.go
index f611dabe3..0e04f913e 100644
--- a/cmd/gocore/usage.go
+++ b/cmd/gocore/usage.go
@@ -80,6 +80,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.XcbStatsURLFlag,
+ utils.TxLookupLimitFlag,
utils.IdentityFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
@@ -148,23 +149,25 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.IPCDisabledFlag,
utils.IPCPathFlag,
+ utils.JWTSecretFlag,
utils.HTTPEnabledFlag,
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPApiFlag,
+ utils.HTTPPathPrefixFlag,
utils.HTTPCORSDomainFlag,
utils.HTTPVirtualHostsFlag,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
utils.WSApiFlag,
+ utils.WSPathPrefixFlag,
utils.WSAllowedOriginsFlag,
utils.GraphQLEnabledFlag,
- utils.GraphQLListenAddrFlag,
- utils.GraphQLPortFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
utils.RPCGlobalEnergyCap,
+ utils.RPCGlobalTxFeeCap,
utils.JSpathFlag,
utils.ExecFlag,
utils.PreloadJSFlag,
@@ -247,6 +250,8 @@ var AppHelpFlagGroups = []flagGroup{
utils.LegacyWSApiFlag,
utils.LegacyGpoBlocksFlag,
utils.LegacyGpoPercentileFlag,
+ utils.LegacyGraphQLListenAddrFlag,
+ utils.LegacyGraphQLPortFlag,
}, debug.DeprecatedFlags...),
},
{
diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go
index 1670ffab0..16a9aa32d 100644
--- a/cmd/p2psim/main.go
+++ b/cmd/p2psim/main.go
@@ -291,7 +291,7 @@ func createNode(ctx *cli.Context) error {
config.PrivateKey = privKey
}
if services := ctx.String("services"); services != "" {
- config.Services = strings.Split(services, ",")
+ config.Lifecycles = strings.Split(services, ",")
}
node, err := client.CreateNode(config)
if err != nil {
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 629ac26d2..d35050d1b 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -73,7 +73,7 @@ func StartNode(stack *node.Node) {
defer signal.Stop(sigc)
<-sigc
log.Info("Got interrupt, shutting down...")
- go stack.Stop()
+ go stack.Close()
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
@@ -301,7 +301,7 @@ func ExportPreimages(db xcbdb.Database, fn string) error {
defer writer.(*gzip.Writer).Close()
}
// Iterate over the preimages and export them
- it := db.NewIteratorWithPrefix([]byte("secure-key-"))
+ it := db.NewIterator([]byte("secure-key-"), nil)
defer it.Release()
for it.Next() {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index b9d660615..7ddcbba52 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -18,8 +18,8 @@
package utils
import (
- "errors"
"fmt"
+ "github.com/core-coin/go-core/internal/xcbapi"
"io"
"io/ioutil"
"math"
@@ -60,7 +60,6 @@ import (
"github.com/core-coin/go-core/p2p/nat"
"github.com/core-coin/go-core/p2p/netutil"
"github.com/core-coin/go-core/params"
- "github.com/core-coin/go-core/rpc"
"github.com/core-coin/go-core/xcb"
"github.com/core-coin/go-core/xcb/downloader"
"github.com/core-coin/go-core/xcb/energyprice"
@@ -75,8 +74,8 @@ var (
{{if .cmd.Description}}{{.cmd.Description}}
{{end}}{{if .cmd.Subcommands}}
SUBCOMMANDS:
- {{range .cmd.Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
- {{end}}{{end}}{{if .categorizedFlags}}
+ {{range .cmd.Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
+ {{end}}{{end}}{{if .categorizedFlags}}
{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS:
{{range $categorized.Flags}}{{"\t"}}{{.}}
{{end}}
@@ -86,10 +85,10 @@ SUBCOMMANDS:
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
- {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
- {{end}}{{end}}{{if .Flags}}
+ {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
+ {{end}}{{end}}{{if .Flags}}
OPTIONS:
-{{range $.Flags}}{{"\t"}}{{.}}
+{{range $.Flags}} {{.}}
{{end}}
{{end}}`
)
@@ -112,12 +111,12 @@ GLOBAL OPTIONS:
}
// NewApp creates an app with sane defaults.
-func NewApp(gitCommit, gitDate, usage string) *cli.App {
+func NewApp(gitTag, gitCommit, gitDate, usage string) *cli.App {
app := cli.NewApp()
app.Name = filepath.Base(os.Args[0])
app.Author = ""
app.Email = ""
- app.Version = params.VersionWithCommit(gitCommit, gitDate)
+ app.Version = params.VersionWithTag(gitTag, gitCommit, gitDate)
app.Usage = usage
return app
}
@@ -234,6 +233,11 @@ var (
Usage: `Blockchain garbage collection mode ("full", "archive")`,
Value: "full",
}
+ TxLookupLimitFlag = cli.Int64Flag{
+ Name: "txlookuplimit",
+ Usage: "Number of recent blocks to maintain transactions index by-hash for (default = index all blocks)",
+ Value: 0,
+ }
LightKDFFlag = cli.BoolFlag{
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@@ -453,6 +457,21 @@ var (
Name: "rpc.energycap",
Usage: "Sets a cap on energy that can be used in xcb_call/estimateEnergy",
}
+ RPCGlobalTxFeeCap = cli.Float64Flag{
+ Name: "rpc.txfeecap",
+ Usage: "Sets a cap on transaction fee (in core) that can be sent via the RPC APIs (0 = no cap)",
+ Value: xcb.DefaultConfig.RPCTxFeeCap,
+ }
+ // Authenticated port settings
+ AuthPortFlag = cli.IntFlag{
+ Name: "authrpc.port",
+ Usage: "Listening port for authenticated APIs",
+ Value: node.DefaultAuthPort,
+ }
+ JWTSecretFlag = cli.StringFlag{
+ Name: "authrpc.jwtsecret",
+ Usage: "JWT secret (or path to a jwt secret) to use for authenticated RPC endpoints",
+ }
// Logging and debug settings
XcbStatsURLFlag = cli.StringFlag{
Name: "xcbstats",
@@ -504,6 +523,25 @@ var (
Usage: "API's offered over the HTTP-RPC interface",
Value: "",
}
+ HTTPPathPrefixFlag = cli.StringFlag{
+ Name: "http.rpcprefix",
+ Usage: "HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
+ Value: "",
+ }
+ GraphQLEnabledFlag = cli.BoolFlag{
+ Name: "graphql",
+ Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
+ }
+ GraphQLCORSDomainFlag = cli.StringFlag{
+ Name: "graphql.corsdomain",
+ Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+ Value: "",
+ }
+ GraphQLVirtualHostsFlag = cli.StringFlag{
+ Name: "graphql.vhosts",
+ Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+ Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
+ }
WSEnabledFlag = cli.BoolFlag{
Name: "ws",
Usage: "Enable the WS-RPC server",
@@ -528,30 +566,11 @@ var (
Usage: "Origins from which to accept websockets requests",
Value: "",
}
- GraphQLEnabledFlag = cli.BoolFlag{
- Name: "graphql",
- Usage: "Enable the GraphQL server",
- }
- GraphQLListenAddrFlag = cli.StringFlag{
- Name: "graphql.addr",
- Usage: "GraphQL server listening interface",
- Value: node.DefaultGraphQLHost,
- }
- GraphQLPortFlag = cli.IntFlag{
- Name: "graphql.port",
- Usage: "GraphQL server listening port",
- Value: node.DefaultGraphQLPort,
- }
- GraphQLCORSDomainFlag = cli.StringFlag{
- Name: "graphql.corsdomain",
- Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+ WSPathPrefixFlag = cli.StringFlag{
+ Name: "ws.rpcprefix",
+ Usage: "HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
Value: "",
}
- GraphQLVirtualHostsFlag = cli.StringFlag{
- Name: "graphql.vhosts",
- Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
- Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
- }
ExecFlag = cli.StringFlag{
Name: "exec",
Usage: "Execute JavaScript statement",
@@ -883,6 +902,11 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
cfg.HTTPCors = splitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
log.Warn("The flag --rpccorsdomain is deprecated and will be removed in the future, please use --http.corsdomain")
}
+
+ if ctx.GlobalIsSet(AuthPortFlag.Name) {
+ cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
+ }
+
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = splitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
}
@@ -902,18 +926,14 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
}
+ if ctx.GlobalIsSet(HTTPPathPrefixFlag.Name) {
+ cfg.HTTPPathPrefix = ctx.GlobalString(HTTPPathPrefixFlag.Name)
+ }
}
// setGraphQL creates the GraphQL listener interface string from the set
// command line flags, returning empty if the GraphQL endpoint is disabled.
func setGraphQL(ctx *cli.Context, cfg *node.Config) {
- if ctx.GlobalBool(GraphQLEnabledFlag.Name) && cfg.GraphQLHost == "" {
- cfg.GraphQLHost = "127.0.0.1"
- if ctx.GlobalIsSet(GraphQLListenAddrFlag.Name) {
- cfg.GraphQLHost = ctx.GlobalString(GraphQLListenAddrFlag.Name)
- }
- }
- cfg.GraphQLPort = ctx.GlobalInt(GraphQLPortFlag.Name)
if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) {
cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
}
@@ -955,6 +975,11 @@ func setWS(ctx *cli.Context, cfg *node.Config) {
cfg.WSModules = splitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name))
log.Warn("The flag --wsapi is deprecated and will be removed in the future, please use --ws.api")
}
+
+ if ctx.GlobalIsSet(WSPathPrefixFlag.Name) {
+ cfg.WSPathPrefix = ctx.GlobalString(WSPathPrefixFlag.Name)
+ }
+
if ctx.GlobalIsSet(WSApiFlag.Name) {
cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name))
}
@@ -1181,6 +1206,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setDataDir(ctx, cfg)
setSmartCard(ctx, cfg)
+ if ctx.GlobalIsSet(JWTSecretFlag.Name) {
+ cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
+ }
+
if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
}
@@ -1250,7 +1279,7 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
func setGPO(ctx *cli.Context, cfg *energyprice.Config, light bool) {
// If we are running the light client, apply another group
- // settings for gas oracle.
+ // settings for energy oracle.
if light {
cfg.Blocks = xcb.DefaultLightGPOConfig.Blocks
cfg.Percentile = xcb.DefaultLightGPOConfig.Percentile
@@ -1426,6 +1455,10 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
+ CheckExclusive(ctx, GCModeFlag, "archive", TxLookupLimitFlag)
+ // todo(rjl493456442) make it available for les server
+ // Ancient tx indices pruning is not available for les server now
+ // since light client relies on the server for transaction status query.
var ks *keystore.KeyStore
if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 {
@@ -1461,6 +1494,9 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
}
+ if ctx.GlobalIsSet(TxLookupLimitFlag.Name) {
+ cfg.TxLookupLimit = ctx.GlobalUint64(TxLookupLimitFlag.Name)
+ }
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
@@ -1500,6 +1536,9 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
} else {
cfg.RPCEnergyCap = new(big.Int).SetUint64(math.MaxUint64 / 2)
}
+ if ctx.GlobalIsSet(RPCGlobalTxFeeCap.Name) {
+ cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCap.Name)
+ }
if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) {
urls := ctx.GlobalString(DNSDiscoveryFlag.Name)
if urls == "" {
@@ -1520,7 +1559,7 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
case ctx.GlobalInt(NetworkIdFlag.Name) == 3, ctx.GlobalBool(DevinFlag.Name):
cfg.NetworkId = 3
cfg.Genesis = core.DefaultDevinGenesisBlock()
- setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.DevinGenesisHash])
+ setDNSDiscoveryDefaults(cfg, params.DevinGenesisHash)
case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
@@ -1570,7 +1609,7 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
default:
if cfg.NetworkId == 1 {
cfg.Genesis = core.DefaultGenesisBlock()
- setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.MainnetGenesisHash])
+ setDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
}
}
common.DefaultNetworkID = common.NetworkID(cfg.NetworkId)
@@ -1578,71 +1617,54 @@ func SetXcbConfig(ctx *cli.Context, stack *node.Node, cfg *xcb.Config) {
// setDNSDiscoveryDefaults configures DNS discovery with the given URL if
// no URLs are set.
-func setDNSDiscoveryDefaults(cfg *xcb.Config, url string) {
+func setDNSDiscoveryDefaults(cfg *xcb.Config, genesis common.Hash) {
if cfg.DiscoveryURLs != nil {
- return
+ return // already set through flags/config
}
- if cfg.UseDNSDiscovery {
+
+ protocol := "xcb"
+ if cfg.SyncMode == downloader.LightSync {
+ protocol = "les"
+ }
+ if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
cfg.DiscoveryURLs = []string{url}
}
}
// RegisterXcbService adds an Core client to the stack.
-func RegisterXcbService(stack *node.Node, cfg *xcb.Config) {
- var err error
+func RegisterXcbService(stack *node.Node, cfg *xcb.Config) xcbapi.Backend {
if cfg.SyncMode == downloader.LightSync {
- err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- return les.New(ctx, cfg)
- })
+ backend, err := les.New(stack, cfg)
+ if err != nil {
+ Fatalf("Failed to register the Core service: %v", err)
+ }
+ return backend.ApiBackend
} else {
- err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- fullNode, err := xcb.New(ctx, cfg)
- if fullNode != nil && cfg.LightServ > 0 {
- ls, _ := les.NewLesServer(fullNode, cfg)
- fullNode.AddLesServer(ls)
+ backend, err := xcb.New(stack, cfg)
+ if err != nil {
+ Fatalf("Failed to register the Core service: %v", err)
+ }
+ if cfg.LightServ > 0 {
+ _, err := les.NewLesServer(stack, backend, cfg)
+ if err != nil {
+ Fatalf("Failed to create the LES server: %v", err)
}
- return fullNode, err
- })
- }
- if err != nil {
- Fatalf("Failed to register the Core service: %v", err)
+ }
+ return backend.APIBackend
}
}
// RegisterXcbStatsService configures the Core Stats daemon and adds it to
// the given node.
-func RegisterXcbStatsService(stack *node.Node, url string) {
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- // Retrieve both xcb and les services
- var xcbServ *xcb.Core
- ctx.Service(&xcbServ)
-
- var lesServ *les.LightCore
- ctx.Service(&lesServ)
-
- // Let xcbstats use whichever is not nil
- return xcbstats.New(url, xcbServ, lesServ)
- }); err != nil {
+func RegisterXcbStatsService(stack *node.Node, backend xcbapi.Backend, url string) {
+ if err := xcbstats.New(stack, backend, backend.Engine(), url); err != nil {
Fatalf("Failed to register the Core Stats service: %v", err)
}
}
// RegisterGraphQLService is a utility function to construct a new service and register it against a node.
-func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) {
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- // Try to construct the GraphQL service backed by a full node
- var xcbServ *xcb.Core
- if err := ctx.Service(&xcbServ); err == nil {
- return graphql.New(xcbServ.APIBackend, endpoint, cors, vhosts, timeouts)
- }
- // Try to construct the GraphQL service backed by a light node
- var lesServ *les.LightCore
- if err := ctx.Service(&lesServ); err == nil {
- return graphql.New(lesServ.ApiBackend, endpoint, cors, vhosts, timeouts)
- }
- // Well, this should not have happened, bail out
- return nil, errors.New("no Core service")
- }); err != nil {
+func RegisterGraphQLService(stack *node.Node, backend xcbapi.Backend, cfg node.Config) {
+ if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil {
Fatalf("Failed to register the GraphQL service: %v", err)
}
}
@@ -1719,7 +1741,7 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
}
// MakeChain creates a chain manager from set command line flags.
-func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb xcbdb.Database) {
+func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.BlockChain, chainDb xcbdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
@@ -1756,7 +1778,12 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
- chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
+ var limit *uint64
+ if ctx.GlobalIsSet(TxLookupLimitFlag.Name) && !readOnly {
+ l := ctx.GlobalUint64(TxLookupLimitFlag.Name)
+ limit = &l
+ }
+ chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, limit)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index 80f69fab9..241f5f043 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -135,6 +135,16 @@ var (
Usage: "Suggested energy price is the given percentile of a set of recent transaction energy prices (deprecated, use --gpo.percentile)",
Value: xcb.DefaultConfig.GPO.Percentile,
}
+ // (Deprecated June 2022, shown in aliased flags section)
+ LegacyGraphQLListenAddrFlag = cli.StringFlag{
+ Name: "graphql.addr",
+ Usage: "GraphQL server listening interface (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
+ }
+ LegacyGraphQLPortFlag = cli.IntFlag{
+ Name: "graphql.port",
+ Usage: "GraphQL server listening port (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
+ Value: node.DefaultHTTPPort,
+ }
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
diff --git a/cmd/xcbkey/main.go b/cmd/xcbkey/main.go
index 8ea9ee308..240650420 100644
--- a/cmd/xcbkey/main.go
+++ b/cmd/xcbkey/main.go
@@ -29,13 +29,14 @@ const (
)
// Git SHA1 commit hash of the release (set via linker flags)
+var gitTag = ""
var gitCommit = ""
-var gitDate = ""
+var gitDate = ""
var app *cli.App
func init() {
- app = utils.NewApp(gitCommit, gitDate, "an Core key manager")
+ app = utils.NewApp(gitTag, gitCommit, gitDate, "an Core key manager")
app.Commands = []cli.Command{
commandGenerate,
commandInspect,
diff --git a/common/bytes.go b/common/bytes.go
index 419619bb5..38e1aa706 100644
--- a/common/bytes.go
+++ b/common/bytes.go
@@ -145,3 +145,14 @@ func TrimLeftZeroes(s []byte) []byte {
}
return s[idx:]
}
+
+// TrimRightZeroes returns a subslice of s without trailing zeroes
+func TrimRightZeroes(s []byte) []byte {
+ idx := len(s)
+ for ; idx > 0; idx-- {
+ if s[idx-1] != 0 {
+ break
+ }
+ }
+ return s[:idx]
+}
diff --git a/common/bytes_test.go b/common/bytes_test.go
index bb3a5d0d1..e4e3969e4 100644
--- a/common/bytes_test.go
+++ b/common/bytes_test.go
@@ -105,3 +105,22 @@ func TestNoPrefixShortHexOddLength(t *testing.T) {
t.Errorf("Expected %x got %x", expected, result)
}
}
+
+func TestTrimRightZeroes(t *testing.T) {
+ tests := []struct {
+ arr []byte
+ exp []byte
+ }{
+ {FromHex("0x00ffff00ff0000"), FromHex("0x00ffff00ff")},
+ {FromHex("0x00000000000000"), []byte{}},
+ {FromHex("0xff"), FromHex("0xff")},
+ {[]byte{}, []byte{}},
+ {FromHex("0x00ffffffffffff"), FromHex("0x00ffffffffffff")},
+ }
+ for i, test := range tests {
+ got := TrimRightZeroes(test.arr)
+ if !bytes.Equal(got, test.exp) {
+ t.Errorf("test %d, got %x exp %x", i, got, test.exp)
+ }
+ }
+}
diff --git a/consensus/clique/api.go b/consensus/clique/api.go
index a1420c5d3..d049f4050 100644
--- a/consensus/clique/api.go
+++ b/consensus/clique/api.go
@@ -28,7 +28,7 @@ import (
// API is a user facing RPC API to allow controlling the signer and voting
// mechanisms of the proof-of-authority scheme.
type API struct {
- chain consensus.ChainReader
+ chain consensus.ChainHeaderReader
clique *Clique
}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index b4d7b941b..fd7d9e07b 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -212,14 +212,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) {
}
// VerifyHeader checks whether a header conforms to the consensus rules.
-func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
return c.verifyHeader(chain, header, nil)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
-func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
abort := make(chan struct{})
results := make(chan error, len(headers))
@@ -241,7 +241,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Hea
// caller may optionally pass in a batch of parents (ascending order) to avoid
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
-func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
if header.Number == nil {
return errUnknownBlock
}
@@ -296,7 +296,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
// rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
-func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
if number == 0 {
@@ -336,7 +336,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
}
// snapshot retrieves the authorization snapshot at a given point in time.
-func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
+func (c *Clique) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
headers []*types.Header
@@ -431,7 +431,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e
// VerifySeal implements consensus.Engine, checking whether the signature contained
// in the header satisfies the consensus protocol requirements.
-func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return c.verifySeal(chain, header, nil)
}
@@ -439,7 +439,7 @@ func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) e
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
-func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
@@ -482,7 +482,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
-func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// If the block isn't a checkpoint, cast a random vote (good enough for now)
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
@@ -544,7 +544,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
// rewards given.
-func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
+func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.Root = state.IntermediateRoot(true)
header.UncleHash = types.CalcUncleHash(nil)
@@ -552,7 +552,7 @@ func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, sta
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
-func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.Root = state.IntermediateRoot(true)
header.UncleHash = types.CalcUncleHash(nil)
@@ -573,7 +573,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {
// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
-func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()
// Sealing the genesis block is not supported
@@ -646,7 +646,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
-func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
if err != nil {
return nil
@@ -676,7 +676,7 @@ func (c *Clique) Close() error {
// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
-func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
+func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{{
Namespace: "clique",
Version: "1.0",
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index 74250ee2b..174ea2620 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -57,7 +57,7 @@ func TestReimportMirroredState(t *testing.T) {
genesis := genspec.MustCommit(db)
// Generate a batch of blocks, each properly signed
- chain, _ := core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
blocks, _ := core.GenerateChain(params.AllCliqueProtocolChanges, genesis, engine, db, 3, func(i int, block *core.BlockGen) {
@@ -91,7 +91,7 @@ func TestReimportMirroredState(t *testing.T) {
db = rawdb.NewMemoryDatabase()
genspec.MustCommit(db)
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[:2]); err != nil {
@@ -104,7 +104,7 @@ func TestReimportMirroredState(t *testing.T) {
// Simulate a crash by creating a new chain on top of the database, without
// flushing the dirty states out. Insert the last block, trigerring a sidechain
// reimport.
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[2:]); err != nil {
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index f82f9339b..b05a2f0f1 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -450,7 +450,7 @@ func TestClique(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
- chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil, nil)
if err != nil {
t.Errorf("test %d: failed to create test chain: %v", i, err)
continue
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 1168f575d..9609a03ed 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -27,9 +27,9 @@ import (
"github.com/core-coin/go-core/rpc"
)
-// ChainReader defines a small collection of methods needed to access the local
-// blockchain during header and/or uncle verification.
-type ChainReader interface {
+// ChainHeaderReader defines a small collection of methods needed to access the local
+// blockchain during header verification.
+type ChainHeaderReader interface {
// Config retrieves the blockchain's chain configuration.
Config() *params.ChainConfig
@@ -44,7 +44,12 @@ type ChainReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
+}
+// ChainReader defines a small collection of methods needed to access the local
+// blockchain during header and/or uncle verification.
+type ChainReader interface {
+ ChainHeaderReader
// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash common.Hash, number uint64) *types.Block
}
@@ -59,13 +64,13 @@ type Engine interface {
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. Verifying the seal may be done optionally here, or explicitly
// via the VerifySeal method.
- VerifyHeader(chain ChainReader, header *types.Header, seal bool) error
+ VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications (the order is that of
// the input slice).
- VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
+ VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of a given engine.
@@ -73,18 +78,18 @@ type Engine interface {
// VerifySeal checks whether the crypto seal on a header is valid according to
// the consensus rules of the given engine.
- VerifySeal(chain ChainReader, header *types.Header) error
+ VerifySeal(chain ChainHeaderReader, header *types.Header) error
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
- Prepare(chain ChainReader, header *types.Header) error
+ Prepare(chain ChainHeaderReader, header *types.Header) error
// Finalize runs any post-transaction state modifications (e.g. block rewards)
// but does not assemble the block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
- Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+ Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header)
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
@@ -92,7 +97,7 @@ type Engine interface {
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
- FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+ FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
@@ -100,17 +105,17 @@ type Engine interface {
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
- Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
+ Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
- CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int
+ CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int
// APIs returns the RPC APIs this consensus engine provides.
- APIs(chain ChainReader) []rpc.API
+ APIs(chain ChainHeaderReader) []rpc.API
// Close terminates any background threads maintained by the consensus engine.
Close() error
diff --git a/consensus/cryptore/consensus.go b/consensus/cryptore/consensus.go
index ec5eb3d19..f776b701a 100644
--- a/consensus/cryptore/consensus.go
+++ b/consensus/cryptore/consensus.go
@@ -67,7 +67,7 @@ func (cryptore *Cryptore) Author(header *types.Header) (common.Address, error) {
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Core cryptore engine.
-func (cryptore *Cryptore) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+func (cryptore *Cryptore) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
// If we're running a full engine faking, accept any input as valid
if cryptore.config.PowMode == ModeFullFake {
return nil
@@ -88,7 +88,7 @@ func (cryptore *Cryptore) VerifyHeader(chain consensus.ChainReader, header *type
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
-func (cryptore *Cryptore) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+func (cryptore *Cryptore) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// If we're running a full engine faking, accept any input as valid
if cryptore.config.PowMode == ModeFullFake || len(headers) == 0 {
abort, results := make(chan struct{}), make(chan error, len(headers))
@@ -150,7 +150,7 @@ func (cryptore *Cryptore) VerifyHeaders(chain consensus.ChainReader, headers []*
return abort, errorsOut
}
-func (cryptore *Cryptore) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error {
+func (cryptore *Cryptore) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
var parent *types.Header
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
@@ -224,7 +224,7 @@ func (cryptore *Cryptore) VerifyUncles(chain consensus.ChainReader, block *types
// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Core cryptore engine.
// See YP section 4.3.4. "Block Header Validity"
-func (cryptore *Cryptore) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+func (cryptore *Cryptore) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
// Ensure that the header's extra-data section is of a reasonable size
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
@@ -280,7 +280,7 @@ func (cryptore *Cryptore) verifyHeader(chain consensus.ChainReader, header, pare
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
-func (cryptore *Cryptore) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
+func (cryptore *Cryptore) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return CalcDifficulty(chain.Config(), time, parent)
}
@@ -351,12 +351,12 @@ func makeDifficultyCalculator() func(time uint64, parent *types.Header) *big.Int
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
-func (cryptore *Cryptore) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (cryptore *Cryptore) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
return cryptore.verifySeal(chain, header)
}
// verifySeal checks whether a block satisfies the PoW difficulty requirements.
-func (cryptore *Cryptore) verifySeal(chain consensus.ChainReader, header *types.Header) error {
+func (cryptore *Cryptore) verifySeal(chain consensus.ChainHeaderReader, header *types.Header) error {
// If we're running a fake PoW, accept any seal as valid
if cryptore.config.PowMode == ModeFake || cryptore.config.PowMode == ModeFullFake {
time.Sleep(cryptore.fakeDelay)
@@ -392,7 +392,7 @@ func (cryptore *Cryptore) verifySeal(chain consensus.ChainReader, header *types.
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the cryptore protocol. The changes are done inline.
-func (cryptore *Cryptore) Prepare(chain consensus.ChainReader, header *types.Header) error {
+func (cryptore *Cryptore) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
@@ -403,7 +403,7 @@ func (cryptore *Cryptore) Prepare(chain consensus.ChainReader, header *types.Hea
// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state on the header
-func (cryptore *Cryptore) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
+func (cryptore *Cryptore) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(state, header, uncles)
header.Root = state.IntermediateRoot(true)
@@ -411,7 +411,7 @@ func (cryptore *Cryptore) Finalize(chain consensus.ChainReader, header *types.He
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
-func (cryptore *Cryptore) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+func (cryptore *Cryptore) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(state, header, uncles)
header.Root = state.IntermediateRoot(true)
diff --git a/consensus/cryptore/cryptore.go b/consensus/cryptore/cryptore.go
index c9fde62e2..04bbd2ff5 100644
--- a/consensus/cryptore/cryptore.go
+++ b/consensus/cryptore/cryptore.go
@@ -257,9 +257,9 @@ func (cryptore *Cryptore) Hashrate() float64 {
}
// APIs implements consensus.Engine, returning the user facing RPC APIs.
-func (cryptore *Cryptore) APIs(chain consensus.ChainReader) []rpc.API {
+func (cryptore *Cryptore) APIs(chain consensus.ChainHeaderReader) []rpc.API {
// In order to ensure backward compatibility, we exposes cryptore RPC APIs
- // to both eth and cryptore namespaces.
+ // to both xcb and cryptore namespaces.
return []rpc.API{
{
Namespace: "xcb",
diff --git a/consensus/cryptore/sealer.go b/consensus/cryptore/sealer.go
index ee22764fb..b699d456d 100644
--- a/consensus/cryptore/sealer.go
+++ b/consensus/cryptore/sealer.go
@@ -49,7 +49,7 @@ var (
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
-func (cryptore *Cryptore) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+func (cryptore *Cryptore) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// If we're running a fake PoW, simply return a 0 nonce immediately
if cryptore.config.PowMode == ModeFake || cryptore.config.PowMode == ModeFullFake {
header := block.Header()
diff --git a/console/console_test.go b/console/console_test.go
index ca3234f2e..167284912 100644
--- a/console/console_test.go
+++ b/console/console_test.go
@@ -112,7 +112,8 @@ func newTester(t *testing.T, confOverride func(*xcb.Config)) *tester {
if confOverride != nil {
confOverride(xcbConf)
}
- if err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return xcb.New(ctx, xcbConf) }); err != nil {
+ xcbBackend, err := xcb.New(stack, xcbConf)
+ if err != nil {
t.Fatalf("failed to register Core protocol: %v", err)
}
// Start the node and assemble the JavaScript console around it
@@ -138,13 +139,10 @@ func newTester(t *testing.T, confOverride func(*xcb.Config)) *tester {
t.Fatalf("failed to create JavaScript console: %v", err)
}
// Create the final tester and return
- var core *xcb.Core
- stack.Service(&core)
-
return &tester{
workspace: workspace,
stack: stack,
- core: core,
+ core: xcbBackend,
console: console,
input: prompter,
output: printer,
diff --git a/core/bench_test.go b/core/bench_test.go
index 002cb27a6..8c0466f7f 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -178,7 +178,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ chainman, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -290,7 +290,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil)
+ chain, err := NewBlockChain(db, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 6ad94cd2b..5ae9641c5 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -42,7 +42,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -106,11 +106,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var results <-chan error
if valid {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
} else {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
}
@@ -173,7 +173,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
defer runtime.GOMAXPROCS(old)
// Start the verifications and immediately abort
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFakeDelayer(time.Millisecond), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, cryptore.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
defer chain.Stop()
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
diff --git a/core/blockchain.go b/core/blockchain.go
index e39794d0b..827199405 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -133,6 +133,16 @@ type CacheConfig struct {
SnapshotWait bool // Wait for snapshot construction on startup. TODO(raisty): This is a dirty hack for testing, nuke it
}
+// defaultCacheConfig are the default caching values if none are specified by the
+// user (also used during testing).
+var defaultCacheConfig = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 256,
+ SnapshotWait: true,
+}
+
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
@@ -156,6 +166,13 @@ type BlockChain struct {
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
+ // txLookupLimit is the maximum number of blocks from head whose tx indices
+ // are reserved:
+ // * 0: means no limit and regenerate any missing indexes
+ // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
+ // * nil: disable tx reindexer/deleter, but still index new blocks
+ txLookupLimit uint64
+
hc *HeaderChain
rmLogsFeed event.Feed
chainFeed event.Feed
@@ -199,15 +216,9 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Core Validator and
// Processor.
-func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
+func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
- cacheConfig = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 256,
- SnapshotWait: true,
- }
+ cacheConfig = defaultCacheConfig
}
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
@@ -254,18 +265,30 @@ func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *par
bc.currentFastBlock.Store(nilBlock)
// Initialize the chain with ancient data if it isn't empty.
+ var txIndexBlock uint64
+
if bc.empty() {
rawdb.InitDatabaseFromFreezer(bc.db)
+ // If ancient database is not empty, reconstruct all missing
+ // indices in the background.
+ frozen, _ := bc.db.Ancients()
+ if frozen > 0 {
+ txIndexBlock = frozen
+ }
}
if err := bc.loadLastState(); err != nil {
return nil, err
}
- // The first thing the node will do is reconstruct the verification data for
- // the head block (cryptore cache or clique voting snapshot). Might as well do
- // it in advance.
- bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
-
+ // Make sure the state associated with the block is available
+ head := bc.CurrentBlock()
+ if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
+ log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+ if err := bc.SetHead(head.NumberU64()); err != nil {
+ return nil, err
+ }
+ }
+ // Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var (
needRewind bool
@@ -275,7 +298,7 @@ func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *par
// blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store.
fullBlock := bc.CurrentBlock()
- if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
+ if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
needRewind = true
low = fullBlock.NumberU64()
}
@@ -290,15 +313,17 @@ func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
}
if needRewind {
- var hashes []common.Hash
- previous := bc.CurrentHeader().Number.Uint64()
- for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
- hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
+ log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
+ if err := bc.SetHead(low); err != nil {
+ return nil, err
}
- bc.Rollback(hashes)
- log.Warn("Truncate ancient chain", "from", previous, "to", low)
}
}
+ // The first thing the node will do is reconstruct the verification data for
+ // the head block (cryptore cache or clique voting snapshot). Might as well do
+ // it in advance.
+ bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
+
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
@@ -307,7 +332,9 @@ func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *par
// make sure the headerByNumber (if present) is in our current canonical chain
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
- bc.SetHead(header.Number.Uint64() - 1)
+ if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
+ return nil, err
+ }
log.Error("Chain rewind was successful, resuming normal operation")
}
}
@@ -318,6 +345,10 @@ func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
// Take ownership of this particular state
go bc.update()
+ if txLookupLimit != nil {
+ bc.txLookupLimit = *txLookupLimit
+ go bc.maintainTxIndex(txIndexBlock)
+ }
// If periodic cache journal is required, spin it up.
if bc.cacheConfig.TrieCleanRejournal > 0 {
if bc.cacheConfig.TrieCleanRejournal < time.Minute {
@@ -374,15 +405,6 @@ func (bc *BlockChain) loadLastState() error {
log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset()
}
- // Make sure the state associated with the block is available
- if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil {
- // Dangling block without a state associated, init from scratch
- log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
- if err := bc.repair(&currentBlock); err != nil {
- return err
- }
- rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
- }
// Everything seems to be fine, set as the head block
bc.currentBlock.Store(currentBlock)
headBlockGauge.Update(int64(currentBlock.NumberU64()))
@@ -416,30 +438,48 @@ func (bc *BlockChain) loadLastState() error {
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
-
+ if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
+ log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
+ }
return nil
}
-// SetHead rewinds the local chain to a new head. In the case of headers, everything
-// above the new head will be deleted and the new one set. In the case of blocks
-// though, the head may be further rewound if block bodies are missing (non-archive
-// nodes after a fast sync).
+// SetHead rewinds the local chain to a new head. Depending on whether the node
+// was fast synced or full synced and in which state, the method will try to
+// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
- log.Warn("Rewinding blockchain", "target", head)
-
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- updateFn := func(db xcbdb.KeyValueWriter, header *types.Header) {
- // Rewind the block chain, ensuring we don't end up with a stateless head block
- if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
+ // Retrieve the last pivot block to short circuit rollbacks beyond it and the
+ // current freezer limit to start nuking if underflown
+ pivot := rawdb.ReadLastPivotNumber(bc.db)
+ frozen, _ := bc.db.Ancients()
+
+ updateFn := func(db xcbdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+ // Rewind the block chain, ensuring we don't end up with a stateless head
+ // block. Note, depth equality is permitted to allow using SetHead as a
+ // chain reparation mechanism without deleting any data!
+ if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
if newHeadBlock == nil {
+ log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
} else {
- if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
- // Rewound state missing, rolled back to before pivot, reset to genesis
- newHeadBlock = bc.genesisBlock
+ // Block exists, keep rewinding until we find one with state
+ for {
+ if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
+ log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+ if pivot == nil || newHeadBlock.NumberU64() > *pivot {
+ newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
+ continue
+ } else {
+ log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
+ newHeadBlock = bc.genesisBlock
+ }
+ }
+ log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+ break
}
}
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -468,6 +508,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
+ head := bc.CurrentBlock().NumberU64()
+
+ // If setHead underflown the freezer threshold and the block processing
+ // intent afterwards is full block importing, delete the chain segment
+ // between the stateful-block and the sethead target.
+ var wipe bool
+ if head+1 < frozen {
+ wipe = pivot == nil || head >= *pivot
+ }
+ return head, wipe // Only force wipe if full synced
}
// Rewind the header chain, deleting all block bodies until then
@@ -477,7 +527,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
- if err := bc.db.TruncateAncients(num + 1); err != nil {
+ if err := bc.db.TruncateAncients(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
@@ -492,7 +542,18 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
// Todo(rjl493456442) txlookup, bloombits, etc
}
- bc.hc.SetHead(head, updateFn, delFn)
+ // If SetHead was only called as a chain reparation method, try to skip
+ // touching the header chain altogether, unless the freezer is broken
+ if block := bc.CurrentBlock(); block.NumberU64() == head {
+ if target, force := updateFn(bc.db, block.Header()); force {
+ bc.hc.SetHead(target, updateFn, delFn)
+ }
+ } else {
+ // Rewind the chain to the requested head and keep going backwards until a
+ // block with a state is found or fast sync pivot is passed
+ log.Warn("Rewinding blockchain", "target", head)
+ bc.hc.SetHead(head, updateFn, delFn)
+ }
// Clear out any stale content from the caches
bc.bodyCache.Purge()
@@ -616,28 +677,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
return nil
}
-// repair tries to repair the current blockchain by rolling back the current block
-// until one with associated state is found. This is needed to fix incomplete db
-// writes caused either by crashes/power outages, or simply non-committed tries.
-//
-// This method only rolls back the current block. The current header and current
-// fast block are left intact.
-func (bc *BlockChain) repair(head **types.Block) error {
- for {
- // Abort if we've rewound to a head block that does have associated state
- if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil {
- log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
- return nil
- }
- // Otherwise rewind one block and recheck state availability there
- block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
- if block == nil {
- return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
- }
- *head = block
- }
-}
-
// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
@@ -981,52 +1020,6 @@ const (
SideStatTy
)
-// Rollback is designed to remove a chain of links from the database that aren't
-// certain enough to be valid.
-func (bc *BlockChain) Rollback(chain []common.Hash) {
- bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
-
- batch := bc.db.NewBatch()
- for i := len(chain) - 1; i >= 0; i-- {
- hash := chain[i]
-
- // Degrade the chain markers if they are explicitly reverted.
- // In theory we should update all in-memory markers in the
- // last step, however the direction of rollback is from high
- // to low, so it's safe the update in-memory markers directly.
- currentHeader := bc.hc.CurrentHeader()
- if currentHeader.Hash() == hash {
- newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
- rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
- bc.hc.SetCurrentHeader(newHeadHeader)
- }
- if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
- newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
- rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
- bc.currentFastBlock.Store(newFastBlock)
- headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
- }
- if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
- newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
- rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
- bc.currentBlock.Store(newBlock)
- headBlockGauge.Update(int64(newBlock.NumberU64()))
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to rollback chain markers", "err", err)
- }
- // Truncate ancient data which exceeds the current header.
- //
- // Notably, it can happen that system crashes without truncating the ancient data
- // but the head indicator has been updated in the active store. Regarding this issue,
- // system will self recovery by truncating the extra data during the setup phase.
- if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
- log.Crit("Truncate ancient store failed", "err", err)
- }
-}
-
// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
@@ -1210,7 +1203,22 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// Flush data into ancient database.
size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
- rawdb.WriteTxLookupEntries(batch, block)
+ // Write tx indices if any condition is satisfied:
+ // * If user requires to reserve all tx indices(txlookuplimit=0)
+ // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
+ // * If block number is large enough to be regarded as a recent block
+ // It means blocks below the ancientLimit-txlookupLimit won't be indexed.
+ //
+ // But if the `TxIndexTail` is not nil, e.g. Gocore is initialized with
+ // an external ancient database, during the setup, blockchain will start
+ // a background routine to re-index all indices in [ancients - txlookupLimit, ancients)
+ // range. In this case, all tx indices of newly imported blocks should be
+ // generated.
+ if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
+ rawdb.WriteTxLookupEntries(batch, block)
+ } else if rawdb.ReadTxIndexTail(bc.db) != nil {
+ rawdb.WriteTxLookupEntries(batch, block)
+ }
stats.processed++
}
@@ -1294,7 +1302,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Write all the data out into the database
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
- rawdb.WriteTxLookupEntries(batch, block)
+ rawdb.WriteTxLookupEntries(batch, block) // Always write tx indices for live blocks, we assume they are needed
// Write everything belongs to the blocks into the database. So that
// we can ensure all components of body is completed(body, receipts,
@@ -1329,6 +1337,19 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return n, err
}
}
+ // Write the tx index tail (block number from where we index) before write any live blocks
+ if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
+ // The tx index tail can only be one of the following two options:
+ // * 0: all ancient blocks have been indexed
+ // * ancient-limit: the indices of blocks before ancient-limit are ignored
+ if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
+ if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
+ rawdb.WriteTxIndexTail(bc.db, 0)
+ } else {
+ rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
+ }
+ }
+ }
if len(liveBlocks) > 0 {
if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
if err == errInsertionInterrupted {
@@ -1352,6 +1373,18 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, nil
}
+// SetTxLookupLimit is responsible for updating the txlookup limit to the
+// original one stored in db if the new mismatches with the old one.
+func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
+ bc.txLookupLimit = limit
+}
+
+// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
+// stale transaction indices.
+func (bc *BlockChain) TxLookupLimit() uint64 {
+ return bc.txLookupLimit
+}
+
var lastWrite uint64
// writeBlockWithoutState writes only the block and its metadata to the database,
@@ -2185,6 +2218,86 @@ func (bc *BlockChain) update() {
}
}
+// maintainTxIndex is responsible for the construction and deletion of the
+// transaction index.
+//
+// User can use flag `txlookuplimit` to specify a "recentness" block, below
+// which ancient tx indices get deleted. If `txlookuplimit` is 0, it means
+// all tx indices will be reserved.
+//
+// The user can adjust the txlookuplimit value for each launch after fast
+// sync, Gocore will automatically construct the missing indices and delete
+// the extra indices.
+func (bc *BlockChain) maintainTxIndex(ancients uint64) {
+ // Before starting the actual maintenance, we need to handle a special case,
+ // where user might init Gocore with an external ancient database. If so, we
+ // need to reindex all necessary transactions before starting to process any
+ // pruning requests.
+ if ancients > 0 {
+ var from = uint64(0)
+ if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
+ from = ancients - bc.txLookupLimit
+ }
+ rawdb.IndexTransactions(bc.db, from, ancients)
+ }
+ // indexBlocks reindexes or unindexes transactions depending on user configuration
+ indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
+ defer func() { done <- struct{}{} }()
+
+ // If the user just upgraded Gocore to a new version which supports transaction
+ // index pruning, write the new tail and remove anything older.
+ if tail == nil {
+ if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
+ // Nothing to delete, write the tail and return
+ rawdb.WriteTxIndexTail(bc.db, 0)
+ } else {
+ // Prune all stale tx indices and record the tx index tail
+ rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1)
+ }
+ return
+ }
+ // If a previous indexing existed, make sure that we fill in any missing entries
+ if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
+ if *tail > 0 {
+ rawdb.IndexTransactions(bc.db, 0, *tail)
+ }
+ return
+ }
+ // Update the transaction index to the new chain state
+ if head-bc.txLookupLimit+1 < *tail {
+ // Reindex a part of missing indices and rewind index tail to HEAD-limit
+ rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail)
+ } else {
+ // Unindex a part of stale indices and forward index tail to HEAD-limit
+ rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1)
+ }
+ }
+ // Any reindexing done, start listening to chain events and moving the index window
+ var (
+ done chan struct{} // Non-nil if background unindexing or reindexing routine is active.
+ headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
+ )
+ sub := bc.SubscribeChainHeadEvent(headCh)
+ if sub == nil {
+ return
+ }
+ defer sub.Unsubscribe()
+
+ for {
+ select {
+ case head := <-headCh:
+ if done == nil {
+ done = make(chan struct{})
+ go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
+ }
+ case <-done:
+ done = nil
+ case <-bc.quit:
+ return
+ }
+ }
+}
+
// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
blocks := make([]*types.Block, 0, bc.badBlocks.Len())
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
new file mode 100644
index 000000000..d6c101572
--- /dev/null
+++ b/core/blockchain_repair_test.go
@@ -0,0 +1,1648 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/consensus/cryptore"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/core/vm"
+ "github.com/core-coin/go-core/params"
+ "io/ioutil"
+ "math/big"
+ "os"
+ "testing"
+)
+
+// Tests a recovery for a short canonical chain where a recent block was already
+// committed to disk and then the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left
+// in the database for replaying.
+func TestShortRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// already committed, after which the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left in
+// the database for replaying.
+func TestShortFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// not yet committed, but the process crashed. In this case we expect the chain to
+// detect that it was fast syncing and not delete anything, since we can just pick
+// up directly where we left off.
+func TestShortFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical chain to be rolled back to the committed block, but the chain data
+// itself left in the database for replaying.
+func TestShortOldForkedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical chain to be rolled back to the committed block,
+// but the chain data itself left in the database for replaying.
+func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical chain to be rolled back to the committed block, but the
+// chain data itself left in the database for replaying.
+func TestShortNewlyForkedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain reaches above the committed block.
+// In this case we expect the canonical chain to be rolled back to the committed
+// block, but the chain data itself left in the database for replaying.
+func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and not delete
+// anything, since we can just pick up directly where we left off.
+func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// case we expect the canonical chain to be rolled back to the committed block, but
+// the chain data itself left in the database for replaying.
+func TestShortReorgedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 10,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this case we expect the canonical chain to be rolled back to the
+// committed block, but the chain data itself left in the database for replaying.
+func TestShortReorgedFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 10,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In
+// this case we expect the chain to detect that it was fast syncing and not delete
+// anything, since we can just pick up directly where we left off.
+func TestShortReorgedFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 10,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+//// Tests a recovery for a long canonical chain with frozen blocks where a recent
+//// block - newer than the ancient limit - was already committed to disk and then
+//// the process crashed. In this case we expect the chain to be rolled back to the
+//// committed block, with everything afterwards kept as fast sync data.
+//func TestLongShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks where a recent
+//// block - older than the ancient limit - was already committed to disk and then
+//// the process crashed. In this case we expect the chain to be rolled back to the
+//// committed block, with everything afterwards deleted.
+//func TestLongDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks where the fast
+//// sync pivot point - newer than the ancient limit - was already committed, after
+//// which the process crashed. In this case we expect the chain to be rolled back
+// // to the committed block, with everything afterwards deleted.
+//func TestLongFastSyncedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks where the fast
+//// sync pivot point - older than the ancient limit - was already committed, after
+//// which the process crashed. In this case we expect the chain to be rolled back
+// // to the committed block, with everything afterwards deleted.
+//func TestLongFastSyncedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// // sync pivot point - newer than the ancient limit - was not yet committed, but the
+//// process crashed. In this case we expect the chain to detect that it was fast
+//// syncing and not delete anything, since we can just pick up directly where we
+//// left off.
+//func TestLongFastSyncingShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// // sync pivot point - older than the ancient limit - was not yet committed, but the
+//// process crashed. In this case we expect the chain to detect that it was fast
+//// syncing and not delete anything, since we can just pick up directly where we
+//// left off.
+//func TestLongFastSyncingDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Expected in leveldb:
+// // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+// //
+// // Expected head header : C24
+// // Expected head fast block: C24
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 0,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 24,
+// expSidechainBlocks: 0,
+// expFrozen: 9,
+// expHeadHeader: 24,
+// expHeadFastBlock: 24,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where a recent block - newer than the ancient limit - was already
+//// committed to disk and then the process crashed. In this test scenario the side
+//// chain is below the committed block. In this case we expect the chain to be
+// // rolled back to the committed block, with everything afterwards kept as fast
+//// sync data; the side chain completely nuked by the freezer.
+//func TestLongOldForkedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where a recent block - older than the ancient limit - was already
+//// committed to disk and then the process crashed. In this test scenario the side
+//// chain is below the committed block. In this case we expect the canonical chain
+// // to be rolled back to the committed block, with everything afterwards deleted;
+//// the side chain completely nuked by the freezer.
+//func TestLongOldForkedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - newer than the ancient limit -
+//// was already committed to disk and then the process crashed. In this test scenario
+//// the side chain is below the committed block. In this case we expect the chain
+// // to be rolled back to the committed block, with everything afterwards kept as
+//// fast sync data; the side chain completely nuked by the freezer.
+//func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was already committed to disk and then the process crashed. In this test scenario
+//// the side chain is below the committed block. In this case we expect the canonical
+// // chain to be rolled back to the committed block, with everything afterwards deleted;
+//// the side chain completely nuked by the freezer.
+//func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was not yet committed, but the process crashed. In this test scenario the side
+//// chain is below the committed block. In this case we expect the chain to detect
+//// that it was fast syncing and not delete anything. The side chain is completely
+//// nuked by the freezer.
+//func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was not yet committed, but the process crashed. In this test scenario the side
+//// chain is below the committed block. In this case we expect the chain to detect
+//// that it was fast syncing and not delete anything. The side chain is completely
+//// nuked by the freezer.
+//func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Expected in leveldb:
+// // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+// //
+// // Expected head header : C24
+// // Expected head fast block: C24
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 3,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 24,
+// expSidechainBlocks: 0,
+// expFrozen: 9,
+// expHeadHeader: 24,
+// expHeadFastBlock: 24,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where a recent block - newer than the ancient limit - was already
+//// committed to disk and then the process crashed. In this test scenario the side
+//// chain is above the committed block. In this case we expect the chain to be
+// // rolled back to the committed block, with everything afterwards kept as fast
+//// sync data; the side chain completely nuked by the freezer.
+//func TestLongNewerForkedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where a recent block - older than the ancient limit - was already
+//// committed to disk and then the process crashed. In this test scenario the side
+//// chain is above the committed block. In this case we expect the canonical chain
+// // to be rolled back to the committed block, with everything afterwards deleted;
+//// the side chain completely nuked by the freezer.
+//func TestLongNewerForkedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - newer than the ancient limit -
+//// was already committed to disk and then the process crashed. In this test scenario
+//// the side chain is above the committed block. In this case we expect the chain
+// // to be rolled back to the committed block, with everything afterwards kept as fast
+//// sync data; the side chain completely nuked by the freezer.
+//func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was already committed to disk and then the process crashed. In this test scenario
+//// the side chain is above the committed block. In this case we expect the canonical
+// // chain to be rolled back to the committed block, with everything afterwards deleted;
+//// the side chain completely nuked by the freezer.
+//func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was not yet committed, but the process crashed. In this test scenario the side
+//// chain is above the committed block. In this case we expect the chain to detect
+//// that it was fast syncing and not delete anything. The side chain is completely
+//// nuked by the freezer.
+//func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was not yet committed, but the process crashed. In this test scenario the side
+//// chain is above the committed block. In this case we expect the chain to detect
+//// that it was fast syncing and not delete anything. The side chain is completely
+//// nuked by the freezer.
+//func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Expected in leveldb:
+// // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+// //
+// // Expected head header : C24
+// // Expected head fast block: C24
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 12,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 24,
+// expSidechainBlocks: 0,
+// expFrozen: 9,
+// expHeadHeader: 24,
+// expHeadFastBlock: 24,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer side
+//// chain, where a recent block - newer than the ancient limit - was already committed
+//// to disk and then the process crashed. In this case we expect the chain to be
+// // rolled back to the committed block, with everything afterwards kept as fast sync
+//// data. The side chain completely nuked by the freezer.
+//func TestLongReorgedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer side
+//// chain, where a recent block - older than the ancient limit - was already committed
+//// to disk and then the process crashed. In this case we expect the canonical chains
+// // to be rolled back to the committed block, with everything afterwards deleted. The
+//// side chain completely nuked by the freezer.
+//func TestLongReorgedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : none
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: nil,
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer
+//// side chain, where the fast sync pivot point - newer than the ancient limit -
+//// was already committed to disk and then the process crashed. In this case we
+//// expect the chain to be rolled back to the committed block, with everything
+// // afterwards kept as fast sync data. The side chain completely nuked by the
+//// freezer.
+//func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was already committed to disk and then the process crashed. In this case we
+//// expect the canonical chains to be rolled back to the committed block, with
+// // everything afterwards deleted. The side chain completely nuked by the freezer.
+//func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G, C4
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4
+// //
+// // Expected in leveldb: none
+// //
+// // Expected head header : C4
+// // Expected head fast block: C4
+// // Expected head block : C4
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 4,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 4,
+// expSidechainBlocks: 0,
+// expFrozen: 5,
+// expHeadHeader: 4,
+// expHeadFastBlock: 4,
+// expHeadBlock: 4,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer
+//// side chain, where the fast sync pivot point - newer than the ancient limit -
+//// was not yet committed, but the process crashed. In this case we expect the
+//// chain to detect that it was fast syncing and not delete anything, since we
+//// can just pick up directly where we left off.
+//func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2
+// //
+// // Expected in leveldb:
+// // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+// //
+// // Expected head header : C18
+// // Expected head fast block: C18
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 18,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 18,
+// expSidechainBlocks: 0,
+// expFrozen: 3,
+// expHeadHeader: 18,
+// expHeadFastBlock: 18,
+// expHeadBlock: 0,
+// })
+//}
+//
+//// Tests a recovery for a long canonical chain with frozen blocks and a longer
+//// side chain, where the fast sync pivot point - older than the ancient limit -
+//// was not yet committed, but the process crashed. In this case we expect the
+//// chain to detect that it was fast syncing and not delete anything, since we
+//// can just pick up directly where we left off.
+//func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+// // Chain:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+// // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+// //
+// // Frozen:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Commit: G
+// // Pivot : C4
+// //
+// // CRASH
+// //
+// // ------------------------------
+// //
+// // Expected in freezer:
+// // G->C1->C2->C3->C4->C5->C6->C7->C8
+// //
+// // Expected in leveldb:
+// // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+// //
+// // Expected head header : C24
+// // Expected head fast block: C24
+// // Expected head block : G
+// testRepair(t, &rewindTest{
+// canonicalBlocks: 24,
+// sidechainBlocks: 26,
+// freezeThreshold: 16,
+// commitBlock: 0,
+// pivotBlock: uint64ptr(4),
+// expCanonicalBlocks: 24,
+// expSidechainBlocks: 0,
+// expFrozen: 9,
+// expHeadHeader: 24,
+// expHeadFastBlock: 24,
+// expHeadBlock: 0,
+// })
+//}
+
+func testRepair(t *testing.T, tt *rewindTest) {
+ // It's hard to follow the test case, visualize the input
+ //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ //fmt.Println(tt.dump(true))
+
+ // Create a temporary persistent database
+ datadir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary datadir: %v", err)
+ }
+ os.RemoveAll(datadir)
+
+ db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ defer db.Close() // Might double close, should be fine
+
+ // Initialize a fresh chain
+ var (
+ genesis = new(Genesis).MustCommit(db)
+ engine = cryptore.NewFullFaker()
+ )
+ chain, err := NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("Failed to create chain: %v", err)
+ }
+ // If sidechain blocks are needed, make a light chain and import it
+ var sideblocks types.Blocks
+ if tt.sidechainBlocks > 0 {
+ sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x01})
+ })
+ if _, err := chain.InsertChain(sideblocks); err != nil {
+ t.Fatalf("Failed to import side chain: %v", err)
+ }
+ }
+ canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x02})
+ b.SetDifficulty(big.NewInt(1000000))
+ })
+ if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+ t.Fatalf("Failed to import canonical chain start: %v", err)
+ }
+ if tt.commitBlock > 0 {
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ }
+ if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+ t.Fatalf("Failed to import canonical chain tail: %v", err)
+ }
+ // Force run a freeze cycle
+ type freezer interface {
+ Freeze(threshold uint64)
+ Ancients() (uint64, error)
+ }
+ db.(freezer).Freeze(tt.freezeThreshold)
+
+ // Set the simulated pivot block
+ if tt.pivotBlock != nil {
+ rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+ }
+ // Pull the plug on the database, simulating a hard crash
+ db.Close()
+
+ // Start a new blockchain back up and see where the repair leads us
+ db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to reopen persistent database: %v", err)
+ }
+ defer db.Close()
+
+ chain, err = NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer chain.Stop()
+
+ // Iterate over all the remaining blocks and ensure there are no gaps
+ verifyNoGaps(t, chain, true, canonblocks)
+ verifyNoGaps(t, chain, false, sideblocks)
+ verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+ if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+ }
+ if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+ }
+ if frozen, err := db.(freezer).Ancients(); err != nil {
+ t.Errorf("Failed to retrieve ancient count: %v\n", err)
+ } else if int(frozen) != tt.expFrozen {
+ t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+ }
+}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
new file mode 100644
index 000000000..81c941753
--- /dev/null
+++ b/core/blockchain_sethead_test.go
@@ -0,0 +1,1969 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/consensus/cryptore"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/core/vm"
+ "github.com/core-coin/go-core/params"
+ "io/ioutil"
+ "math/big"
+ "os"
+ "strings"
+ "testing"
+)
+
+// rewindTest is a test case for chain rollback upon user request (SetHead) or after a crash.
+type rewindTest struct {
+ canonicalBlocks int // Number of blocks to generate for the canonical chain (heavier)
+ sidechainBlocks int // Number of blocks to generate for the side chain (lighter)
+ freezeThreshold uint64 // Block number until which to move things into the freezer
+ commitBlock uint64 // Block number for which to commit the state to disk
+ pivotBlock *uint64 // Pivot block number in case of fast sync, nil when full syncing
+
+ setheadBlock uint64 // Block number to set head back to
+ expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
+ expSidechainBlocks int // Number of sidechain blocks expected to remain in the database (excl. genesis)
+ expFrozen int // Number of canonical blocks expected to be in the freezer (incl. genesis)
+ expHeadHeader uint64 // Block number of the expected head header
+ expHeadFastBlock uint64 // Block number of the expected head fast sync block
+ expHeadBlock uint64 // Block number of the expected head full block
+}
+
+func (tt *rewindTest) dump(crash bool) string { // renders the scenario as the ASCII diagram used in the test comments
+ buffer := new(strings.Builder)
+
+ fmt.Fprint(buffer, "Chain:\n G") // canonical chain: G->C1->...->Cn
+ for i := 0; i < tt.canonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, " (HEAD)\n")
+ if tt.sidechainBlocks > 0 { // optional side chain forked off the genesis
+ fmt.Fprintf(buffer, " └")
+ for i := 0; i < tt.sidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ fmt.Fprintf(buffer, "\n")
+
+ if tt.canonicalBlocks > int(tt.freezeThreshold) { // freezer holds blocks older than head minus threshold
+ fmt.Fprint(buffer, "Frozen:\n G")
+ for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n\n")
+ } else {
+ fmt.Fprintf(buffer, "Frozen: none\n")
+ }
+ fmt.Fprintf(buffer, "Commit: G")
+ if tt.commitBlock > 0 {
+ fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+ }
+ fmt.Fprint(buffer, "\n")
+
+ if tt.pivotBlock == nil { // nil pivot means full sync, no fast sync pivot
+ fmt.Fprintf(buffer, "Pivot : none\n")
+ } else {
+ fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
+ }
+ if crash {
+ fmt.Fprintf(buffer, "\nCRASH\n\n")
+ } else {
+ fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
+ }
+ fmt.Fprintf(buffer, "------------------------------\n\n")
+
+ if tt.expFrozen > 0 { // expected freezer contents: the genesis plus expFrozen-1 blocks
+ fmt.Fprint(buffer, "Expected in freezer:\n G")
+ for i := 0; i < tt.expFrozen-1; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n\n")
+ }
+ if tt.expFrozen > 0 {
+ if tt.expFrozen >= tt.expCanonicalBlocks { // everything expected lives in the freezer
+ fmt.Fprintf(buffer, "Expected in leveldb: none\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1) // "C%d)" marks continuation from the freezer
+ for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, "\n")
+ if tt.expSidechainBlocks > tt.expFrozen {
+ fmt.Fprintf(buffer, " └")
+ for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ }
+ } else { // nothing frozen: the whole expected chain lives in leveldb
+ fmt.Fprint(buffer, "Expected in leveldb:\n G")
+ for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, "\n")
+ if tt.expSidechainBlocks > tt.expFrozen {
+ fmt.Fprintf(buffer, " └")
+ for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ }
+ fmt.Fprintf(buffer, "\n")
+ fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader)
+ fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+ if tt.expHeadBlock == 0 { // head block 0 means the genesis
+ fmt.Fprintf(buffer, "Expected head block : G\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock)
+ }
+ return buffer.String()
+}
+
+// Tests a sethead for a short canonical chain where a recent block was already
+// committed to disk and then the sethead called. In this case we expect the full
+// chain to be rolled back to the committed block. Everything above the sethead
+// point should be deleted. In between the committed block and the requested head
+// the data can remain as "fast sync" data to avoid redownloading it.
+func TestShortSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// already committed, after which sethead was called. In this case we expect the
+// chain to behave like in full sync mode, rolling back to the committed block.
+// Everything above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// redownloading it.
+func TestShortFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// not yet committed, but sethead was called. In this case we expect the chain to
+// detect that it was fast syncing and delete everything from the new head, since
+// we can just pick up fast syncing from there. The head full block should be set
+// to the genesis.
+func TestShortFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical full chain to be rolled back to the committed block. Everything
+// above the sethead point should be deleted. In between the committed block and
+// the requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical full chain to be rolled back to the committed
+// block. Everything above the sethead point should be deleted. In between the
+// committed block and the requested head the data can remain as "fast sync" data
+// to avoid redownloading it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The head full block
+// should be set to the genesis.
+func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical full chain to be rolled back to the committed block. All
+// data above the sethead point should be deleted. In between the committed block
+// and the requested head the data can remain as "fast sync" data to avoid having
+// to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this case we expect the canonical full chain to be rolled back to
+// the committed block. In between it and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and delete
+// everything from the new head, since we can just pick up fast syncing from
+// there.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// case we expect the canonical full chain to be rolled back to the committed block.
+// All data above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// having to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this case we expect the canonical full chain to be rolled back to
+// the committed block. All data above the sethead point should be deleted. In
+// between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a longer side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In
+// this case we expect the chain to detect that it was fast syncing and delete
+// everything from the new head, since we can just pick up fast syncing from
+// there.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortReorgedFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 10,
+ freezeThreshold: 16, // chain shorter than the threshold, nothing gets frozen
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - newer than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Everything above the sethead point should be deleted.
+// In between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid redownloading it.
+func TestLongShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C2 into the freezer (18 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - older than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C8 into the freezer (24 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Everything above the sethead point should be
+// deleted. In between the committed block and the requested head the data can
+// remain as "fast sync" data to avoid redownloading it.
+func TestLongFastSyncedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C2 into the freezer (18 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongFastSyncedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C8 into the freezer (24 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C2 into the freezer (18 blocks - threshold 16)
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16, // moves G..C8 into the freezer (24 blocks - threshold 16)
+ commitBlock: 0, // only the genesis state is on disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Everything above the sethead point
+// should be deleted. In between the committed block and the requested head the data
+// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
+// by the freezer.
+func TestLongOldForkedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // moves G..C2 into the freezer (18 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // moves G..C8 into the freezer (24 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: nil, // full sync mode
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Everything above the
+// sethead point should be deleted. In between the committed block and the
+// requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16, // moves G..C2 into the freezer (18 blocks - threshold 16)
+ commitBlock: 4, // state of C4 is committed to disk
+ pivotBlock: uint64ptr(4), // fast sync pivot at C4
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongShallowSetHead.
+func TestLongNewerForkedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongDeepSetHead.
+func TestLongNewerForkedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
+func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
+func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
+func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then sethead was called. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongShallowSetHead.
+func TestLongReorgedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then sethead was called. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongDeepSetHead.
+func TestLongReorgedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this case the
+// freezer will delete the sidechain since it's dangling, reverting to
+// TestLongFastSyncedShallowSetHead.
+func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this case the
+// freezer will delete the sidechain since it's dangling, reverting to
+// TestLongFastSyncedDeepSetHead.
+func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this case we expect the
+// chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The side chain is
+// completely nuked by the freezer.
+func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this case we expect the
+// chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The side chain is
+// completely nuked by the freezer.
+func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
+ t.Skip("skip long-running tests")
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 26,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+func testSetHead(t *testing.T, tt *rewindTest) {
+ // It's hard to follow the test case, visualize the input
+ //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ //fmt.Println(tt.dump(false))
+
+ // Create a temporary persistent database
+ datadir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary datadir: %v", err)
+ }
+ os.RemoveAll(datadir)
+
+ db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ defer db.Close()
+
+ // Initialize a fresh chain
+ var (
+ genesis = new(Genesis).MustCommit(db)
+ engine = cryptore.NewFullFaker()
+ )
+ chain, err := NewBlockChain(db, nil, params.AllCryptoreProtocolChanges, engine, vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("Failed to create chain: %v", err)
+ }
+ // If sidechain blocks are needed, make a light chain and import it
+ var sideblocks types.Blocks
+ if tt.sidechainBlocks > 0 {
+ sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x01})
+ })
+ if _, err := chain.InsertChain(sideblocks); err != nil {
+ t.Fatalf("Failed to import side chain: %v", err)
+ }
+ }
+ canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x02})
+ b.SetDifficulty(big.NewInt(1000000))
+ })
+ if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+ t.Fatalf("Failed to import canonical chain start: %v", err)
+ }
+ if tt.commitBlock > 0 {
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ }
+ if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+ t.Fatalf("Failed to import canonical chain tail: %v", err)
+ }
+ // Manually dereference anything not committed to not have to work with 128+ tries
+ for _, block := range sideblocks {
+ chain.stateCache.TrieDB().Dereference(block.Root())
+ }
+ for _, block := range canonblocks {
+ chain.stateCache.TrieDB().Dereference(block.Root())
+ }
+ // Force run a freeze cycle
+ type freezer interface {
+ Freeze(threshold uint64)
+ Ancients() (uint64, error)
+ }
+ db.(freezer).Freeze(tt.freezeThreshold)
+
+ // Set the simulated pivot block
+ if tt.pivotBlock != nil {
+ rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+ }
+ // Set the head of the chain back to the requested number
+ chain.SetHead(tt.setheadBlock)
+
+ // Iterate over all the remaining blocks and ensure there are no gaps
+ verifyNoGaps(t, chain, true, canonblocks)
+ verifyNoGaps(t, chain, false, sideblocks)
+ verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+ if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+ }
+ if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+ }
+ if frozen, err := db.(freezer).Ancients(); err != nil {
+ t.Errorf("Failed to retrieve ancient count: %v\n", err)
+ } else if int(frozen) != tt.expFrozen {
+ t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+ }
+}
+
+// verifyNoGaps checks that there are no gaps after the initial set of blocks in
+// the database and errors if found.
+func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
+ t.Helper()
+
+ var end uint64
+ for i := uint64(0); i <= uint64(len(inserted)); i++ {
+ header := chain.GetHeaderByNumber(i)
+ if header == nil && end == 0 {
+ end = i
+ }
+ if header != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical header gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain header gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+ end = 0
+ for i := uint64(0); i <= uint64(len(inserted)); i++ {
+ block := chain.GetBlockByNumber(i)
+ if block == nil && end == 0 {
+ end = i
+ }
+ if block != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical block gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain block gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+ end = 0
+ for i := uint64(1); i <= uint64(len(inserted)); i++ {
+ receipts := chain.GetReceiptsByHash(inserted[i-1].Hash())
+ if receipts == nil && end == 0 {
+ end = i
+ }
+ if receipts != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+}
+
+// verifyCutoff checks that there are no chain data available in the chain after
+// the specified limit, but that it is available before.
+func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
+ t.Helper()
+
+ for i := 1; i <= len(inserted); i++ {
+ if i <= head {
+ if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil {
+ if canonical {
+ t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil {
+ if canonical {
+ t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil {
+ if canonical {
+ t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ } else {
+ if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil {
+ if canonical {
+ t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil {
+ if canonical {
+ t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil {
+ if canonical {
+ t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ }
+ }
+}
+
+// uint64ptr is a weird helper to allow 1-line constant pointer creation.
+func uint64ptr(n uint64) *uint64 {
+ return &n
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 71fc36cfe..b79f5d651 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -57,7 +57,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (xcbdb.Database, *B
)
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(db, nil, params.AllCryptoreProtocolChanges, engine, vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, params.AllCryptoreProtocolChanges, engine, vm.Config{}, nil, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -513,7 +513,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, cryptore.NewFaker(), vm.Config{}, nil)
+ ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, cryptore.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -626,7 +626,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
@@ -635,7 +635,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -659,7 +659,7 @@ func TestFastVsFullChains(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -737,12 +737,10 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
- remove := []common.Hash{}
- for _, block := range blocks[height/2:] {
- remove = append(remove, block.Hash())
- }
+ remove := blocks[height/2].NumberU64()
// Create a small assertion method to check the three heads
assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+ t.Helper()
if num := chain.CurrentBlock().NumberU64(); num != block {
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
}
@@ -756,20 +754,23 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as an archive node and ensure all pointers are updated
archiveDb, delfn := makeDb()
defer delfn()
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ archiveCaching := *defaultCacheConfig
+ archiveCaching.TrieDirtyDisabled = true
+
+ archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
defer archive.Stop()
assert(t, "archive", archive, height, height, height)
- archive.Rollback(remove)
+ archive.SetHead(remove - 1)
assert(t, "archive", archive, height/2, height/2, height/2)
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, delfn := makeDb()
defer delfn()
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -783,13 +784,13 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "fast", fast, height, height, 0)
- fast.Rollback(remove)
+ fast.SetHead(remove - 1)
assert(t, "fast", fast, height/2, height/2, 0)
// Import the chain as a ancient-first node and ensure all pointers are updated
ancientDb, delfn := makeDb()
defer delfn()
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -799,23 +800,23 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "ancient", ancient, height, height, 0)
- ancient.Rollback(remove)
- assert(t, "ancient", ancient, height/2, height/2, 0)
- if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 {
- t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen)
- }
+ ancient.SetHead(remove - 1)
+ assert(t, "ancient", ancient, 0, 0, 0)
+ if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
+ t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
+ }
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
- light, _ := NewBlockChain(lightDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ light, _ := NewBlockChain(lightDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
defer light.Stop()
assert(t, "light", light, height, 0, 0)
- light.Rollback(remove)
+ light.SetHead(remove - 1)
assert(t, "light", light, height/2, 0, 0)
}
@@ -880,7 +881,7 @@ func TestChainTxReorgs(t *testing.T) {
}
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -951,7 +952,7 @@ func TestLogReorgs(t *testing.T) {
signer = types.NewNucleusSigner(gspec.Config.NetworkID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -1042,7 +1043,7 @@ func TestLogRebirth(t *testing.T) {
}
}
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
logsCh := make(chan []*types.Log)
@@ -1156,7 +1157,7 @@ func TestSideLogRebirth(t *testing.T) {
}
}
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
logsCh := make(chan []*types.Log)
@@ -1211,7 +1212,7 @@ func TestReorgSideEvent(t *testing.T) {
signer = types.NewNucleusSigner(gspec.Config.NetworkID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
@@ -1344,7 +1345,7 @@ func TestCIP155Transition(t *testing.T) {
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), db, 4, func(i int, block *BlockGen) {
@@ -1437,7 +1438,7 @@ func TestCIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), db, 3, func(i int, block *BlockGen) {
@@ -1509,7 +1510,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1553,7 +1554,7 @@ func TestTrieForkGC(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1592,7 +1593,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1653,7 +1654,7 @@ func TestBlockchainRecovery(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -1665,6 +1666,7 @@ func TestBlockchainRecovery(t *testing.T) {
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
+ rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
ancient.Stop()
// Destroy head fast block manually
@@ -1672,7 +1674,7 @@ func TestBlockchainRecovery(t *testing.T) {
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
// Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if num := ancient.CurrentBlock().NumberU64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1710,7 +1712,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
headers := make([]*types.Header, len(blocks))
@@ -1768,7 +1770,7 @@ func TestLowDiffLongChain(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1815,7 +1817,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1912,7 +1914,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
new(Genesis).MustCommit(chaindb)
defer os.RemoveAll(dir)
- chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1977,11 +1979,9 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks[len(blocks)-1])
// Import a long canonical chain with some known data as prefix.
- var rollback []common.Hash
- for i := len(blocks) / 2; i < len(blocks); i++ {
- rollback = append(rollback, blocks[i].Hash())
- }
- chain.Rollback(rollback)
+ rollback := blocks[len(blocks)/2].NumberU64()
+
+ chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
@@ -2001,10 +2001,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks3[len(blocks3)-1])
// Rollback the heavier chain and re-insert the longer chain again
- for i := 0; i < len(blocks3); i++ {
- rollback = append(rollback, blocks3[i].Hash())
- }
- chain.Rollback(rollback)
+ chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
@@ -2028,7 +2025,7 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
@@ -2182,7 +2179,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}
@@ -2264,7 +2261,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2364,7 +2361,7 @@ func TestDeleteCreateRevert(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2479,7 +2476,7 @@ func TestDeleteRecreateSlots(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2504,6 +2501,221 @@ func TestDeleteRecreateSlots(t *testing.T) {
}
}
+func TestTransactionIndices(t *testing.T) {
+ // Configure and generate a sample block chain
+ var (
+ gendb = rawdb.NewMemoryDatabase()
+ key, _ = crypto.HexToEDDSA("856a9af6b0b651dd2f43b5e12193652ec1701c4da6f1c0d2a366ac4b9dabc9433ef09e41ca129552bd2c029086d9b03604de872a3b3432041f")
+ pub = eddsa.Ed448DerivePublicKey(*key)
+ address = crypto.PubkeyToAddress(pub)
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewNucleusSigner(gspec.Config.NetworkID)
+ )
+ height := uint64(128)
+ blocks, receipts := GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxEnergy, nil, nil), signer, key)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ })
+ blocks2, _ := GenerateChain(gspec.Config, blocks[len(blocks)-1], cryptore.NewFaker(), gendb, 10, nil)
+
+ check := func(tail *uint64, chain *BlockChain) {
+ stored := rawdb.ReadTxIndexTail(chain.db)
+ if tail == nil && stored != nil {
+ t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
+ }
+ if tail != nil && *stored != *tail {
+ t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
+ }
+ if tail != nil {
+ for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
+ t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ for i := uint64(0); i < *tail; i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
+ t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ }
+ }
+ frdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer dir: %v", err)
+ }
+ defer os.Remove(frdir)
+ ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ // Import all blocks into ancient db
+ l := uint64(0)
+ chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
+ t.Fatalf("failed to insert header %d: %v", n, err)
+ }
+ if n, err := chain.InsertReceiptChain(blocks, receipts, 128); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+ chain.Stop()
+ ancientDb.Close()
+
+ // Init block chain with external ancients, check all needed indices has been indexed.
+ limit := []uint64{0, 32, 64, 128}
+ for _, l := range limit {
+ ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+ chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
+ var tail uint64
+ if l != 0 {
+ tail = uint64(128) - l + 1
+ }
+ check(&tail, chain)
+ chain.Stop()
+ ancientDb.Close()
+ }
+
+ // Reconstruct a block chain which only reserves HEAD-64 tx indices
+ ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
+ tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
+ for i, l := range limit {
+ chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ chain.InsertChain(blocks2[i : i+1]) // Feed chain a higher block to trigger indices updater.
+ time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
+ check(&tails[i], chain)
+ chain.Stop()
+ }
+}
+
+func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
+ // Configure and generate a sample block chain
+ var (
+ gendb = rawdb.NewMemoryDatabase()
+ key, _ = crypto.HexToEDDSA("856a9af6b0b651dd2f43b5e12193652ec1701c4da6f1c0d2a366ac4b9dabc9433ef09e41ca129552bd2c029086d9b03604de872a3b3432041f")
+ pub = eddsa.Ed448DerivePublicKey(*key)
+ address = crypto.PubkeyToAddress(pub)
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewNucleusSigner(gspec.Config.NetworkID)
+ )
+ height := uint64(128)
+ blocks, receipts := GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxEnergy, nil, nil), signer, key)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ })
+
+ check := func(tail *uint64, chain *BlockChain) {
+ stored := rawdb.ReadTxIndexTail(chain.db)
+ if tail == nil && stored != nil {
+ t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
+ }
+ if tail != nil && *stored != *tail {
+ t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
+ }
+ if tail != nil {
+ for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
+ t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ for i := uint64(0); i < *tail; i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
+ t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ }
+ }
+
+ frdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer dir: %v", err)
+ }
+ defer os.Remove(frdir)
+ ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ // Import all blocks into ancient db, only HEAD-32 indices are kept.
+ l := uint64(32)
+ chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, cryptore.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
+ t.Fatalf("failed to insert header %d: %v", n, err)
+ }
+ // The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
+ if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+ tail := uint64(32)
+ check(&tail, chain)
+}
+
// TestDeleteRecreateAccount tests a state-transition that contains deletion of a
// contract with storage, and a recreate of the same contract via a
// regular value-transfer
@@ -2561,7 +2773,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2736,7 +2948,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2777,7 +2989,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
// TestInitThenFailCreateContract tests a pretty notorious case that happened
// on mainnet over blocks 7338108, 7338110 and 7338115.
// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
-// with 0.001 ether (thus created but no code)
+// with 0.001 xcb (thus created but no code)
// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
// deployment fails due to OOG during initcode execution
@@ -2872,7 +3084,7 @@ func TestInitThenFailCreateContract(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 4b91c8926..9031fc5f0 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -83,7 +83,7 @@ func ExampleGenerateChain() {
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, cryptore.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {
diff --git a/core/error.go b/core/error.go
index 4b3d665ec..daf4caa4e 100644
--- a/core/error.go
+++ b/core/error.go
@@ -22,17 +22,45 @@ var (
// ErrKnownBlock is returned when a block to import is already known locally.
ErrKnownBlock = errors.New("block already known")
- // ErrEnergyLimitReached is returned by the energy pool if the amount of energy required
- // by a transaction is higher than what's left in the block.
- ErrEnergyLimitReached = errors.New("energy limit reached")
-
// ErrBlacklistedHash is returned if a block to import is on the blacklist.
ErrBlacklistedHash = errors.New("blacklisted hash")
+ // ErrNoGenesis is returned when there is no Genesis Block.
+ ErrNoGenesis = errors.New("genesis not found in chain")
+)
+
+// List of cvm-call-message pre-checking errors. All state transition messages will
+// be pre-checked before execution. If any invalidation detected, the corresponding
+// error should be returned which is defined here.
+//
+// - If the pre-checking happens in the miner, then the transaction won't be packed.
+// - If the pre-checking happens in the block processing procedure, then a "BAD BLOCK"
+// error should be emitted.
+var (
+ // ErrNonceTooLow is returned if the nonce of a transaction is lower than the
+ // one present in the local chain.
+ ErrNonceTooLow = errors.New("nonce too low")
+
// ErrNonceTooHigh is returned if the nonce of a transaction is higher than the
// next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high")
- // ErrNoGenesis is returned when there is no Genesis Block.
- ErrNoGenesis = errors.New("genesis not found in chain")
+ // ErrEnergyLimitReached is returned by the energy pool if the amount of energy required
+ // by a transaction is higher than what's left in the block.
+ ErrEnergyLimitReached = errors.New("energy limit reached")
+
+ // ErrInsufficientFundsForTransfer is returned if the transaction sender doesn't
+ // have enough funds for transfer(topmost call only).
+ ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer")
+
+ // ErrInsufficientFunds is returned if the total cost of executing a transaction
+ // is higher than the balance of the user's account.
+ ErrInsufficientFunds = errors.New("insufficient funds for energy * price + value")
+
+ // ErrEnergyUintOverflow is returned when calculating energy usage.
+ ErrEnergyUintOverflow = errors.New("energy uint64 overflow")
+
+ // ErrIntrinsicEnergy is returned if the transaction is specified to use less energy
+ // than required to start the invocation.
+ ErrIntrinsicEnergy = errors.New("intrinsic energy too low")
)
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
index 966887f0c..3e2770e8b 100644
--- a/core/forkid/forkid.go
+++ b/core/forkid/forkid.go
@@ -20,6 +20,7 @@ package forkid
import (
"encoding/binary"
"errors"
+ "github.com/core-coin/go-core/core/types"
"hash/crc32"
"math"
"math/big"
@@ -27,7 +28,6 @@ import (
"strings"
"github.com/core-coin/go-core/common"
- "github.com/core-coin/go-core/core"
"github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/params"
)
@@ -44,6 +44,18 @@ var (
ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)
+// Blockchain defines all methods necessary to build a forkID.
+type Blockchain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // Genesis retrieves the chain's genesis block.
+ Genesis() *types.Block
+
+ // CurrentHeader retrieves the current head header of the canonical chain.
+ CurrentHeader() *types.Header
+}
+
// ID is a fork identifier as defined by CIP-2124.
type ID struct {
Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
@@ -54,7 +66,7 @@ type ID struct {
type Filter func(id ID) error
// NewID calculates the Core fork ID from the chain config and head.
-func NewID(chain *core.BlockChain) ID {
+func NewID(chain Blockchain) ID {
return newID(
chain.Config(),
chain.Genesis().Hash(),
@@ -85,7 +97,7 @@ func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
-func NewFilter(chain *core.BlockChain) Filter {
+func NewFilter(chain Blockchain) Filter {
return newFilter(
chain.Config(),
chain.Genesis().Hash(),
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 4771a79a5..f18a6c6dc 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -121,7 +121,7 @@ func TestSetupGenesis(t *testing.T) {
// Advance to block #4, past the transition block of customg.
genesis := oldcustomg.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, oldcustomg.Config, cryptore.NewFullFaker(), vm.Config{}, nil)
+ bc, _ := NewBlockChain(db, nil, oldcustomg.Config, cryptore.NewFullFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks, _ := GenerateChain(oldcustomg.Config, genesis, cryptore.NewFaker(), db, 4, nil)
diff --git a/core/headerchain.go b/core/headerchain.go
index 7bd86c23c..dc10cf2ac 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -488,8 +488,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
type (
// UpdateHeadBlocksCallback is a callback function that is called by SetHead
- // before head header is updated.
- UpdateHeadBlocksCallback func(xcbdb.KeyValueWriter, *types.Header)
+ // before head header is updated. The method will return the actual block it
+ // updated the head to (missing state) and a flag indicating whether setHead
+ // should keep rewinding to that block forcefully (ancient limits exceeded).
+ UpdateHeadBlocksCallback func(xcbdb.KeyValueWriter, *types.Header) (uint64, bool)
// DeleteBlockContentCallback is a callback function that is called by SetHead
// before each header is deleted.
@@ -502,9 +504,10 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
var (
parentHash common.Hash
batch = hc.chainDb.NewBatch()
+ origin = true
)
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
- hash, num := hdr.Hash(), hdr.Number.Uint64()
+ num := hdr.Number.Uint64()
// Rewind block chain to new head.
parent := hc.GetHeader(hdr.ParentHash, num-1)
@@ -515,13 +518,17 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
// Notably, since gocore has the possibility for setting the head to a low
// height which is even lower than ancient head.
// In order to ensure that the head is always no higher than the data in
- // the database(ancient store or active store), we need to update head
+ // the database (ancient store or active store), we need to update head
// first then remove the relative data from the database.
//
// Update head first(head fast block, head full block) before deleting the data.
markerBatch := hc.chainDb.NewBatch()
if updateFn != nil {
- updateFn(markerBatch, parent)
+ newHead, force := updateFn(markerBatch, parent)
+ if force && newHead < head {
+ log.Warn("Force rewinding till ancient limit", "head", newHead)
+ head = newHead
+ }
}
// Update head header then.
rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
@@ -532,14 +539,34 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
hc.currentHeaderHash = parentHash
headHeaderGauge.Update(parent.Number.Int64())
- // Remove the relative data from the database.
- if delFn != nil {
- delFn(batch, hash, num)
+ // If this is the first iteration, wipe any leftover data upwards too so
+ // we don't end up with dangling gaps in the database
+ var nums []uint64
+ if origin {
+ for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
+ nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
+ }
+ origin = false
+ }
+ nums = append(nums, num)
+
+ // Remove the related data from the database on all sidechains
+ for _, num := range nums {
+ // Gather all the side fork hashes
+ hashes := rawdb.ReadAllHashes(hc.chainDb, num)
+ if len(hashes) == 0 {
+ // No hashes in the database whatsoever, probably frozen already
+ hashes = append(hashes, hdr.Hash())
+ }
+ for _, hash := range hashes {
+ if delFn != nil {
+ delFn(batch, hash, num)
+ }
+ rawdb.DeleteHeader(batch, hash, num)
+ rawdb.DeleteTd(batch, hash, num)
+ }
+ rawdb.DeleteCanonicalHash(batch, num)
}
- // Rewind header chain to new head.
- rawdb.DeleteHeader(batch, hash, num)
- rawdb.DeleteTd(batch, hash, num)
- rawdb.DeleteCanonicalHash(batch, num)
}
// Flush all accumulated deletions.
if err := batch.Write(); err != nil {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 5c7e6dd8f..ed8c97936 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -69,7 +69,7 @@ func ReadAllHashes(db xcbdb.Iteratee, number uint64) []common.Hash {
prefix := headerKeyPrefix(number)
hashes := make([]common.Hash, 0, 1)
- it := db.NewIteratorWithPrefix(prefix)
+ it := db.NewIterator(prefix, nil)
defer it.Release()
for it.Next() {
@@ -94,7 +94,7 @@ func ReadAllCanonicalHashes(db xcbdb.Iteratee, from uint64, to uint64, limit int
)
// Construct the key prefix of start point.
start, end := headerHashKey(from), headerHashKey(to)
- it := db.NewIteratorWithStart(start)
+ it := db.NewIterator(nil, start)
defer it.Release()
for it.Next() {
@@ -187,6 +187,32 @@ func WriteHeadFastBlockHash(db xcbdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
+// is fully synced, the last pivot will always be nil.
+func ReadLastPivotNumber(db xcbdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(lastPivotKey)
+ if len(data) == 0 {
+ return nil
+ }
+ var pivot uint64
+ if err := rlp.DecodeBytes(data, &pivot); err != nil {
+ log.Error("Invalid pivot block number in database", "err", err)
+ return nil
+ }
+ return &pivot
+}
+
+// WriteLastPivotNumber stores the number of the last pivot block.
+func WriteLastPivotNumber(db xcbdb.KeyValueWriter, pivot uint64) {
+ enc, err := rlp.EncodeToBytes(pivot)
+ if err != nil {
+ log.Crit("Failed to encode pivot block number", "err", err)
+ }
+ if err := db.Put(lastPivotKey, enc); err != nil {
+ log.Crit("Failed to store pivot block number", "err", err)
+ }
+}
+
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db xcbdb.KeyValueReader) uint64 {
@@ -205,6 +231,43 @@ func WriteFastTrieProgress(db xcbdb.KeyValueWriter, count uint64) {
}
}
+// ReadTxIndexTail retrieves the number of oldest indexed block
+// whose transaction indices have been indexed. If the corresponding entry
+// is non-existent in database it means the indexing has been finished.
+func ReadTxIndexTail(db xcbdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(txIndexTailKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteTxIndexTail stores the number of oldest indexed block
+// into database.
+func WriteTxIndexTail(db xcbdb.KeyValueWriter, number uint64) {
+ if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store the transaction index tail", "err", err)
+ }
+}
+
+// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
+func ReadFastTxLookupLimit(db xcbdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(fastTxLookupLimitKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
+func WriteFastTxLookupLimit(db xcbdb.KeyValueWriter, number uint64) {
+ if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
+ }
+}
+
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db xcbdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
@@ -323,6 +386,25 @@ func ReadBodyRLP(db xcbdb.Reader, hash common.Hash, number uint64) rlp.RawValue
return nil // Can't find the data anywhere.
}
+// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
+// block at number, in RLP encoding.
+func ReadCanonicalBodyRLP(db xcbdb.Reader, number uint64) rlp.RawValue {
+ // If it's an ancient one, we don't need the canonical hash
+ data, _ := db.Ancient(freezerBodiesTable, number)
+ if len(data) == 0 {
+ // Need to get the hash
+ data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
+ // In the background freezer is moving data from leveldb to flatten files.
+ // So during the first check for ancient db, the data is not yet in there,
+ // but when we reach into leveldb, the data was already moved. That would
+ // result in a not found error.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerBodiesTable, number)
+ }
+ }
+ return data
+}
+
// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db xcbdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
index 96f64cb81..af8c7019d 100644
--- a/core/rawdb/accessors_indexes.go
+++ b/core/rawdb/accessors_indexes.go
@@ -64,9 +64,31 @@ func WriteTxLookupEntries(db xcbdb.KeyValueWriter, block *types.Block) {
}
}
+// WriteTxLookupEntriesByHash is identical to WriteTxLookupEntries, but does not
+// require a full types.Block as input.
+func WriteTxLookupEntriesByHash(db xcbdb.KeyValueWriter, number uint64, hashes []common.Hash) {
+ numberBytes := new(big.Int).SetUint64(number).Bytes()
+ for _, hash := range hashes {
+ if err := db.Put(txLookupKey(hash), numberBytes); err != nil {
+ log.Crit("Failed to store transaction lookup entry", "err", err)
+ }
+ }
+}
+
// DeleteTxLookupEntry removes all transaction data associated with a hash.
func DeleteTxLookupEntry(db xcbdb.KeyValueWriter, hash common.Hash) {
- db.Delete(txLookupKey(hash))
+ if err := db.Delete(txLookupKey(hash)); err != nil {
+ log.Crit("Failed to delete transaction lookup entry", "err", err)
+ }
+}
+
+// DeleteTxLookupEntriesByHash removes all transaction lookups for a given block.
+func DeleteTxLookupEntriesByHash(db xcbdb.KeyValueWriter, hashes []common.Hash) {
+ for _, hash := range hashes {
+ if err := db.Delete(txLookupKey(hash)); err != nil {
+ log.Crit("Failed to delete transaction lookup entry", "err", err)
+ }
+ }
}
// ReadTransaction retrieves a specific transaction from the database, along with
@@ -135,7 +157,7 @@ func WriteBloomBits(db xcbdb.KeyValueWriter, bit uint, section uint64, head comm
// given section range and bit index.
func DeleteBloombits(db xcbdb.Database, bit uint, from uint64, to uint64) {
start, end := bloomBitsKey(bit, from, common.Hash{}), bloomBitsKey(bit, to, common.Hash{})
- it := db.NewIteratorWithStart(start)
+ it := db.NewIterator(nil, start)
defer it.Release()
for it.Next() {
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index 64c1aa8c8..56e43d99b 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -93,7 +93,7 @@ func DeleteStorageSnapshot(db xcbdb.KeyValueWriter, accountHash, storageHash com
// IterateStorageSnapshots returns an iterator for walking the entire storage
// space of a specific account.
func IterateStorageSnapshots(db xcbdb.Iteratee, accountHash common.Hash) xcbdb.Iterator {
- return db.NewIteratorWithPrefix(storageSnapshotsKey(accountHash))
+ return db.NewIterator(storageSnapshotsKey(accountHash), nil)
}
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
new file mode 100644
index 000000000..6b17c243f
--- /dev/null
+++ b/core/rawdb/chain_iterator.go
@@ -0,0 +1,304 @@
+// Copyright 2018 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/common/prque"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/rlp"
+ "github.com/core-coin/go-core/xcbdb"
+ "golang.org/x/crypto/sha3"
+ "math"
+ "runtime"
+ "sync/atomic"
+ "time"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings.
+func InitDatabaseFromFreezer(db xcbdb.Database) {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return
+ }
+ var (
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
+ hash common.Hash
+ )
+ for i := uint64(0); i < frozen; i++ {
+ // Since the freezer has all data in sequential order on a file,
+ // it would be 'neat' to read more data in one go, and let the
+ // freezerdb return N items (e.g up to 1000 items per go)
+ // That would require an API change in Ancients though
+ if h, err := db.Ancient(freezerHashTable, i); err != nil {
+ log.Crit("Failed to init database from freezer", "err", err)
+ } else {
+ hash = common.BytesToHash(h)
+ }
+ WriteHeaderNumber(batch, hash, i)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > xcbdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+ log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+type blockTxHashes struct {
+ number uint64
+ hashes []common.Hash
+}
+
+// iterateTransactions iterates over all transactions in the (canon) block
+// number(s) given, and yields the hashes on a channel
+func iterateTransactions(db xcbdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) {
+ // One thread sequentially reads data from db
+ type numberRlp struct {
+ number uint64
+ rlp rlp.RawValue
+ }
+ if to == from {
+ return nil, nil
+ }
+ threads := to - from
+ if cpus := runtime.NumCPU(); threads > uint64(cpus) {
+ threads = uint64(cpus)
+ }
+ var (
+ rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
+ hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
+ abortCh = make(chan struct{})
+ )
+ // lookup runs in one instance
+ lookup := func() {
+ n, end := from, to
+ if reverse {
+ n, end = to-1, from-1
+ }
+ defer close(rlpCh)
+ for n != end {
+ data := ReadCanonicalBodyRLP(db, n)
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case rlpCh <- &numberRlp{n, data}:
+ case <-abortCh:
+ return
+ }
+ if reverse {
+ n--
+ } else {
+ n++
+ }
+ }
+ }
+ // process runs in parallel
+ nThreadsAlive := int32(threads)
+ process := func() {
+ defer func() {
+ // Last processor closes the result channel
+ if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ close(hashesCh)
+ }
+ }()
+
+ var hasher = sha3.New256()
+ for data := range rlpCh {
+ it, err := rlp.NewListIterator(data.rlp)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var hashes []common.Hash
+ for txIt.Next() {
+ if err := txIt.Err(); err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var txHash common.Hash
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(txHash[:0])
+ hashes = append(hashes, txHash)
+ }
+ result := &blockTxHashes{
+ hashes: hashes,
+ number: data.number,
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case hashesCh <- result:
+ case <-abortCh:
+ return
+ }
+ }
+ }
+ go lookup() // start the sequential db accessor
+ for i := 0; i < int(threads); i++ {
+ go process()
+ }
+ return hashesCh, abortCh
+}
+
+// IndexTransactions creates txlookup indices of the specified block range.
+//
+// This function iterates canonical chain in reverse order, it has one main advantage:
+// We can write tx index tail flag periodically even without the whole indexing
+// procedure is finished. So that we can resume indexing procedure next time quickly.
+func IndexTransactions(db xcbdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, true)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // Since we iterate in reverse, we expect the first number to come
+ // in to be [to-1]. Therefore, setting lastNum to [to] means that the
+ // prqueue gap-evaluation will work correctly
+ lastNum = to
+ queue = prque.New(nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+ defer close(abortCh)
+
+ for chanDelivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+ // Since we iterate in reverse, so lower numbers have lower prio, and
+ // we can use the number directly as prio marker
+ queue.Push(chanDelivery, int64(chanDelivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, return
+ if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+ break
+ }
+ // Next block available, pop it off and index it
+ delivery := queue.PopItem().(*blockTxHashes)
+ lastNum = delivery.number
+ WriteTxLookupEntriesByHash(batch, delivery.number, delivery.hashes)
+ blocks++
+ txs += len(delivery.hashes)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > xcbdb.IdealBatchSize {
+ // Also write the tail there
+ WriteTxIndexTail(batch, lastNum)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+ if lastNum < to {
+ WriteTxIndexTail(batch, lastNum)
+ // No need to write the batch if we never entered the loop above...
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ }
+ log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+func UnindexTransactions(db xcbdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ // Write flag first and then unindex the transaction indices. Some indices
+ // will be left in the database if crash happens but it's fine.
+ WriteTxIndexTail(db, to)
+ // If only one block is unindexed, do it directly
+ //if from+1 == to {
+ // data := ReadCanonicalBodyRLP(db, uint64(from))
+ // DeleteTxLookupEntries(db, ReadBlock(db, ReadCanonicalHash(db, from), from))
+ // log.Info("Unindexed transactions", "blocks", 1, "tail", to)
+ // return
+ //}
+ // TODO @holiman, add this back (if we want it)
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, false)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ )
+ defer close(abortCh)
+ // Otherwise spin up the concurrent iterator and unindexer
+ blocks, txs := 0, 0
+ for delivery := range hashesCh {
+ DeleteTxLookupEntriesByHash(batch, delivery.hashes)
+ txs += len(delivery.hashes)
+ blocks++
+
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ // A batch counts the size of deletion as '1', so we need to flush more
+ // often than that.
+ if blocks%1000 == 0 {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Unindexing transactions", "blocks", int64(math.Abs(float64(delivery.number-from))), "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+}
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
new file mode 100644
index 000000000..e859e403a
--- /dev/null
+++ b/core/rawdb/chain_iterator_test.go
@@ -0,0 +1,81 @@
+// Copyright 2018 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/core/types"
+ "math/big"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestChainIterator(t *testing.T) {
+ // Construct test chain db
+ chainDb := NewMemoryDatabase()
+
+ var block *types.Block
+ var txs []*types.Transaction
+ for i := uint64(0); i <= 10; i++ {
+ if i == 0 {
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block
+ } else {
+ tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
+ txs = append(txs, tx)
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+ }
+ WriteBlock(chainDb, block)
+ WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
+ }
+
+ var cases = []struct {
+ from, to uint64
+ reverse bool
+ expect []int
+ }{
+ {0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}},
+ {0, 0, true, nil},
+ {0, 5, true, []int{4, 3, 2, 1, 0}},
+ {10, 11, true, []int{10}},
+ {0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+ {0, 0, false, nil},
+ {10, 11, false, []int{10}},
+ }
+ for i, c := range cases {
+ var numbers []int
+ hashCh, _ := iterateTransactions(chainDb, c.from, c.to, c.reverse)
+ if hashCh != nil {
+ for h := range hashCh {
+ numbers = append(numbers, int(h.number))
+ if len(h.hashes) > 0 {
+ if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp {
+ t.Fatalf("hash wrong, got %x exp %x", got, exp)
+ }
+ }
+ }
+ }
+ if !c.reverse {
+ sort.Ints(numbers)
+ } else {
+ sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
+ }
+ if !reflect.DeepEqual(numbers, c.expect) {
+ t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers)
+ }
+ }
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 4c3563292..e3fae4a04 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"os"
+ "sync/atomic"
"time"
"github.com/core-coin/go-core/common"
@@ -41,10 +42,10 @@ type freezerdb struct {
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
var errs []error
- if err := frdb.KeyValueStore.Close(); err != nil {
+ if err := frdb.AncientStore.Close(); err != nil {
errs = append(errs, err)
}
- if err := frdb.AncientStore.Close(); err != nil {
+ if err := frdb.KeyValueStore.Close(); err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
@@ -53,6 +54,22 @@ func (frdb *freezerdb) Close() error {
return nil
}
+// Freeze is a helper method used for external testing to trigger and block until
+// a freeze cycle completes, without having to sleep for a minute to trigger the
+// automatic background run.
+func (frdb *freezerdb) Freeze(threshold uint64) {
+ // Set the freezer threshold to a temporary value
+ defer func(old uint64) {
+ atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old)
+ }(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold))
+ atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold)
+
+ // Trigger a freeze cycle and block until it's done
+ trigger := make(chan struct{}, 1)
+ frdb.AncientStore.(*freezer).trigger <- trigger
+ <-trigger
+}
+
// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
xcbdb.KeyValueStore
@@ -137,7 +154,10 @@ func NewDatabaseWithFreezer(db xcbdb.KeyValueStore, freezer string, namespace st
// If the freezer already contains something, ensure that the genesis blocks
// match, otherwise we might mix up freezers across chains and destroy both
// the freezer and the key-value store.
- if frgenesis, _ := frdb.Ancient(freezerHashTable, 0); !bytes.Equal(kvgenesis, frgenesis) {
+ frgenesis, err := frdb.Ancient(freezerHashTable, 0)
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
+ } else if !bytes.Equal(kvgenesis, frgenesis) {
return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
}
// Key-value store and freezer belong to the same network. Ensure that they
@@ -251,7 +271,7 @@ func (s *stat) Count() string {
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db xcbdb.Database) error {
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
defer it.Release()
var (
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 92de1578e..d32cf5259 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -22,6 +22,7 @@ import (
"math"
"os"
"path/filepath"
+ "sync"
"sync/atomic"
"time"
@@ -69,10 +70,16 @@ type freezer struct {
// WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- frozen uint64 // Number of blocks already frozen
+ frozen uint64 // Number of blocks already frozen
+ threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens
+
+ trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
+
+ quit chan struct{}
+ closeOnce sync.Once
}
// newFreezer creates a chain freezer that moves ancient chain data into
@@ -99,8 +106,11 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
}
// Open all the supported data tables
freezer := &freezer{
+ threshold: params.FullImmutabilityThreshold,
tables: make(map[string]*freezerTable),
instanceLock: lock,
+ trigger: make(chan chan struct{}),
+ quit: make(chan struct{}),
}
for name, disableSnappy := range freezerNoSnappy {
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
@@ -127,14 +137,17 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
// Close terminates the chain freezer, unmapping all the data files.
func (f *freezer) Close() error {
var errs []error
- for _, table := range f.tables {
- if err := table.Close(); err != nil {
+ f.closeOnce.Do(func() {
+ f.quit <- struct{}{}
+ for _, table := range f.tables {
+ if err := table.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if err := f.instanceLock.Release(); err != nil {
errs = append(errs, err)
}
- }
- if err := f.instanceLock.Release(); err != nil {
- errs = append(errs, err)
- }
+ })
if errs != nil {
return fmt.Errorf("%v", errs)
}
@@ -254,39 +267,66 @@ func (f *freezer) Sync() error {
func (f *freezer) freeze(db xcbdb.KeyValueStore) {
nfdb := &nofreezedb{KeyValueStore: db}
+ var (
+ backoff bool
+ triggered chan struct{} // Used in tests
+ )
for {
+ select {
+ case <-f.quit:
+ log.Info("Freezer shutting down")
+ return
+ default:
+ }
+ if backoff {
+ // If we were doing a manual trigger, notify it
+ if triggered != nil {
+ triggered <- struct{}{}
+ triggered = nil
+ }
+ select {
+ case <-time.NewTimer(freezerRecheckInterval).C:
+ backoff = false
+ case triggered = <-f.trigger:
+ backoff = false
+ case <-f.quit:
+ return
+ }
+ }
// Retrieve the freezing threshold.
hash := ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
number := ReadHeaderNumber(nfdb, hash)
+ threshold := atomic.LoadUint64(&f.threshold)
+
switch {
case number == nil:
log.Error("Current full block number unavailable", "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
- case *number < params.FullImmutabilityThreshold:
- log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.FullImmutabilityThreshold)
- time.Sleep(freezerRecheckInterval)
+ case *number < threshold:
+ log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
+ backoff = true
continue
- case *number-params.FullImmutabilityThreshold <= f.frozen:
+ case *number-threshold <= f.frozen:
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
head := ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
// Seems we have data ready to be frozen, process in usable batches
- limit := *number - params.FullImmutabilityThreshold
+ limit := *number - threshold
if limit-f.frozen > freezerBatchLimit {
limit = f.frozen + freezerBatchLimit
}
@@ -295,7 +335,7 @@ func (f *freezer) freeze(db xcbdb.KeyValueStore) {
first = f.frozen
ancients = make([]common.Hash, 0, limit-f.frozen)
)
- for f.frozen < limit {
+ for f.frozen <= limit {
// Retrieves all the components of the canonical block
hash := ReadCanonicalHash(nfdb, f.frozen)
if hash == (common.Hash{}) {
@@ -346,11 +386,14 @@ func (f *freezer) freeze(db xcbdb.KeyValueStore) {
log.Crit("Failed to delete frozen canonical blocks", "err", err)
}
batch.Reset()
- // Wipe out side chain also.
+ // Wipe out side chains also and track dangling side chains
+ var dangling []common.Hash
for number := first; number < f.frozen; number++ {
// Always keep the genesis block in active database
if number != 0 {
- for _, hash := range ReadAllHashes(db, number) {
+ dangling = ReadAllHashes(db, number)
+ for _, hash := range dangling {
+ log.Trace("Deleting side chain", "number", number, "hash", hash)
DeleteBlock(batch, hash, number)
}
}
@@ -358,6 +401,41 @@ func (f *freezer) freeze(db xcbdb.KeyValueStore) {
if err := batch.Write(); err != nil {
log.Crit("Failed to delete frozen side blocks", "err", err)
}
+ batch.Reset()
+
+ // Step into the future and delete any dangling side chains
+ if f.frozen > 0 {
+ tip := f.frozen
+ for len(dangling) > 0 {
+ drop := make(map[common.Hash]struct{})
+ for _, hash := range dangling {
+ log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
+ drop[hash] = struct{}{}
+ }
+ children := ReadAllHashes(db, tip)
+ for i := 0; i < len(children); i++ {
+ // Dig up the child and ensure it's dangling
+ child := ReadHeader(nfdb, children[i], tip)
+ if child == nil {
+ log.Error("Missing dangling header", "number", tip, "hash", children[i])
+ continue
+ }
+ if _, ok := drop[child.ParentHash]; !ok {
+ children = append(children[:i], children[i+1:]...)
+ i--
+ continue
+ }
+ // Delete all block data associated with the child
+ log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+ DeleteBlock(batch, children[i], tip)
+ }
+ dangling = children
+ tip++
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete dangling side blocks", "err", err)
+ }
+ }
// Log something friendly for the user
context := []interface{}{
"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
diff --git a/core/rawdb/freezer_reinit.go b/core/rawdb/freezer_reinit.go
deleted file mode 100644
index b8fb83c4f..000000000
--- a/core/rawdb/freezer_reinit.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package rawdb
-
-import (
- "errors"
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/core-coin/go-core/common"
- "github.com/core-coin/go-core/common/prque"
- "github.com/core-coin/go-core/core/types"
- "github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/xcbdb"
-)
-
-// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
-// of frozen ancient blocks. The method iterates over all the frozen blocks and
-// injects into the database the block hash->number mappings and the transaction
-// lookup entries.
-func InitDatabaseFromFreezer(db xcbdb.Database) error {
- // If we can't access the freezer or it's empty, abort
- frozen, err := db.Ancients()
- if err != nil || frozen == 0 {
- return err
- }
- // Blocks previously frozen, iterate over- and hash them concurrently
- var (
- number = ^uint64(0) // -1
- results = make(chan *types.Block, 4*runtime.NumCPU())
- )
- abort := make(chan struct{})
- defer close(abort)
-
- for i := 0; i < runtime.NumCPU(); i++ {
- go func() {
- for {
- // Fetch the next task number, terminating if everything's done
- n := atomic.AddUint64(&number, 1)
- if n >= frozen {
- return
- }
- // Retrieve the block from the freezer. If successful, pre-cache
- // the block hash and the individual transaction hashes for storing
- // into the database.
- block := ReadBlock(db, ReadCanonicalHash(db, n), n)
- if block != nil {
- block.Hash()
- for _, tx := range block.Transactions() {
- tx.Hash()
- }
- }
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case results <- block:
- case <-abort:
- return
- }
- }
- }()
- }
- // Reassemble the blocks into a contiguous stream and push them out to disk
- var (
- queue = prque.New(nil)
- next = int64(0)
-
- batch = db.NewBatch()
- start = time.Now()
- logged time.Time
- )
- for i := uint64(0); i < frozen; i++ {
- // Retrieve the next result and bail if it's nil
- block := <-results
- if block == nil {
- return errors.New("broken ancient database")
- }
- // Push the block into the import queue and process contiguous ranges
- queue.Push(block, -int64(block.NumberU64()))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); -priority != next {
- break
- }
- // Next block available, pop it off and index it
- block = queue.PopItem().(*types.Block)
- next++
-
- // Inject hash<->number mapping and txlookup indexes
- WriteHeaderNumber(batch, block.Hash(), block.NumberU64())
- WriteTxLookupEntries(batch, block)
-
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- if batch.ValueSize() > xcbdb.IdealBatchSize || uint64(next) == frozen {
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Initializing chain from ancient data", "number", block.Number(), "hash", block.Hash(), "total", frozen-1, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- hash := ReadCanonicalHash(db, frozen-1)
- WriteHeadHeaderHash(db, hash)
- WriteHeadFastBlockHash(db, hash)
-
- log.Info("Initialized chain from ancient data", "number", frozen-1, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index bacd431e5..5cb5a0041 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -39,6 +39,9 @@ var (
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
+ // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead).
+ lastPivotKey = []byte("LastPivot")
+
// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
fastTrieProgressKey = []byte("TrieSync")
@@ -48,6 +51,12 @@ var (
// snapshotJournalKey tracks the in-memory diff layers across restarts.
snapshotJournalKey = []byte("SnapshotJournal")
+ // txIndexTailKey tracks the oldest block whose transactions have been indexed.
+ txIndexTailKey = []byte("TransactionIndexTail")
+
+ // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
+ fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")
+
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 3cfc1de04..ccdbfb7b9 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -103,27 +103,12 @@ func (t *table) Delete(key []byte) error {
return t.db.Delete(append([]byte(t.prefix), key...))
}
-// NewIterator creates a binary-alphabetical iterator over the entire keyspace
-// contained within the database.
-func (t *table) NewIterator() xcbdb.Iterator {
- return t.NewIteratorWithPrefix(nil)
-}
-
-// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
-// database content starting at a particular initial key (or after, if it does
-// not exist).
-func (t *table) NewIteratorWithStart(start []byte) xcbdb.Iterator {
- iter := t.db.NewIteratorWithStart(append([]byte(t.prefix), start...))
- return &tableIterator{
- iter: iter,
- prefix: t.prefix,
- }
-}
-
-// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
-// of database content with a particular key prefix.
-func (t *table) NewIteratorWithPrefix(prefix []byte) xcbdb.Iterator {
- iter := t.db.NewIteratorWithPrefix(append([]byte(t.prefix), prefix...))
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (t *table) NewIterator(prefix []byte, start []byte) xcbdb.Iterator {
+ innerPrefix := append([]byte(t.prefix), prefix...)
+ iter := t.db.NewIterator(innerPrefix, start)
return &tableIterator{
iter: iter,
prefix: t.prefix,
diff --git a/core/rawdb/table_test.go b/core/rawdb/table_test.go
index df4c05019..00a0f4dfd 100644
--- a/core/rawdb/table_test.go
+++ b/core/rawdb/table_test.go
@@ -18,6 +18,7 @@ package rawdb
import (
"bytes"
+ "github.com/core-coin/go-core/xcbdb"
"testing"
)
@@ -96,48 +97,33 @@ func testTableDatabase(t *testing.T, prefix string) {
}
}
- // Test iterators
- iter := db.NewIterator()
- var index int
- for iter.Next() {
- key, value := iter.Key(), iter.Value()
- if !bytes.Equal(key, entries[index].key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
+ check := func(iter xcbdb.Iterator, expCount, index int) {
+ count := 0
+ for iter.Next() {
+ key, value := iter.Key(), iter.Value()
+ if !bytes.Equal(key, entries[index].key) {
+ t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
+ }
+ if !bytes.Equal(value, entries[index].value) {
+ t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
+ }
+ index += 1
+ count++
}
- if !bytes.Equal(value, entries[index].value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
+ if count != expCount {
+ t.Fatalf("Wrong number of elems, exp %d got %d", expCount, count)
}
- index += 1
+ iter.Release()
}
- iter.Release()
+ // Test iterators
+ check(db.NewIterator(nil, nil), 6, 0)
// Test iterators with prefix
- iter = db.NewIteratorWithPrefix([]byte{0xff, 0xff})
- index = 3
- for iter.Next() {
- key, value := iter.Key(), iter.Value()
- if !bytes.Equal(key, entries[index].key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
- }
- if !bytes.Equal(value, entries[index].value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
- }
- index += 1
- }
- iter.Release()
+ check(db.NewIterator([]byte{0xff, 0xff}, nil), 3, 3)
// Test iterators with start point
- iter = db.NewIteratorWithStart([]byte{0xff, 0xff, 0x02})
- index = 4
- for iter.Next() {
- key, value := iter.Key(), iter.Value()
- if !bytes.Equal(key, entries[index].key) {
- t.Fatalf("Key mismatch: want=%v, got=%v", entries[index].key, key)
- }
- if !bytes.Equal(value, entries[index].value) {
- t.Fatalf("Value mismatch: want=%v, got=%v", entries[index].value, value)
- }
- index += 1
- }
- iter.Release()
+ check(db.NewIterator(nil, []byte{0xff, 0xff, 0x02}), 2, 4)
+ // Test iterators with prefix and start point
+ check(db.NewIterator([]byte{0xee}, nil), 0, 0)
+ check(db.NewIterator(nil, []byte{0x00}), 6, 0)
}
diff --git a/core/rlp_test.go b/core/rlp_test.go
new file mode 100644
index 000000000..7790c5e22
--- /dev/null
+++ b/core/rlp_test.go
@@ -0,0 +1,203 @@
+// Copyright 2018 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/consensus/cryptore"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/crypto"
+ "github.com/core-coin/go-core/params"
+ "github.com/core-coin/go-core/rlp"
+ eddsa "github.com/core-coin/go-goldilocks"
+ "golang.org/x/crypto/sha3"
+ "math/big"
+ "testing"
+)
+
+func getBlock(transactions int, uncles int, dataSize int) *types.Block {
+ var (
+ aa, _ = common.HexToAddress("cb540000000000000000000000000000000000000000")
+ // Generate a canonical chain to act as the main dataset
+ engine = cryptore.NewFaker()
+ db = rawdb.NewMemoryDatabase()
+ // A sender who makes transactions, has some funds
+ key, _ = crypto.HexToEDDSA("856a9af6b0b651dd2f43b5e12193652ec1701c4da6f1c0d2a366ac4b9dabc9433ef09e41ca129552bd2c029086d9b03604de872a3b3432041f")
+ pub = eddsa.Ed448DerivePublicKey(*key)
+ address = crypto.PubkeyToAddress(pub)
+
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{address: {Balance: funds}},
+ }
+ genesis = gspec.MustCommit(db)
+ )
+
+ // We need to generate as many blocks +1 as uncles
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1,
+ func(n int, b *BlockGen) {
+ if n == uncles {
+ // Add transactions and stuff on the last block
+ for i := 0; i < transactions; i++ {
+ tx, _ := types.SignTx(types.NewTransaction(uint64(i), aa,
+ big.NewInt(0), 50000, big.NewInt(1), make([]byte, dataSize)), types.NewNucleusSigner(big.NewInt(1)), key)
+ b.AddTx(tx)
+ }
+ for i := 0; i < uncles; i++ {
+ b.AddUncle(&types.Header{ParentHash: b.PrevBlock(n - 1 - i).Hash(), Number: big.NewInt(int64(n - i))})
+ }
+ }
+ })
+ block := blocks[len(blocks)-1]
+ return block
+}
+
+// TestRlpIterator tests that individual transactions can be picked out
+// from blocks without full unmarshalling/marshalling
+func TestRlpIterator(t *testing.T) {
+ for _, tt := range []struct {
+ txs int
+ uncles int
+ datasize int
+ }{
+ {0, 0, 0},
+ {0, 2, 0},
+ {10, 0, 0},
+ {10, 2, 0},
+ {10, 2, 50},
+ } {
+ testRlpIterator(t, tt.txs, tt.uncles, tt.datasize)
+ }
+}
+
+func testRlpIterator(t *testing.T, txs, uncles, datasize int) {
+ desc := fmt.Sprintf("%d txs [%d datasize] and %d uncles", txs, datasize, uncles)
+ bodyRlp, _ := rlp.EncodeToBytes(getBlock(txs, uncles, datasize).Body())
+ it, err := rlp.NewListIterator(bodyRlp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Check that txs exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got zero")
+ }
+ txdata := it.Value()
+ // Check that uncles exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got one")
+ }
+ // No more after that
+ if it.Next() {
+ t.Fatal("expected only two elems, got more")
+ }
+ txIt, err := rlp.NewListIterator(txdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var gotHashes []common.Hash
+ var expHashes []common.Hash
+ for txIt.Next() {
+ gotHashes = append(gotHashes, crypto.SHA3Hash(txIt.Value()))
+ }
+
+ var expBody types.Body
+ err = rlp.DecodeBytes(bodyRlp, &expBody)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tx := range expBody.Transactions {
+ expHashes = append(expHashes, tx.Hash())
+ }
+ if gotLen, expLen := len(gotHashes), len(expHashes); gotLen != expLen {
+ t.Fatalf("testcase %v: length wrong, got %d exp %d", desc, gotLen, expLen)
+ }
+ // also sanity check against input
+ if gotLen := len(gotHashes); gotLen != txs {
+ t.Fatalf("testcase %v: length wrong, got %d exp %d", desc, gotLen, txs)
+ }
+ for i, got := range gotHashes {
+ if exp := expHashes[i]; got != exp {
+ t.Errorf("testcase %v: hash wrong, got %x, exp %x", desc, got, exp)
+ }
+ }
+}
+
+// BenchmarkHashing compares the speeds of hashing a rlp raw data directly
+// without the unmarshalling/marshalling step
+func BenchmarkHashing(b *testing.B) {
+ // Make a pretty fat block
+ var (
+ bodyRlp []byte
+ blockRlp []byte
+ )
+ {
+ block := getBlock(200, 2, 50)
+ bodyRlp, _ = rlp.EncodeToBytes(block.Body())
+ blockRlp, _ = rlp.EncodeToBytes(block)
+ }
+ var got common.Hash
+ var hasher = sha3.New256()
+ b.Run("iteratorhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var hash common.Hash
+ it, err := rlp.NewListIterator(bodyRlp)
+ if err != nil {
+ b.Fatal(err)
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ b.Fatal(err)
+ }
+ for txIt.Next() {
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(hash[:0])
+ got = hash
+ }
+ }
+ })
+ var exp common.Hash
+ b.Run("fullbodyhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var body types.Body
+ rlp.DecodeBytes(bodyRlp, &body)
+ for _, tx := range body.Transactions {
+ exp = tx.Hash()
+ }
+ }
+ })
+ b.Run("fullblockhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var block types.Block
+ rlp.DecodeBytes(blockRlp, &block)
+ for _, tx := range block.Transactions() {
+ tx.Hash()
+ }
+ }
+ })
+ if got != exp {
+ b.Fatalf("hash wrong, got %x exp %x", got, exp)
+ }
+}
diff --git a/core/state/dump.go b/core/state/dump.go
index a645bbd51..7901de93c 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -27,6 +27,14 @@ import (
"github.com/core-coin/go-core/trie"
)
+// DumpCollector interface which the state trie calls during iteration
+type DumpCollector interface {
+ // OnRoot is called with the state root
+ OnRoot(common.Hash)
+ // OnAccount is called once for each account in the trie
+ OnAccount(common.Address, DumpAccount)
+}
+
// DumpAccount represents an account in the state.
type DumpAccount struct {
Balance string `json:"balance"`
@@ -37,7 +45,6 @@ type DumpAccount struct {
Storage map[common.Hash]string `json:"storage,omitempty"`
Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
SecureKey hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key
-
}
// Dump represents the full dump in a collected format, as one large map.
@@ -46,9 +53,14 @@ type Dump struct {
Accounts map[common.Address]DumpAccount `json:"accounts"`
}
-// iterativeDump is a 'collector'-implementation which dump output line-by-line iteratively.
-type iterativeDump struct {
- *json.Encoder
+// OnRoot implements DumpCollector interface
+func (d *Dump) OnRoot(root common.Hash) {
+ d.Root = fmt.Sprintf("%x", root)
+}
+
+// OnAccount implements DumpCollector interface
+func (d *Dump) OnAccount(addr common.Address, account DumpAccount) {
+ d.Accounts[addr] = account
}
// IteratorDump is an implementation for iterating over data.
@@ -58,29 +70,23 @@ type IteratorDump struct {
Next []byte `json:"next,omitempty"` // nil if no more accounts
}
-// Collector interface which the state trie calls during iteration
-type collector interface {
- onRoot(common.Hash)
- onAccount(common.Address, DumpAccount)
-}
-
-func (d *Dump) onRoot(root common.Hash) {
+// OnRoot implements DumpCollector interface
+func (d *IteratorDump) OnRoot(root common.Hash) {
d.Root = fmt.Sprintf("%x", root)
}
-func (d *Dump) onAccount(addr common.Address, account DumpAccount) {
+// OnAccount implements DumpCollector interface
+func (d *IteratorDump) OnAccount(addr common.Address, account DumpAccount) {
d.Accounts[addr] = account
}
-func (d *IteratorDump) onRoot(root common.Hash) {
- d.Root = fmt.Sprintf("%x", root)
-}
-
-func (d *IteratorDump) onAccount(addr common.Address, account DumpAccount) {
- d.Accounts[addr] = account
+// iterativeDump is a DumpCollector-implementation which dumps output line-by-line iteratively.
+type iterativeDump struct {
+ *json.Encoder
}
-func (d iterativeDump) onAccount(addr common.Address, account DumpAccount) {
+// OnAccount implements DumpCollector interface
+func (d iterativeDump) OnAccount(addr common.Address, account DumpAccount) {
dumpAccount := &DumpAccount{
Balance: account.Balance,
Nonce: account.Nonce,
@@ -97,16 +103,16 @@ func (d iterativeDump) onAccount(addr common.Address, account DumpAccount) {
d.Encode(dumpAccount)
}
-func (d iterativeDump) onRoot(root common.Hash) {
+// OnRoot implements DumpCollector interface
+func (d iterativeDump) OnRoot(root common.Hash) {
d.Encode(struct {
Root common.Hash `json:"root"`
}{root})
}
-func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) {
- emptyAddress := (common.Address{})
+func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) {
missingPreimages := 0
- c.onRoot(s.trie.Hash())
+ c.OnRoot(s.trie.Hash())
var count int
it := trie.NewIterator(s.trie.NodeIterator(start))
@@ -115,15 +121,14 @@ func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingP
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
- addr := common.BytesToAddress(s.trie.GetKey(it.Key))
- obj := newObject(nil, addr, data)
account := DumpAccount{
Balance: data.Balance.String(),
Nonce: data.Nonce,
Root: common.Bytes2Hex(data.Root[:]),
CodeHash: common.Bytes2Hex(data.CodeHash),
}
- if emptyAddress == addr {
+ addrBytes := s.trie.GetKey(it.Key)
+ if addrBytes == nil {
// Preimage missing
missingPreimages++
if excludeMissingPreimages {
@@ -131,6 +136,8 @@ func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingP
}
account.SecureKey = it.Key
}
+ addr := common.BytesToAddress(addrBytes)
+ obj := newObject(nil, addr, data)
if !excludeCode {
account.Code = common.Bytes2Hex(obj.Code(s.db))
}
@@ -146,7 +153,7 @@ func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingP
account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content)
}
}
- c.onAccount(addr, account)
+ c.OnAccount(addr, account)
count++
if maxResults > 0 && count >= maxResults {
if it.Next() {
@@ -167,7 +174,7 @@ func (s *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages b
dump := &Dump{
Accounts: make(map[common.Address]DumpAccount),
}
- s.dump(dump, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+ s.DumpToCollector(dump, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
return *dump
}
@@ -176,14 +183,14 @@ func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool
dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
json, err := json.MarshalIndent(dump, "", " ")
if err != nil {
- fmt.Println("dump err", err)
+ fmt.Println("Dump err", err)
}
return json
}
// IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
func (s *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
- s.dump(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+ s.DumpToCollector(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
}
// IteratorDump dumps out a batch of accounts starts with the given start key
@@ -191,6 +198,6 @@ func (s *StateDB) IteratorDump(excludeCode, excludeStorage, excludeMissingPreima
iterator := &IteratorDump{
Accounts: make(map[common.Address]DumpAccount),
}
- iterator.Next = s.dump(iterator, excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults)
+ iterator.Next = s.DumpToCollector(iterator, excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults)
return *iterator
}
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index 5821b98d0..59a4c196b 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -55,7 +55,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
t.Errorf("state entry not reported %x", hash)
}
}
- it := db.TrieDB().DiskDB().(xcbdb.Database).NewIterator()
+ it := db.TrieDB().DiskDB().(xcbdb.Database).NewIterator(nil, nil)
for it.Next() {
key := it.Key()
if bytes.HasPrefix(key, []byte("secure-key-")) {
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
index 9038e3402..9f942b7ba 100644
--- a/core/state/snapshot/difflayer_test.go
+++ b/core/state/snapshot/difflayer_test.go
@@ -131,7 +131,7 @@ func TestMergeDelete(t *testing.T) {
flipDrops := func() map[common.Hash]struct{} {
return map[common.Hash]struct{}{
- h2: struct{}{},
+ h2: {},
}
}
flipAccs := func() map[common.Hash][]byte {
@@ -141,7 +141,7 @@ func TestMergeDelete(t *testing.T) {
}
flopDrops := func() map[common.Hash]struct{} {
return map[common.Hash]struct{}{
- h1: struct{}{},
+ h1: {},
}
}
flopAccs := func() map[common.Hash][]byte {
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 43bc104aa..854579187 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -18,6 +18,10 @@ package snapshot
import (
"bytes"
+ "github.com/core-coin/go-core/xcbdb"
+ "github.com/core-coin/go-core/xcbdb/leveldb"
+ "io/ioutil"
+ "os"
"testing"
"github.com/VictoriaMetrics/fastcache"
@@ -117,10 +121,10 @@ func TestDiskMerge(t *testing.T) {
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: struct{}{},
- accDelCache: struct{}{},
- conNukeNoCache: struct{}{},
- conNukeCache: struct{}{},
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
}, map[common.Hash][]byte{
accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]),
@@ -340,10 +344,10 @@ func TestDiskPartialMerge(t *testing.T) {
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: struct{}{},
- accDelCache: struct{}{},
- conNukeNoCache: struct{}{},
- conNukeCache: struct{}{},
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
}, map[common.Hash][]byte{
accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]),
@@ -432,4 +436,76 @@ func TestDiskPartialMerge(t *testing.T) {
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific cornercases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
+ // TODO(@error2215) ?
+}
+
+// TestDiskSeek tests that seek-operations work on the disk layer
+func TestDiskSeek(t *testing.T) {
+ // Create some accounts in the disk layer
+ var db xcbdb.Database
+
+ if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
+ t.Fatal(err)
+ } else {
+ defer os.RemoveAll(dir)
+ diskdb, err := leveldb.New(dir, 256, 0, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ db = rawdb.NewDatabase(diskdb)
+ }
+ // Fill even keys [0,2,4...]
+ for i := 0; i < 0xff; i += 2 {
+ acc := common.Hash{byte(i)}
+ rawdb.WriteAccountSnapshot(db, acc, acc[:])
+ }
+ // Add a 'higher' key, with incorrect (higher) prefix
+ highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
+ db.Put(highKey, []byte{0xff, 0xff})
+
+ baseRoot := randomHash()
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ },
+ },
+ }
+ // Test some different seek positions
+ type testcase struct {
+ pos byte
+ expkey byte
+ }
+ var cases = []testcase{
+ {0xff, 0x55}, // this should exit immediately without checking key
+ {0x01, 0x02},
+ {0xfe, 0xfe},
+ {0xfd, 0xfe},
+ {0x00, 0x00},
+ }
+ for i, tc := range cases {
+ it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
+ if err != nil {
+ t.Fatalf("case %d, error: %v", i, err)
+ }
+ count := 0
+ for it.Next() {
+ k, v, err := it.Hash()[0], it.Account()[0], it.Error()
+ if err != nil {
+ t.Fatalf("test %d, item %d, error: %v", i, count, err)
+ }
+ // First item in iterator should have the expected key
+ if count == 0 && k != tc.expkey {
+ t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
+ }
+ count++
+ if v != k {
+ t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
+ }
+ }
+ }
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index c9fa36548..d5aff2930 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -37,7 +37,7 @@ var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- // emptyCode is the known hash of the empty EVM bytecode.
+ // emptyCode is the known hash of the empty CVM bytecode.
emptyCode = crypto.SHA3Hash(nil)
)
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 5ca376fde..8308b6584 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -148,10 +148,10 @@ type diskAccountIterator struct {
// AccountIterator creates an account iterator over a disk layer.
func (dl *diskLayer) AccountIterator(seek common.Hash) AccountIterator {
- // TODO: Fix seek position, or remove seek parameter
+ pos := common.TrimRightZeroes(seek[:])
return &diskAccountIterator{
layer: dl,
- it: dl.diskdb.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix),
+ it: dl.diskdb.NewIterator(rawdb.SnapshotAccountPrefix, pos),
}
}
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index 53d56b117..13ba67743 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -402,7 +402,7 @@ func TestIteratorDeletions(t *testing.T) {
deleted := common.HexToHash("0x22")
destructed := map[common.Hash]struct{}{
- deleted: struct{}{},
+ deleted: {},
}
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
destructed, randomAccountSet("0x11", "0x33"), nil)
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index e2bb50928..77f6eb855 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -140,7 +140,7 @@ type snapshot interface {
AccountIterator(seek common.Hash) AccountIterator
}
-// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
+// SnapshotTree is a Core state snapshot tree. It consists of one persistent
// base layer backed by a key-value store, on top of which arbitrarily many in-
// memory diff layers are topped. The memory diffs can form a tree with branching,
// but the disk layer is singleton and common to all. If a reorg goes deeper than
diff --git a/core/state/snapshot/wipe.go b/core/state/snapshot/wipe.go
index 81e0609a7..e099f10f3 100644
--- a/core/state/snapshot/wipe.go
+++ b/core/state/snapshot/wipe.go
@@ -92,7 +92,7 @@ func wipeKeyRange(db xcbdb.KeyValueStore, kind string, prefix []byte, keylen int
// Iterate over the key-range and delete all of them
start, logged := time.Now(), time.Now()
- it := db.NewIteratorWithStart(prefix)
+ it := db.NewIterator(prefix, nil)
for it.Next() {
// Skip any keys with the correct prefix but wrong lenth (trie nodes)
key := it.Key()
@@ -113,7 +113,8 @@ func wipeKeyRange(db xcbdb.KeyValueStore, kind string, prefix []byte, keylen int
return err
}
batch.Reset()
- it = db.NewIteratorWithStart(key)
+ seekPos := key[len(prefix):]
+ it = db.NewIterator(prefix, seekPos)
if time.Since(logged) > 8*time.Second {
log.Info("Deleting state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
diff --git a/core/state/snapshot/wipe_test.go b/core/state/snapshot/wipe_test.go
index 11b75cfcb..62413f216 100644
--- a/core/state/snapshot/wipe_test.go
+++ b/core/state/snapshot/wipe_test.go
@@ -60,7 +60,7 @@ func TestWipe(t *testing.T) {
// Sanity check that all the keys are present
var items int
- it := db.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix)
+ it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
defer it.Release()
for it.Next() {
@@ -69,7 +69,7 @@ func TestWipe(t *testing.T) {
items++
}
}
- it = db.NewIteratorWithPrefix(rawdb.SnapshotStoragePrefix)
+ it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
defer it.Release()
for it.Next() {
@@ -88,7 +88,7 @@ func TestWipe(t *testing.T) {
<-wipeSnapshot(db, true)
// Iterate over the database end ensure no snapshot information remains
- it = db.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix)
+ it = db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
defer it.Release()
for it.Next() {
@@ -97,7 +97,7 @@ func TestWipe(t *testing.T) {
t.Errorf("snapshot entry remained after wipe: %x", key)
}
}
- it = db.NewIteratorWithPrefix(rawdb.SnapshotStoragePrefix)
+ it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
defer it.Release()
for it.Next() {
@@ -112,7 +112,7 @@ func TestWipe(t *testing.T) {
// Iterate over the database and ensure miscellaneous items are present
items = 0
- it = db.NewIterator()
+ it = db.NewIterator(nil, nil)
defer it.Release()
for it.Next() {
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 23984a1c0..c0a38fcfe 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -58,7 +58,7 @@ func TestDump(t *testing.T) {
s.state.updateStateObject(obj2)
s.state.Commit(false)
- // check that dump contains the state objects that are in trie
+ // check that DumpToCollector contains the state objects that are in trie
got := string(s.state.Dump(false, false, true))
want := `{
"root": "4a6ee6ee61e1ba178e184b74f0b7ea7d16869ef48e17f81a961cf5f58a867b4a",
@@ -85,7 +85,7 @@ func TestDump(t *testing.T) {
}
}`
if got != want {
- t.Errorf("dump mismatch:\ngot: %s\nwant: %s\n", got, want)
+ t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want)
}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 9d6f69b7b..4076791b8 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -506,7 +506,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
// If no live objects are available, attempt to use snapshots
var (
- data Account
+ data *Account
err error
)
if s.snap != nil {
@@ -518,11 +518,15 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if acc == nil {
return nil
}
- data.Nonce, data.Balance, data.CodeHash = acc.Nonce, acc.Balance, acc.CodeHash
+ data = &Account{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
if len(data.CodeHash) == 0 {
data.CodeHash = emptyCodeHash
}
- data.Root = common.BytesToHash(acc.Root)
if data.Root == (common.Hash{}) {
data.Root = emptyRoot
}
@@ -538,13 +542,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
s.setError(err)
return nil
}
- if err := rlp.DecodeBytes(enc, &data); err != nil {
+ data = new(Account)
+ if err := rlp.DecodeBytes(enc, data); err != nil {
log.Error("Failed to decode state object", "addr", addr, "err", err)
return nil
}
}
// Insert into the live set
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, *data)
s.setStateObject(obj)
return obj
}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 6e542aa0e..c55946fec 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -60,7 +60,7 @@ func TestUpdateLeaks(t *testing.T) {
}
// Ensure that no data was leaked into the database
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
for it.Next() {
t.Errorf("State leaked into database: %x -> %x", it.Key(), it.Value())
}
@@ -118,7 +118,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
}
- it := finalDb.NewIterator()
+ it := finalDb.NewIterator(nil, nil)
for it.Next() {
key, fvalue := it.Key(), it.Value()
tvalue, err := transDb.Get(key)
@@ -131,7 +131,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
it.Release()
- it = transDb.NewIterator()
+ it = transDb.NewIterator(nil, nil)
for it.Next() {
key, tvalue := it.Key(), it.Value()
fvalue, err := finalDb.Get(key)
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index d9b71507b..ab7cf569f 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -81,6 +81,6 @@ func precacheTransaction(config *params.ChainConfig, bc ChainContext, author *co
context := NewCVMContext(msg, header, bc, author)
vm := vm.NewCVM(context, statedb, config, cfg)
- _, _, _, err = ApplyMessage(vm, msg, energypool)
+ _, err = ApplyMessage(vm, msg, energypool)
return err
}
diff --git a/core/state_processor.go b/core/state_processor.go
index 43c7477a7..eb6144233 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -91,20 +91,20 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// about the transaction and calling mechanisms.
vmenv := vm.NewCVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env)
- _, energy, failed, err := ApplyMessage(vmenv, msg, gp)
+ result, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
return nil, err
}
// Update the state with pending changes
var root []byte
statedb.Finalise(true)
- *usedEnergy += energy
+ *usedEnergy += result.UsedEnergy
// Create a new receipt for the transaction, storing the intermediate root and energy used by the tx
// based on the cip phase, we're passing whether the root touch-delete accounts.
- receipt := types.NewReceipt(root, failed, *usedEnergy)
+ receipt := types.NewReceipt(root, result.Failed(), *usedEnergy)
receipt.TxHash = tx.Hash()
- receipt.EnergyUsed = energy
+ receipt.EnergyUsed = result.UsedEnergy
// if the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce())
diff --git a/core/state_transition.go b/core/state_transition.go
index 0848e9e7f..28305657c 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -17,20 +17,14 @@
package core
import (
- "errors"
"math"
"math/big"
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/core/vm"
- "github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/params"
)
-var (
- errInsufficientBalanceForEnergy = errors.New("insufficient balance to pay for energy")
-)
-
/*
The State Transitioning Model
@@ -74,6 +68,41 @@ type Message interface {
Data() []byte
}
+// ExecutionResult includes all output after executing a given cvm
+// message, no matter whether the execution itself is successful or not.
+type ExecutionResult struct {
+ UsedEnergy uint64 // Total used energy but include the refunded energy
+ Err error // Any error encountered during the execution(listed in core/vm/errors.go)
+ ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode)
+}
+
+// Unwrap returns the internal cvm error, which allows further
+// analysis outside.
+func (result *ExecutionResult) Unwrap() error {
+ return result.Err
+}
+
+// Failed returns the indicator whether the execution is successful or not
+func (result *ExecutionResult) Failed() bool { return result.Err != nil }
+
+// Return is a helper function to help caller distinguish between revert reason
+// and function return. Return returns the data after execution if no error occurs.
+func (result *ExecutionResult) Return() []byte {
+ if result.Err != nil {
+ return nil
+ }
+ return common.CopyBytes(result.ReturnData)
+}
+
+// Revert returns the concrete revert reason if the execution is aborted by the
+// `REVERT` opcode. Note the reason can be nil if no data is supplied with the revert opcode.
+func (result *ExecutionResult) Revert() []byte {
+ if result.Err != vm.ErrExecutionReverted {
+ return nil
+ }
+ return common.CopyBytes(result.ReturnData)
+}
+
// IntrinsicEnergy computes the 'intrinsic energy' for a message with the given data.
func IntrinsicEnergy(data []byte, contractCreation bool) (uint64, error) {
// Set the starting energy for the raw transaction
@@ -95,13 +124,13 @@ func IntrinsicEnergy(data []byte, contractCreation bool) (uint64, error) {
// Make sure we don't exceed uint64 for all data combinations
nonZeroEnergy := params.TxDataNonZeroEnergy
if (math.MaxUint64-energy)/nonZeroEnergy < nz {
- return 0, vm.ErrOutOfEnergy
+ return 0, ErrEnergyUintOverflow
}
energy += nz * nonZeroEnergy
z := uint64(len(data)) - nz
if (math.MaxUint64-energy)/params.TxDataZeroEnergy < z {
- return 0, vm.ErrOutOfEnergy
+ return 0, ErrEnergyUintOverflow
}
energy += z * params.TxDataZeroEnergy
}
@@ -128,7 +157,7 @@ func NewStateTransition(cvm *vm.CVM, msg Message, gp *EnergyPool) *StateTransiti
// the energy used (which includes energy refunds) and an error if it failed. An error always
// indicates a core error meaning that the message would always fail for that particular
// state and would never be accepted within a block.
-func ApplyMessage(cvm *vm.CVM, msg Message, gp *EnergyPool) ([]byte, uint64, bool, error) {
+func ApplyMessage(cvm *vm.CVM, msg Message, gp *EnergyPool) (*ExecutionResult, error) {
return NewStateTransition(cvm, msg, gp).TransitionDb()
}
@@ -140,19 +169,10 @@ func (st *StateTransition) to() common.Address {
return *st.msg.To()
}
-func (st *StateTransition) useEnergy(amount uint64) error {
- if st.energy < amount {
- return vm.ErrOutOfEnergy
- }
- st.energy -= amount
-
- return nil
-}
-
func (st *StateTransition) buyEnergy() error {
mgval := new(big.Int).Mul(new(big.Int).SetUint64(st.msg.Energy()), st.energyPrice)
if st.state.GetBalance(st.msg.From()).Cmp(mgval) < 0 {
- return errInsufficientBalanceForEnergy
+ return ErrInsufficientFunds
}
if err := st.gp.SubEnergy(st.msg.Energy()); err != nil {
return err
@@ -178,52 +198,70 @@ func (st *StateTransition) preCheck() error {
}
// TransitionDb will transition the state by applying the current message and
-// returning the result including the used energy. It returns an error if failed.
-// An error indicates a consensus issue.
-func (st *StateTransition) TransitionDb() (ret []byte, usedEnergy uint64, failed bool, err error) {
- if err = st.preCheck(); err != nil {
- return
+// returning the evm execution result with following fields.
+//
+// - used energy:
+// total energy used (including energy being refunded)
+// - returndata:
+// the returned data from cvm
+// - concrete execution error:
+// various **CVM** error which aborts the execution,
+// e.g. ErrOutOfEnergy, ErrExecutionReverted
+//
+// However if any consensus issue is encountered, return the error directly with
+// a nil cvm execution result.
+func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
+ // First check this message satisfies all consensus rules before
+ // applying the message. The rules include these clauses
+ //
+ // 1. the nonce of the message caller is correct
+ // 2. caller has enough balance to cover transaction fee(energylimit * energyprice)
+ // 3. the amount of energy required is available in the block
+ // 4. the purchased energy is enough to cover intrinsic usage
+ // 5. there is no overflow when calculating intrinsic energy
+ // 6. caller has enough balance to cover asset transfer for **topmost** call
+
+ // Check clauses 1-3, buy energy if everything is correct
+ if err := st.preCheck(); err != nil {
+ return nil, err
}
msg := st.msg
sender := vm.AccountRef(msg.From())
contractCreation := msg.To() == nil
- // Pay intrinsic energy
+ // Check clauses 4-5, subtract intrinsic energy if everything is correct
energy, err := IntrinsicEnergy(st.data, contractCreation)
if err != nil {
- return nil, 0, false, err
+ return nil, err
}
- if err = st.useEnergy(energy); err != nil {
- return nil, 0, false, err
+ if st.energy < energy {
+ return nil, ErrIntrinsicEnergy
}
+ st.energy -= energy
+ // Check clause 6
+ if msg.Value().Sign() > 0 && !st.cvm.CanTransfer(st.state, msg.From(), msg.Value()) {
+ return nil, ErrInsufficientFundsForTransfer
+ }
var (
- cvm = st.cvm
- // vm errors do not effect consensus and are therefor
- // not assigned to err, except for insufficient balance
- // error.
- vmerr error
+ ret []byte
+ vmerr error // vm errors do not effect consensus and are therefore not assigned to err
)
if contractCreation {
- ret, _, st.energy, vmerr = cvm.Create(sender, st.data, st.energy, st.value)
+ ret, _, st.energy, vmerr = st.cvm.Create(sender, st.data, st.energy, st.value)
} else {
// Increment the nonce for the next transaction
st.state.SetNonce(msg.From(), st.state.GetNonce(sender.Address())+1)
- ret, st.energy, vmerr = cvm.Call(sender, st.to(), st.data, st.energy, st.value)
- }
- if vmerr != nil {
- log.Debug("VM returned with error", "err", vmerr)
- // The only possible consensus-error would be if there wasn't
- // sufficient balance to make the transfer happen. The first
- // balance transfer may never fail.
- if vmerr == vm.ErrInsufficientBalance {
- return nil, 0, false, vmerr
- }
+ ret, st.energy, vmerr = st.cvm.Call(sender, st.to(), st.data, st.energy, st.value)
}
st.refundEnergy()
st.state.AddBalance(st.cvm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.energyUsed()), st.energyPrice))
- return ret, st.energyUsed(), vmerr != nil, err
+ return &ExecutionResult{
+ UsedEnergy: st.energyUsed(),
+ Err: vmerr,
+ ReturnData: ret,
+ }, nil
}
func (st *StateTransition) refundEnergy() {
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 976cd7274..507c564d9 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -59,10 +59,6 @@ var (
// ErrInvalidSender is returned if the transaction contains an invalid signature.
ErrInvalidSender = errors.New("invalid sender")
- // ErrNonceTooLow is returned if the nonce of a transaction is lower than the
- // one present in the local chain.
- ErrNonceTooLow = errors.New("nonce too low")
-
// ErrUnderpriced is returned if a transaction's energy price is below the minimum
// configured for the transaction pool.
ErrUnderpriced = errors.New("transaction underpriced")
@@ -71,14 +67,6 @@ var (
// with a different one without the required price bump.
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
- // ErrInsufficientFunds is returned if the total cost of executing a transaction
- // is higher than the balance of the user's account.
- ErrInsufficientFunds = errors.New("insufficient funds for energy * price + value")
-
- // ErrIntrinsicEnergy is returned if the transaction is specified to use less energy
- // than required to start the invocation.
- ErrIntrinsicEnergy = errors.New("intrinsic energy too low")
-
// ErrEnergyLimit is returned if a transaction's requested energy limit exceeds the
// maximum allowance of the current block.
ErrEnergyLimit = errors.New("exceeds block energy limit")
diff --git a/core/types/block.go b/core/types/block.go
index 5d2af1c3f..aeed11d71 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -134,6 +134,17 @@ func rlpHash(x interface{}) (h common.Hash) {
return h
}
+// EmptyBody returns true if there is no additional 'body' to complete the header
+// that is: no transactions and no uncles.
+func (h *Header) EmptyBody() bool {
+ return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash
+}
+
+// EmptyReceipts returns true if there are no receipts for this header/block.
+func (h *Header) EmptyReceipts() bool {
+ return h.ReceiptHash == EmptyRootHash
+}
+
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index dd61b47b9..42f2151ac 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -218,9 +218,9 @@ func TestTransactionTimeSort(t *testing.T) {
fromNext, _ := Sender(signer, next)
if txi.EnergyPrice().Cmp(next.EnergyPrice()) < 0 {
- t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.EnergyPrice(), i+1, fromNext[:4], next.EnergyPrice())
+ t.Errorf("invalid energyprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.EnergyPrice(), i+1, fromNext[:4], next.EnergyPrice())
}
- // Make sure time order is ascending if the txs have the same gas price
+ // Make sure time order is ascending if the txs have the same energy price
if txi.EnergyPrice().Cmp(next.EnergyPrice()) == 0 && txi.time.After(next.time) {
t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time)
}
diff --git a/core/vm/common.go b/core/vm/common.go
index fbc8866f5..8a9678293 100644
--- a/core/vm/common.go
+++ b/core/vm/common.go
@@ -17,7 +17,7 @@
package vm
import (
- "math/big"
+ "github.com/core-coin/uint256"
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/common/math"
@@ -25,7 +25,7 @@ import (
// calcMemSize64 calculates the required memory size, and returns
// the size and whether the result overflowed uint64
-func calcMemSize64(off, l *big.Int) (uint64, bool) {
+func calcMemSize64(off, l *uint256.Int) (uint64, bool) {
if !l.IsUint64() {
return 0, true
}
@@ -35,16 +35,16 @@ func calcMemSize64(off, l *big.Int) (uint64, bool) {
// calcMemSize64WithUint calculates the required memory size, and returns
// the size and whether the result overflowed uint64
// Identical to calcMemSize64, but length is a uint64
-func calcMemSize64WithUint(off *big.Int, length64 uint64) (uint64, bool) {
+func calcMemSize64WithUint(off *uint256.Int, length64 uint64) (uint64, bool) {
// if length is zero, memsize is always zero, regardless of offset
if length64 == 0 {
return 0, false
}
// Check that offset doesn't overflow
- if !off.IsUint64() {
+ offset64, overflow := off.Uint64WithOverflow()
+ if overflow {
return 0, true
}
- offset64 := off.Uint64()
val := offset64 + length64
// if value < either of it's parts, then it overflowed
return val, val < offset64
@@ -64,22 +64,6 @@ func getData(data []byte, start uint64, size uint64) []byte {
return common.RightPadBytes(data[start:end], int(size))
}
-// getDataBig returns a slice from the data based on the start and size and pads
-// up to size with zero's. This function is overflow safe.
-func getDataBig(data []byte, start *big.Int, size *big.Int) []byte {
- dlen := big.NewInt(int64(len(data)))
-
- s := math.BigMin(start, dlen)
- e := math.BigMin(new(big.Int).Add(s, size), dlen)
- return common.RightPadBytes(data[s.Uint64():e.Uint64()], int(size.Uint64()))
-}
-
-// bigUint64 returns the integer casted to a uint64 and returns whether it
-// overflowed in the process.
-func bigUint64(v *big.Int) (uint64, bool) {
- return v.Uint64(), !v.IsUint64()
-}
-
// toWordSize returns the ceiled word size required for memory expansion.
func toWordSize(size uint64) uint64 {
if size > math.MaxUint64-31 {
@@ -88,12 +72,3 @@ func toWordSize(size uint64) uint64 {
return (size + 31) / 32
}
-
-func allZero(b []byte) bool {
- for _, byte := range b {
- if byte != 0 {
- return false
- }
- }
- return true
-}
diff --git a/core/vm/contract.go b/core/vm/contract.go
index f18f77567..64c7c92cc 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -17,6 +17,7 @@
package vm
import (
+ "github.com/core-coin/uint256"
"math/big"
"github.com/core-coin/go-core/common"
@@ -57,8 +58,8 @@ type Contract struct {
CodeAddr *common.Address
Input []byte
- Energy uint64
- value *big.Int
+ Energy uint64
+ value *big.Int
}
// NewContract returns a new contract environment for the execution of CVM.
@@ -81,17 +82,36 @@ func NewContract(caller ContractRef, object ContractRef, value *big.Int, energy
return c
}
-func (c *Contract) validJumpdest(dest *big.Int) bool {
- udest := dest.Uint64()
- // PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
+func (c *Contract) validJumpdest(dest *uint256.Int) bool {
+ udest, overflow := dest.Uint64WithOverflow()
+ // PC cannot go beyond len(code) and certainly can't be bigger than 63 bits.
// Don't bother checking for JUMPDEST in that case.
- if dest.BitLen() >= 63 || udest >= uint64(len(c.Code)) {
+ if overflow || udest >= uint64(len(c.Code)) {
return false
}
// Only JUMPDESTs allowed for destinations
if OpCode(c.Code[udest]) != JUMPDEST {
return false
}
+ return c.isCode(udest)
+}
+
+func (c *Contract) validJumpSubdest(udest uint64) bool {
+ // PC cannot go beyond len(code) and certainly can't be bigger than 63 bits.
+ // Don't bother checking for BEGINSUB in that case.
+ if int64(udest) < 0 || udest >= uint64(len(c.Code)) {
+ return false
+ }
+ // Only BEGINSUBs allowed for destinations
+ if OpCode(c.Code[udest]) != BEGINSUB {
+ return false
+ }
+ return c.isCode(udest)
+}
+
+// isCode returns true if the provided PC location is an actual opcode, as
+// opposed to a data-segment following a PUSHN operation.
+func (c *Contract) isCode(udest uint64) bool {
// Do we have a contract hash already?
if c.CodeHash != (common.Hash{}) {
// Does parent context have the analysis?
@@ -102,6 +122,8 @@ func (c *Contract) validJumpdest(dest *big.Int) bool {
analysis = codeBitmap(c.Code)
c.jumpdests[c.CodeHash] = analysis
}
+ // Also stash it in current contract for faster access
+ c.analysis = analysis
return analysis.codeSegment(udest)
}
// We don't have the code hash, most likely a piece of initcode not already
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 767af2c06..72c11a193 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -56,12 +56,18 @@ var PrecompiledContracts = map[common.Address]PrecompiledContract{
}
// RunPrecompiledContract runs and evaluates the output of a precompiled contract.
-func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contract) (ret []byte, err error) {
- energy := p.RequiredEnergy(input)
- if contract.UseEnergy(energy) {
- return p.Run(input)
+// It returns
+// - the returned bytes,
+// - the _remaining_ energy,
+// - any error that occurred
+func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedEnergy uint64) (ret []byte, remainingEnergy uint64, err error) {
+ energyCost := p.RequiredEnergy(input)
+ if suppliedEnergy < energyCost {
+ return nil, 0, ErrOutOfEnergy
}
- return nil, ErrOutOfEnergy
+ suppliedEnergy -= energyCost
+ output, err := p.Run(input)
+ return output, suppliedEnergy, err
}
// ECRECOVER implemented as a native contract.
@@ -136,6 +142,7 @@ func (c *dataCopy) Run(in []byte) ([]byte, error) {
type bigModExp struct{}
var (
+ big0 = big.NewInt(0)
big1 = big.NewInt(1)
big4 = big.NewInt(4)
big8 = big.NewInt(8)
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index 3846c99b0..a9ef4a1a6 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -19,7 +19,6 @@ package vm
import (
"bytes"
"fmt"
- "math/big"
"reflect"
"testing"
@@ -405,14 +404,9 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
}
p := PrecompiledContracts[address]
in := common.Hex2Bytes(test.input)
- testAddr, err := common.HexToAddress("cb390000000000000000000000000000000000001337")
- if err != nil {
- t.Error(err)
- }
- contract := NewContract(AccountRef(testAddr),
- nil, new(big.Int), p.RequiredEnergy(in))
- t.Run(fmt.Sprintf("%s-Energy=%d", test.name, contract.Energy), func(t *testing.T) {
- if res, err := RunPrecompiledContract(p, in, contract); err != nil {
+ energy := p.RequiredEnergy(in)
+ t.Run(fmt.Sprintf("%s-Energy=%d", test.name, energy), func(t *testing.T) {
+ if res, _, err := RunPrecompiledContract(p, in, energy); err != nil {
t.Error(err)
} else if common.Bytes2Hex(res) != test.expected {
t.Errorf("Expected %v, got %v", test.expected, common.Bytes2Hex(res))
@@ -432,14 +426,10 @@ func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) {
}
p := PrecompiledContracts[address]
in := common.Hex2Bytes(test.input)
- testAddr, err := common.HexToAddress("cb390000000000000000000000000000000000001337")
- if err != nil {
- t.Error(err)
- }
- contract := NewContract(AccountRef(testAddr),
- nil, new(big.Int), p.RequiredEnergy(in)-1)
- t.Run(fmt.Sprintf("%s-Energy=%d", test.name, contract.Energy), func(t *testing.T) {
- _, err := RunPrecompiledContract(p, in, contract)
+ energy := p.RequiredEnergy(in) - 1
+
+ t.Run(fmt.Sprintf("%s-Energy=%d", test.name, energy), func(t *testing.T) {
+ _, _, err := RunPrecompiledContract(p, in, energy)
if err.Error() != "out of energy" {
t.Errorf("Expected error [out of energy], got [%v]", err)
}
@@ -458,15 +448,9 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing
}
p := PrecompiledContracts[address]
in := common.Hex2Bytes(test.input)
- testAddr, err := common.HexToAddress("cb860000000000000000000000000000000000031337")
- if err != nil {
- t.Error(err)
- }
- contract := NewContract(AccountRef(testAddr),
- nil, new(big.Int), p.RequiredEnergy(in))
-
+ energy := p.RequiredEnergy(in)
t.Run(test.name, func(t *testing.T) {
- _, err := RunPrecompiledContract(p, in, contract)
+ _, _, err := RunPrecompiledContract(p, in, energy)
if !reflect.DeepEqual(err, test.expectedError) {
t.Errorf("Expected error [%v], got [%v]", test.expectedError, err)
}
@@ -489,24 +473,16 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
p := PrecompiledContracts[address]
in := common.Hex2Bytes(test.input)
reqEnergy := p.RequiredEnergy(in)
- testAddr, err := common.HexToAddress("cb860000000000000000000000000000000000031337")
- if err != nil {
- bench.Error(err)
- }
- contract := NewContract(AccountRef(testAddr),
- nil, new(big.Int), reqEnergy)
-
var (
res []byte
data = make([]byte, len(in))
)
- bench.Run(fmt.Sprintf("%s-Energy=%d", test.name, contract.Energy), func(bench *testing.B) {
+ bench.Run(fmt.Sprintf("%s-Energy=%d", test.name, reqEnergy), func(bench *testing.B) {
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
- contract.Energy = reqEnergy
copy(data, in)
- res, err = RunPrecompiledContract(p, data, contract)
+ res, _, err = RunPrecompiledContract(p, data, reqEnergy)
}
bench.StopTimer()
//Check if it is correct
diff --git a/core/vm/cvm.go b/core/vm/cvm.go
index 765facda4..5655bccce 100644
--- a/core/vm/cvm.go
+++ b/core/vm/cvm.go
@@ -17,6 +17,8 @@
package vm
import (
+ "errors"
+ "github.com/core-coin/uint256"
"math/big"
"sync/atomic"
"time"
@@ -40,14 +42,15 @@ type (
GetHashFunc func(uint64) common.Hash
)
+func (cvm *CVM) precompile(addr common.Address) (PrecompiledContract, bool) {
+ var precompiles map[common.Address]PrecompiledContract
+ precompiles = PrecompiledContracts
+ p, ok := precompiles[addr]
+ return p, ok
+}
+
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
func run(cvm *CVM, contract *Contract, input []byte, readOnly bool) ([]byte, error) {
- if contract.CodeAddr != nil {
- precompiles := PrecompiledContracts
- if p := precompiles[*contract.CodeAddr]; p != nil {
- return RunPrecompiledContract(p, input, contract)
- }
- }
for _, interpreter := range cvm.interpreters {
if interpreter.CanRun(contract.Code) {
if cvm.interpreter != interpreter {
@@ -61,7 +64,7 @@ func run(cvm *CVM, contract *Contract, input []byte, readOnly bool) ([]byte, err
return interpreter.Run(contract, input, readOnly)
}
}
- return nil, ErrNoCompatibleInterpreter
+ return nil, errors.New("no compatible interpreter")
}
// Context provides the CVM with auxiliary information. Once provided
@@ -76,12 +79,12 @@ type Context struct {
GetHash GetHashFunc
// Message information
- Origin common.Address // Provides information for ORIGIN
+ Origin common.Address // Provides information for ORIGIN
EnergyPrice *big.Int // Provides information for ENERGYPRICE
// Block information
Coinbase common.Address // Provides information for COINBASE
- EnergyLimit uint64 // Provides information for ENERGYLIMIT
+ EnergyLimit uint64 // Provides information for ENERGYLIMIT
BlockNumber *big.Int // Provides information for NUMBER
Time *big.Int // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY
@@ -190,17 +193,15 @@ func (cvm *CVM) Call(caller ContractRef, addr common.Address, input []byte, ener
return nil, energy, ErrDepth
}
// Fail if we're trying to transfer more than the available balance
- if !cvm.Context.CanTransfer(cvm.StateDB, caller.Address(), value) {
+ if value.Sign() != 0 && !cvm.Context.CanTransfer(cvm.StateDB, caller.Address(), value) {
return nil, energy, ErrInsufficientBalance
}
- var (
- to = AccountRef(addr)
- snapshot = cvm.StateDB.Snapshot()
- )
+ snapshot := cvm.StateDB.Snapshot()
+ p, isPrecompile := cvm.precompile(addr)
+
if !cvm.StateDB.Exist(addr) {
- precompiles := PrecompiledContracts
- if precompiles[addr] == nil && value.Sign() == 0 {
+ if !isPrecompile && value.Sign() == 0 {
// Calling a non existing account, don't do anything, but ping the tracer
if cvm.vmConfig.Debug && cvm.depth == 0 {
cvm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, energy, value)
@@ -210,34 +211,47 @@ func (cvm *CVM) Call(caller ContractRef, addr common.Address, input []byte, ener
}
cvm.StateDB.CreateAccount(addr)
}
- cvm.Transfer(cvm.StateDB, caller.Address(), to.Address(), value)
- // Initialise a new contract and set the code that is to be used by the CVM.
- // The contract is a scoped environment for this execution context only.
- contract := NewContract(caller, to, value, energy)
- contract.SetCallCode(&addr, cvm.StateDB.GetCodeHash(addr), cvm.StateDB.GetCode(addr))
-
- // Even if the account has no code, we need to continue because it might be a precompile
- start := time.Now()
+ cvm.Transfer(cvm.StateDB, caller.Address(), addr, value)
// Capture the tracer start/end events in debug mode
if cvm.vmConfig.Debug && cvm.depth == 0 {
cvm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, energy, value)
- defer func() { // Lazy evaluation of the parameters
- cvm.vmConfig.Tracer.CaptureEnd(ret, energy-contract.Energy, time.Since(start), err)
- }()
+ defer func(startEnergy uint64, startTime time.Time) { // Lazy evaluation of the parameters
+ cvm.vmConfig.Tracer.CaptureEnd(ret, startEnergy-energy, time.Since(startTime), err)
+ }(energy, time.Now())
+ }
+ if isPrecompile {
+ ret, energy, err = RunPrecompiledContract(p, input, energy)
+ } else {
+ // Initialise a new contract and set the code that is to be used by the CVM.
+ // The contract is a scoped environment for this execution context only.
+ code := cvm.StateDB.GetCode(addr)
+ if len(code) == 0 {
+ ret, err = nil, nil // energy is unchanged
+ } else {
+ addrCopy := addr
+ // If the account has no code, we can abort here
+ // The depth-check is already done, and precompiles handled above
+ contract := NewContract(caller, AccountRef(addrCopy), value, energy)
+ contract.SetCallCode(&addrCopy, cvm.StateDB.GetCodeHash(addrCopy), code)
+ ret, err = run(cvm, contract, input, false)
+ energy = contract.Energy
+ }
}
- ret, err = run(cvm, contract, input, false)
// When an error was returned by the CVM or when setting the creation code
// above we revert to the snapshot and consume any energy remaining.
if err != nil {
cvm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
- contract.UseEnergy(contract.Energy)
+ if err != ErrExecutionReverted {
+ energy = 0
}
+ // TODO: consider clearing up unused snapshots:
+ //} else {
+ // cvm.StateDB.DiscardSnapshot(snapshot)
}
- return ret, contract.Energy, err
+ return ret, energy, err
}
// CallCode executes the contract associated with the addr with the given input
@@ -257,27 +271,34 @@ func (cvm *CVM) CallCode(caller ContractRef, addr common.Address, input []byte,
return nil, energy, ErrDepth
}
// Fail if we're trying to transfer more than the available balance
- if !cvm.CanTransfer(cvm.StateDB, caller.Address(), value) {
+ // Note although it's noop to transfer X core to caller itself. But
+ // if caller doesn't have enough balance, it would be an error to allow
+ // over-charging itself. So the check here is necessary.
+ if !cvm.Context.CanTransfer(cvm.StateDB, caller.Address(), value) {
return nil, energy, ErrInsufficientBalance
}
- var (
- snapshot = cvm.StateDB.Snapshot()
- to = AccountRef(caller.Address())
- )
- // Initialise a new contract and set the code that is to be used by the CVM.
- // The contract is a scoped environment for this execution context only.
- contract := NewContract(caller, to, value, energy)
- contract.SetCallCode(&addr, cvm.StateDB.GetCodeHash(addr), cvm.StateDB.GetCode(addr))
-
- ret, err = run(cvm, contract, input, false)
+ var snapshot = cvm.StateDB.Snapshot()
+
+ // It is allowed to call precompiles, even via delegatecall
+ if p, isPrecompile := cvm.precompile(addr); isPrecompile {
+ ret, energy, err = RunPrecompiledContract(p, input, energy)
+ } else {
+ addrCopy := addr
+ // Initialise a new contract and set the code that is to be used by the CVM.
+ // The contract is a scoped environment for this execution context only.
+ contract := NewContract(caller, AccountRef(caller.Address()), value, energy)
+ contract.SetCallCode(&addrCopy, cvm.StateDB.GetCodeHash(addrCopy), cvm.StateDB.GetCode(addrCopy))
+ ret, err = run(cvm, contract, input, false)
+ energy = contract.Energy
+ }
if err != nil {
cvm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
- contract.UseEnergy(contract.Energy)
+ if err != ErrExecutionReverted {
+ energy = 0
}
}
- return ret, contract.Energy, err
+ return ret, energy, err
}
// DelegateCall executes the contract associated with the addr with the given input
@@ -294,23 +315,26 @@ func (cvm *CVM) DelegateCall(caller ContractRef, addr common.Address, input []by
return nil, energy, ErrDepth
}
- var (
- snapshot = cvm.StateDB.Snapshot()
- to = AccountRef(caller.Address())
- )
-
- // Initialise a new contract and make initialise the delegate values
- contract := NewContract(caller, to, nil, energy).AsDelegate()
- contract.SetCallCode(&addr, cvm.StateDB.GetCodeHash(addr), cvm.StateDB.GetCode(addr))
-
- ret, err = run(cvm, contract, input, false)
+ var snapshot = cvm.StateDB.Snapshot()
+
+ // It is allowed to call precompiles, even via delegatecall
+ if p, isPrecompile := cvm.precompile(addr); isPrecompile {
+ ret, energy, err = RunPrecompiledContract(p, input, energy)
+ } else {
+ addrCopy := addr
+ // Initialise a new contract and make initialise the delegate values
+ contract := NewContract(caller, AccountRef(caller.Address()), nil, energy).AsDelegate()
+ contract.SetCallCode(&addrCopy, cvm.StateDB.GetCodeHash(addrCopy), cvm.StateDB.GetCode(addrCopy))
+ ret, err = run(cvm, contract, input, false)
+ energy = contract.Energy
+ }
if err != nil {
cvm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
- contract.UseEnergy(contract.Energy)
+ if err != ErrExecutionReverted {
+ energy = 0
}
}
- return ret, contract.Energy, err
+ return ret, energy, err
}
// StaticCall executes the contract associated with the addr with the given input
@@ -325,31 +349,41 @@ func (cvm *CVM) StaticCall(caller ContractRef, addr common.Address, input []byte
if cvm.depth > int(params.CallCreateDepth) {
return nil, energy, ErrDepth
}
-
- var (
- to = AccountRef(addr)
- snapshot = cvm.StateDB.Snapshot()
- )
- // Initialise a new contract and set the code that is to be used by the CVM.
- // The contract is a scoped environment for this execution context only.
- contract := NewContract(caller, to, new(big.Int), energy)
- contract.SetCallCode(&addr, cvm.StateDB.GetCodeHash(addr), cvm.StateDB.GetCode(addr))
+ // We take a snapshot here. This is a bit counter-intuitive, and could probably be skipped.
+ // However, even a staticcall is considered a 'touch'. On mainnet, static calls were introduced
+ // after all empty accounts were deleted, so this is not required. However, if we omit this,
+ // then certain tests start failing; stRevertTest/RevertPrecompiledTouchExactOOG.json.
+ // We could change this, but for now it's left for legacy reasons
+ var snapshot = cvm.StateDB.Snapshot()
// We do an AddBalance of zero here, just in order to trigger a touch.
// but is the correct thing to do and matters on other networks, in tests, and potential
// future scenarios
- cvm.StateDB.AddBalance(addr, bigZero)
-
- // When an error was returned by the CVM or when setting the creation code
- // above we revert to the snapshot and consume any energy remaining.
- ret, err = run(cvm, contract, input, true)
+ cvm.StateDB.AddBalance(addr, big0)
+
+ if p, isPrecompile := cvm.precompile(addr); isPrecompile {
+ ret, energy, err = RunPrecompiledContract(p, input, energy)
+ } else {
+ // At this point, we use a copy of address. If we don't, the go compiler will
+ // leak the 'contract' to the outer scope, and make allocation for 'contract'
+ // even if the actual execution ends on RunPrecompiled above.
+ addrCopy := addr
+ // Initialise a new contract and set the code that is to be used by the CVM.
+ // The contract is a scoped environment for this execution context only.
+ contract := NewContract(caller, AccountRef(addrCopy), new(big.Int), energy)
+ contract.SetCallCode(&addrCopy, cvm.StateDB.GetCodeHash(addrCopy), cvm.StateDB.GetCode(addrCopy))
+ // When an error was returned by the CVM or when setting the creation code
+ // above we revert to the snapshot and consume any energy remaining.
+ ret, err = run(cvm, contract, input, true)
+ energy = contract.Energy
+ }
if err != nil {
cvm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
- contract.UseEnergy(contract.Energy)
+ if err != ErrExecutionReverted {
+ energy = 0
}
}
- return ret, contract.Energy, err
+ return ret, energy, err
}
type codeAndHash struct {
@@ -423,13 +457,13 @@ func (cvm *CVM) create(caller ContractRef, codeAndHash *codeAndHash, energy uint
// above we revert to the snapshot and consume any energy remaining.
if maxCodeSizeExceeded || (err != nil && err != ErrCodeStoreOutOfEnergy) {
cvm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseEnergy(contract.Energy)
}
}
// Assign err if contract code size exceeds the max while the err is still empty.
if maxCodeSizeExceeded && err == nil {
- err = errMaxCodeSizeExceeded
+ err = ErrMaxCodeSizeExceeded
}
if cvm.vmConfig.Debug && cvm.depth == 0 {
cvm.vmConfig.Tracer.CaptureEnd(ret, energy-contract.Energy, time.Since(start), err)
@@ -448,9 +482,9 @@ func (cvm *CVM) Create(caller ContractRef, code []byte, energy uint64, value *bi
//
// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
-func (cvm *CVM) Create2(caller ContractRef, code []byte, energy uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverEnergy uint64, err error) {
+func (cvm *CVM) Create2(caller ContractRef, code []byte, energy uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverEnergy uint64, err error) {
codeAndHash := &codeAndHash{code: code}
- contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), codeAndHash.Hash().Bytes())
+ contractAddr = crypto.CreateAddress2(caller.Address(), common.Hash(salt.Bytes32()), codeAndHash.Hash().Bytes())
return cvm.create(caller, codeAndHash, energy, endowment, contractAddr)
}
diff --git a/core/vm/eips.go b/core/vm/eips.go
index b24d10377..bed0addaa 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -16,15 +16,17 @@
package vm
-func opSelfBalance(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- balance := interpreter.intPool.get().Set(interpreter.cvm.StateDB.GetBalance(contract.Address()))
- stack.push(balance)
+import "github.com/core-coin/uint256"
+
+func opSelfBalance(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ balance, _ := uint256.FromBig(interpreter.cvm.StateDB.GetBalance(callContext.contract.Address()))
+ callContext.stack.push(balance)
return nil, nil
}
// opNetworkID implements NETWORKID opcode
-func opNetworkID(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- networkId := interpreter.intPool.get().Set(interpreter.cvm.chainConfig.NetworkID)
- stack.push(networkId)
+func opNetworkID(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ networkId, _ := uint256.FromBig(interpreter.cvm.chainConfig.NetworkID)
+ callContext.stack.push(networkId)
return nil, nil
}
diff --git a/core/vm/energy.go b/core/vm/energy.go
index 798e5b7a5..d52b49aee 100644
--- a/core/vm/energy.go
+++ b/core/vm/energy.go
@@ -17,7 +17,7 @@
package vm
import (
- "math/big"
+ "github.com/core-coin/uint256"
)
// Energy costs
@@ -33,7 +33,7 @@ const (
// callEnergy returns the actual energy cost of the call.
//
// The returned energy is energy - base * 63 / 64.
-func callEnergy(availableEnergy, base uint64, callCost *big.Int) (uint64, error) {
+func callEnergy(availableEnergy, base uint64, callCost *uint256.Int) (uint64, error) {
availableEnergy = availableEnergy - base
energy := availableEnergy - availableEnergy/64
// If the bit length exceeds 64 bit we know that the newly calculated "energy" for CIP150
@@ -43,7 +43,7 @@ func callEnergy(availableEnergy, base uint64, callCost *big.Int) (uint64, error)
return energy, nil
}
if !callCost.IsUint64() {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return callCost.Uint64(), nil
diff --git a/core/vm/energy_table.go b/core/vm/energy_table.go
index 04909ab06..4b7a7c274 100644
--- a/core/vm/energy_table.go
+++ b/core/vm/energy_table.go
@@ -36,7 +36,7 @@ func memoryEnergyCost(mem *Memory, newMemSize uint64) (uint64, error) {
// overflow. The constant 0x1FFFFFFFE0 is the highest number that can be used
// without overflowing the energy calculation.
if newMemSize > 0x1FFFFFFFE0 {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
newMemSizeWords := toWordSize(newMemSize)
newMemSize = newMemSizeWords * 32
@@ -70,17 +70,17 @@ func memoryCopierEnergy(stackpos int) energyFunc {
return 0, err
}
// And energy for copying data, charged per word at param.CopyEnergy
- words, overflow := bigUint64(stack.Back(stackpos))
+ words, overflow := stack.Back(stackpos).Uint64WithOverflow()
if overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if words, overflow = math.SafeMul(toWordSize(words), params.CopyEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if energy, overflow = math.SafeAdd(energy, words); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -114,14 +114,14 @@ func energySStore(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memor
// Energy sentry honoured, do the actual energy calculation based on the stored value
var (
y, x = stack.Back(1), stack.Back(0)
- current = cvm.StateDB.GetState(contract.Address(), common.BigToHash(x))
+ current = cvm.StateDB.GetState(contract.Address(), common.Hash(x.Bytes32()))
)
- value := common.BigToHash(y)
+ value := common.Hash(y.Bytes32())
if current == value { // noop (1)
return params.SstoreNoopEnergy, nil
}
- original := cvm.StateDB.GetCommittedState(contract.Address(), common.BigToHash(x))
+ original := cvm.StateDB.GetCommittedState(contract.Address(), common.Hash(x.Bytes32()))
if original == current {
if original == (common.Hash{}) { // create slot (2.1.1)
return params.SstoreInitEnergy, nil
@@ -150,9 +150,9 @@ func energySStore(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memor
func makeEnergyLog(n uint64) energyFunc {
return func(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
- requestedSize, overflow := bigUint64(stack.Back(1))
+ requestedSize, overflow := stack.Back(1).Uint64WithOverflow()
if overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
energy, err := memoryEnergyCost(mem, memorySize)
@@ -161,18 +161,18 @@ func makeEnergyLog(n uint64) energyFunc {
}
if energy, overflow = math.SafeAdd(energy, params.LogEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if energy, overflow = math.SafeAdd(energy, n*params.LogTopicEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
var memorySizeEnergy uint64
if memorySizeEnergy, overflow = math.SafeMul(requestedSize, params.LogDataEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if energy, overflow = math.SafeAdd(energy, memorySizeEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -183,15 +183,15 @@ func energySha3(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memoryS
if err != nil {
return 0, err
}
- wordEnergy, overflow := bigUint64(stack.Back(1))
+ wordEnergy, overflow := stack.Back(1).Uint64WithOverflow()
if overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if wordEnergy, overflow = math.SafeMul(toWordSize(wordEnergy), params.Sha3WordEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if energy, overflow = math.SafeAdd(energy, wordEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -217,15 +217,15 @@ func energyCreate2(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memo
if err != nil {
return 0, err
}
- wordEnergy, overflow := bigUint64(stack.Back(2))
+ wordEnergy, overflow := stack.Back(2).Uint64WithOverflow()
if overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if wordEnergy, overflow = math.SafeMul(toWordSize(wordEnergy), params.Sha3WordEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
if energy, overflow = math.SafeAdd(energy, wordEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -234,20 +234,20 @@ func energyExp(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memorySi
expByteLen := uint64((stack.data[stack.len()-2].BitLen() + 7) / 8)
var (
- energy = expByteLen * params.ExpByte // no overflow check required. Max is 256 * ExpByte energy
+ energy = expByteLen * params.ExpByte // no overflow check required. Max is 256 * ExpByte energy
overflow bool
)
if energy, overflow = math.SafeAdd(energy, params.ExpEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
func energyCall(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
- energy uint64
- transfersValue = stack.Back(2).Sign() != 0
- address = common.BigToAddress(stack.Back(1))
+ energy uint64
+ transfersValue = !stack.Back(2).IsZero()
+ address = common.Address(stack.Back(1).Bytes22())
)
if transfersValue && cvm.StateDB.Empty(address) {
energy += params.CallNewAccountEnergy
@@ -261,15 +261,15 @@ func energyCall(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, memoryS
}
var overflow bool
if energy, overflow = math.SafeAdd(energy, memoryEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
- cvm.callEnergyTemp, err = callEnergy( contract.Energy, energy, stack.Back(0))
+ cvm.callEnergyTemp, err = callEnergy(contract.Energy, energy, stack.Back(0))
if err != nil {
return 0, err
}
if energy, overflow = math.SafeAdd(energy, cvm.callEnergyTemp); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -280,21 +280,21 @@ func energyCallCode(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, mem
return 0, err
}
var (
- energy uint64
+ energy uint64
overflow bool
)
if stack.Back(2).Sign() != 0 {
energy += params.CallValueTransferEnergy
}
if energy, overflow = math.SafeAdd(energy, memoryEnergy); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
cvm.callEnergyTemp, err = callEnergy(contract.Energy, energy, stack.Back(0))
if err != nil {
return 0, err
}
if energy, overflow = math.SafeAdd(energy, cvm.callEnergyTemp); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -310,7 +310,7 @@ func energyDelegateCall(cvm *CVM, contract *Contract, stack *Stack, mem *Memory,
}
var overflow bool
if energy, overflow = math.SafeAdd(energy, cvm.callEnergyTemp); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -326,7 +326,7 @@ func energyStaticCall(cvm *CVM, contract *Contract, stack *Stack, mem *Memory, m
}
var overflow bool
if energy, overflow = math.SafeAdd(energy, cvm.callEnergyTemp); overflow {
- return 0, errEnergyUintOverflow
+ return 0, ErrEnergyUintOverflow
}
return energy, nil
}
@@ -335,7 +335,7 @@ func energySelfdestruct(cvm *CVM, contract *Contract, stack *Stack, mem *Memory,
var energy uint64
energy = params.SelfdestructEnergy
- var address = common.BigToAddress(stack.Back(0))
+ var address = common.Address(stack.Back(0).Bytes22())
if cvm.StateDB.Empty(address) && cvm.StateDB.GetBalance(contract.Address()).Sign() != 0 {
energy += params.CreateBySelfdestructEnergy
diff --git a/core/vm/energy_table_test.go b/core/vm/energy_table_test.go
index c36b7c805..9b8b4197b 100644
--- a/core/vm/energy_table_test.go
+++ b/core/vm/energy_table_test.go
@@ -39,8 +39,8 @@ func TestMemoryEnergyCost(t *testing.T) {
}
for i, tt := range tests {
v, err := memoryEnergyCost(&Memory{}, tt.size)
- if (err == errEnergyUintOverflow) != tt.overflow {
- t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == errEnergyUintOverflow, tt.overflow)
+ if (err == ErrEnergyUintOverflow) != tt.overflow {
+ t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == ErrEnergyUintOverflow, tt.overflow)
}
if v != tt.cost {
t.Errorf("test %d: energy cost mismatch: have %v, want %v", i, v, tt.cost)
diff --git a/core/vm/errors.go b/core/vm/errors.go
index 0b48584f9..d6003a843 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -16,15 +16,56 @@
package vm
-import "errors"
+import (
+ "errors"
+ "fmt"
+)
-// List execution errors
+// List cvm execution errors
var (
- ErrOutOfEnergy = errors.New("out of energy")
- ErrCodeStoreOutOfEnergy = errors.New("contract creation code storage out of energy")
+ // ErrInvalidSubroutineEntry means that a BEGINSUB was reached via iteration,
+ // as opposed to from a JUMPSUB instruction
+ ErrInvalidSubroutineEntry = errors.New("invalid subroutine entry")
+ ErrOutOfEnergy = errors.New("out of energy")
+ ErrCodeStoreOutOfEnergy = errors.New("contract creation code storage out of energy")
ErrDepth = errors.New("max call depth exceeded")
- ErrTraceLimitReached = errors.New("the number of logs reached the specified limit")
ErrInsufficientBalance = errors.New("insufficient balance for transfer")
ErrContractAddressCollision = errors.New("contract address collision")
- ErrNoCompatibleInterpreter = errors.New("no compatible interpreter")
+ ErrExecutionReverted = errors.New("execution reverted")
+ ErrMaxCodeSizeExceeded = errors.New("max code size exceeded")
+ ErrInvalidJump = errors.New("invalid jump destination")
+ ErrWriteProtection = errors.New("write protection")
+ ErrReturnDataOutOfBounds = errors.New("return data out of bounds")
+ ErrEnergyUintOverflow = errors.New("energy uint64 overflow")
+ ErrInvalidRetsub = errors.New("invalid retsub")
+ ErrReturnStackExceeded = errors.New("return stack limit reached")
)
+
+// ErrStackUnderflow wraps a cvm error raised when the number of items on
+// the stack is less than the minimal requirement.
+type ErrStackUnderflow struct {
+ stackLen int
+ required int
+}
+
+func (e *ErrStackUnderflow) Error() string {
+ return fmt.Sprintf("stack underflow (%d <=> %d)", e.stackLen, e.required)
+}
+
+// ErrStackOverflow wraps a cvm error raised when the number of items on
+// the stack exceeds the maximum allowance.
+type ErrStackOverflow struct {
+ stackLen int
+ limit int
+}
+
+func (e *ErrStackOverflow) Error() string {
+ return fmt.Sprintf("stack limit reached %d (%d)", e.stackLen, e.limit)
+}
+
+// ErrInvalidOpCode wraps a cvm error raised when an invalid opcode is encountered.
+type ErrInvalidOpCode struct {
+ opcode OpCode
+}
+
+func (e *ErrInvalidOpCode) Error() string { return fmt.Sprintf("invalid opcode: %s", e.opcode) }
diff --git a/core/vm/gen_structlog.go b/core/vm/gen_structlog.go
index cf862da2d..2d145819d 100644
--- a/core/vm/gen_structlog.go
+++ b/core/vm/gen_structlog.go
@@ -23,6 +23,8 @@ func (s StructLog) MarshalJSON() ([]byte, error) {
Memory hexutil.Bytes `json:"memory"`
MemorySize int `json:"memSize"`
Stack []*math.HexOrDecimal256 `json:"stack"`
+ ReturnStack []math.HexOrDecimal64 `json:"returnStack"`
+ ReturnData []byte `json:"returnData"`
Storage map[common.Hash]common.Hash `json:"-"`
Depth int `json:"depth"`
RefundCounter uint64 `json:"refund"`
@@ -43,6 +45,13 @@ func (s StructLog) MarshalJSON() ([]byte, error) {
enc.Stack[k] = (*math.HexOrDecimal256)(v)
}
}
+ if s.ReturnStack != nil {
+ enc.ReturnStack = make([]math.HexOrDecimal64, len(s.ReturnStack))
+ for k, v := range s.ReturnStack {
+ enc.ReturnStack[k] = math.HexOrDecimal64(v)
+ }
+ }
+ enc.ReturnData = s.ReturnData
enc.Storage = s.Storage
enc.Depth = s.Depth
enc.RefundCounter = s.RefundCounter
@@ -62,6 +71,8 @@ func (s *StructLog) UnmarshalJSON(input []byte) error {
Memory *hexutil.Bytes `json:"memory"`
MemorySize *int `json:"memSize"`
Stack []*math.HexOrDecimal256 `json:"stack"`
+ ReturnStack []math.HexOrDecimal64 `json:"returnStack"`
+ ReturnData []byte `json:"returnData"`
Storage map[common.Hash]common.Hash `json:"-"`
Depth *int `json:"depth"`
RefundCounter *uint64 `json:"refund"`
@@ -95,6 +106,15 @@ func (s *StructLog) UnmarshalJSON(input []byte) error {
s.Stack[k] = (*big.Int)(v)
}
}
+ if dec.ReturnStack != nil {
+ s.ReturnStack = make([]uint32, len(dec.ReturnStack))
+ for k, v := range dec.ReturnStack {
+ s.ReturnStack[k] = uint32(v)
+ }
+ }
+ if dec.ReturnData != nil {
+ s.ReturnData = dec.ReturnData
+ }
if dec.Storage != nil {
s.Storage = dec.Storage
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 164f8fe7f..3ca0f5119 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,374 +17,228 @@
package vm
import (
- "errors"
- "math/big"
-
"github.com/core-coin/go-core/common"
- "github.com/core-coin/go-core/common/math"
"github.com/core-coin/go-core/core/types"
"github.com/core-coin/go-core/params"
+ "github.com/core-coin/uint256"
"golang.org/x/crypto/sha3"
)
-var (
- bigZero = new(big.Int)
- tt255 = math.BigPow(2, 255)
- errWriteProtection = errors.New("cvm: write protection")
- errReturnDataOutOfBounds = errors.New("cvm: return data out of bounds")
- errExecutionReverted = errors.New("cvm: execution reverted")
- errMaxCodeSizeExceeded = errors.New("cvm: max code size exceeded")
- errInvalidJump = errors.New("cvm: invalid jump destination")
-)
-
-func opAdd(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- math.U256(y.Add(x, y))
-
- interpreter.intPool.put(x)
+func opAdd(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Add(&x, y)
return nil, nil
}
-func opSub(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- math.U256(y.Sub(x, y))
-
- interpreter.intPool.put(x)
+func opSub(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Sub(&x, y)
return nil, nil
}
-func opMul(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.pop()
- stack.push(math.U256(x.Mul(x, y)))
-
- interpreter.intPool.put(y)
+func opMul(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Mul(&x, y)
return nil, nil
}
-func opDiv(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- if y.Sign() != 0 {
- math.U256(y.Div(x, y))
- } else {
- y.SetUint64(0)
- }
- interpreter.intPool.put(x)
+func opDiv(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Div(&x, y)
return nil, nil
}
-func opSdiv(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := math.S256(stack.pop()), math.S256(stack.pop())
- res := interpreter.intPool.getZero()
-
- if y.Sign() == 0 || x.Sign() == 0 {
- stack.push(res)
- } else {
- if x.Sign() != y.Sign() {
- res.Div(x.Abs(x), y.Abs(y))
- res.Neg(res)
- } else {
- res.Div(x.Abs(x), y.Abs(y))
- }
- stack.push(math.U256(res))
- }
- interpreter.intPool.put(x, y)
+func opSdiv(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.SDiv(&x, y)
return nil, nil
}
-func opMod(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.pop()
- if y.Sign() == 0 {
- stack.push(x.SetUint64(0))
- } else {
- stack.push(math.U256(x.Mod(x, y)))
- }
- interpreter.intPool.put(y)
+func opMod(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Mod(&x, y)
return nil, nil
}
-func opSmod(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := math.S256(stack.pop()), math.S256(stack.pop())
- res := interpreter.intPool.getZero()
-
- if y.Sign() == 0 {
- stack.push(res)
- } else {
- if x.Sign() < 0 {
- res.Mod(x.Abs(x), y.Abs(y))
- res.Neg(res)
- } else {
- res.Mod(x.Abs(x), y.Abs(y))
- }
- stack.push(math.U256(res))
- }
- interpreter.intPool.put(x, y)
+func opSmod(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.SMod(&x, y)
return nil, nil
}
-func opExp(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- base, exponent := stack.pop(), stack.pop()
- // some shortcuts
- cmpToOne := exponent.Cmp(big1)
- if cmpToOne < 0 { // Exponent is zero
- // x ^ 0 == 1
- stack.push(base.SetUint64(1))
- } else if base.Sign() == 0 {
- // 0 ^ y, if y != 0, == 0
- stack.push(base.SetUint64(0))
- } else if cmpToOne == 0 { // Exponent is one
- // x ^ 1 == x
- stack.push(base)
- } else {
- stack.push(math.Exp(base, exponent))
- interpreter.intPool.put(base)
- }
- interpreter.intPool.put(exponent)
+func opExp(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ base, exponent := callContext.stack.pop(), callContext.stack.peek()
+ exponent.Exp(&base, exponent)
return nil, nil
}
-func opSignExtend(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- back := stack.pop()
- if back.Cmp(big.NewInt(31)) < 0 {
- bit := uint(back.Uint64()*8 + 7)
- num := stack.pop()
- mask := back.Lsh(common.Big1, bit)
- mask.Sub(mask, common.Big1)
- if num.Bit(int(bit)) > 0 {
- num.Or(num, mask.Not(mask))
- } else {
- num.And(num, mask)
- }
-
- stack.push(math.U256(num))
- }
-
- interpreter.intPool.put(back)
+func opSignExtend(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ back, num := callContext.stack.pop(), callContext.stack.peek()
+ num.ExtendSign(num, &back)
return nil, nil
}
-func opNot(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x := stack.peek()
- math.U256(x.Not(x))
+func opNot(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x := callContext.stack.peek()
+ x.Not(x)
return nil, nil
}
-func opLt(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- if x.Cmp(y) < 0 {
- y.SetUint64(1)
+func opLt(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ if x.Lt(y) {
+ y.SetOne()
} else {
- y.SetUint64(0)
+ y.Clear()
}
- interpreter.intPool.put(x)
return nil, nil
}
-func opGt(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- if x.Cmp(y) > 0 {
- y.SetUint64(1)
+func opGt(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ if x.Gt(y) {
+ y.SetOne()
} else {
- y.SetUint64(0)
+ y.Clear()
}
- interpreter.intPool.put(x)
return nil, nil
}
-func opSlt(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
-
- xSign := x.Cmp(tt255)
- ySign := y.Cmp(tt255)
-
- switch {
- case xSign >= 0 && ySign < 0:
- y.SetUint64(1)
-
- case xSign < 0 && ySign >= 0:
- y.SetUint64(0)
-
- default:
- if x.Cmp(y) < 0 {
- y.SetUint64(1)
- } else {
- y.SetUint64(0)
- }
+func opSlt(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ if x.Slt(y) {
+ y.SetOne()
+ } else {
+ y.Clear()
}
- interpreter.intPool.put(x)
return nil, nil
}
-func opSgt(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
-
- xSign := x.Cmp(tt255)
- ySign := y.Cmp(tt255)
-
- switch {
- case xSign >= 0 && ySign < 0:
- y.SetUint64(0)
-
- case xSign < 0 && ySign >= 0:
- y.SetUint64(1)
-
- default:
- if x.Cmp(y) > 0 {
- y.SetUint64(1)
- } else {
- y.SetUint64(0)
- }
+func opSgt(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ if x.Sgt(y) {
+ y.SetOne()
+ } else {
+ y.Clear()
}
- interpreter.intPool.put(x)
return nil, nil
}
-func opEq(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- if x.Cmp(y) == 0 {
- y.SetUint64(1)
+func opEq(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ if x.Eq(y) {
+ y.SetOne()
} else {
- y.SetUint64(0)
+ y.Clear()
}
- interpreter.intPool.put(x)
return nil, nil
}
-func opIszero(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x := stack.peek()
- if x.Sign() > 0 {
- x.SetUint64(0)
+func opIszero(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x := callContext.stack.peek()
+ if x.IsZero() {
+ x.SetOne()
} else {
- x.SetUint64(1)
+ x.Clear()
}
return nil, nil
}
-func opAnd(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.pop()
- stack.push(x.And(x, y))
-
- interpreter.intPool.put(y)
+func opAnd(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.And(&x, y)
return nil, nil
}
-func opOr(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- y.Or(x, y)
-
- interpreter.intPool.put(x)
+func opOr(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Or(&x, y)
return nil, nil
}
-func opXor(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y := stack.pop(), stack.peek()
- y.Xor(x, y)
-
- interpreter.intPool.put(x)
+func opXor(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y := callContext.stack.pop(), callContext.stack.peek()
+ y.Xor(&x, y)
return nil, nil
}
-func opByte(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- th, val := stack.pop(), stack.peek()
- if th.Cmp(common.Big32) < 0 {
- b := math.Byte(val, 32, int(th.Int64()))
- val.SetUint64(uint64(b))
- } else {
- val.SetUint64(0)
- }
- interpreter.intPool.put(th)
+func opByte(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ th, val := callContext.stack.pop(), callContext.stack.peek()
+ val.Byte(&th)
return nil, nil
}
-func opAddmod(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y, z := stack.pop(), stack.pop(), stack.pop()
- if z.Cmp(bigZero) > 0 {
- x.Add(x, y)
- x.Mod(x, z)
- stack.push(math.U256(x))
+func opAddmod(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek()
+ if z.IsZero() {
+ z.Clear()
} else {
- stack.push(x.SetUint64(0))
+ z.AddMod(&x, &y, z)
}
- interpreter.intPool.put(y, z)
return nil, nil
}
-func opMulmod(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- x, y, z := stack.pop(), stack.pop(), stack.pop()
- if z.Cmp(bigZero) > 0 {
- x.Mul(x, y)
- x.Mod(x, z)
- stack.push(math.U256(x))
- } else {
- stack.push(x.SetUint64(0))
- }
- interpreter.intPool.put(y, z)
+func opMulmod(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x, y, z := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.peek()
+ z.MulMod(&x, &y, z)
return nil, nil
}
// opSHL implements Shift Left
-// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
-// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
-func opSHL(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
- shift, value := math.U256(stack.pop()), math.U256(stack.peek())
- defer interpreter.intPool.put(shift) // First operand back into the pool
-
- if shift.Cmp(common.Big256) >= 0 {
- value.SetUint64(0)
- return nil, nil
+// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
+// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
+func opSHL(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
+ shift, value := callContext.stack.pop(), callContext.stack.peek()
+ if shift.LtUint64(256) {
+ value.Lsh(value, uint(shift.Uint64()))
+ } else {
+ value.Clear()
}
- n := uint(shift.Uint64())
- math.U256(value.Lsh(value, n))
return nil, nil
}
// opSHR implements Logical Shift Right
-// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
-// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
-func opSHR(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
- shift, value := math.U256(stack.pop()), math.U256(stack.peek())
- defer interpreter.intPool.put(shift) // First operand back into the pool
-
- if shift.Cmp(common.Big256) >= 0 {
- value.SetUint64(0)
- return nil, nil
+// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
+// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
+func opSHR(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ // Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
+ shift, value := callContext.stack.pop(), callContext.stack.peek()
+ if shift.LtUint64(256) {
+ value.Rsh(value, uint(shift.Uint64()))
+ } else {
+ value.Clear()
}
- n := uint(shift.Uint64())
- math.U256(value.Rsh(value, n))
return nil, nil
}
// opSAR implements Arithmetic Shift Right
-// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
-// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
-func opSAR(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
+// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
+func opSAR(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
// Note, S256 returns (potentially) a new bigint, so we're popping, not peeking this one
- shift, value := math.U256(stack.pop()), math.S256(stack.pop())
- defer interpreter.intPool.put(shift) // First operand back into the pool
-
- if shift.Cmp(common.Big256) >= 0 {
+ shift, value := callContext.stack.pop(), callContext.stack.peek()
+ if shift.GtUint64(256) {
if value.Sign() >= 0 {
- value.SetUint64(0)
+ value.Clear()
} else {
- value.SetInt64(-1)
+ // Max negative shift: all bits set
+ value.SetAllOne()
}
- stack.push(math.U256(value))
return nil, nil
}
n := uint(shift.Uint64())
- value.Rsh(value, n)
- stack.push(math.U256(value))
+ value.SRsh(value, n)
return nil, nil
}
-func opSha3(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- offset, size := stack.pop(), stack.pop()
- data := memory.GetPtr(offset.Int64(), size.Int64())
+func opSha3(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ offset, size := callContext.stack.pop(), callContext.stack.peek()
+ data := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
if interpreter.hasher == nil {
interpreter.hasher = sha3.New256().(keccakState)
@@ -398,134 +252,157 @@ func opSha3(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory
if cvm.vmConfig.EnablePreimageRecording {
cvm.StateDB.AddPreimage(interpreter.hasherBuf, data)
}
- stack.push(interpreter.intPool.get().SetBytes(interpreter.hasherBuf[:]))
-
- interpreter.intPool.put(offset, size)
+ size.SetBytes(interpreter.hasherBuf[:])
return nil, nil
}
-func opAddress(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetBytes(contract.Address().Bytes()))
+func opAddress(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Address().Bytes()))
return nil, nil
}
-func opBalance(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- slot := stack.peek()
- slot.Set(interpreter.cvm.StateDB.GetBalance(common.BigToAddress(slot)))
+func opBalance(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ slot := callContext.stack.peek()
+ address := common.Address(slot.Bytes22())
+ slot.SetFromBig(interpreter.cvm.StateDB.GetBalance(address))
return nil, nil
}
-func opOrigin(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetBytes(interpreter.cvm.Origin.Bytes()))
+func opOrigin(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetBytes(interpreter.cvm.Origin.Bytes()))
return nil, nil
}
-func opCaller(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetBytes(contract.Caller().Bytes()))
+func opCaller(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetBytes(callContext.contract.Caller().Bytes()))
return nil, nil
}
-func opCallValue(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().Set(contract.value))
+func opCallValue(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v, _ := uint256.FromBig(callContext.contract.value)
+ callContext.stack.push(v)
return nil, nil
}
-func opCallDataLoad(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetBytes(getDataBig(contract.Input, stack.pop(), big32)))
+func opCallDataLoad(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ x := callContext.stack.peek()
+ if offset, overflow := x.Uint64WithOverflow(); !overflow {
+ data := getData(callContext.contract.Input, offset, 32)
+ x.SetBytes(data)
+ } else {
+ x.Clear()
+ }
return nil, nil
}
-func opCallDataSize(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetInt64(int64(len(contract.Input))))
+func opCallDataSize(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(callContext.contract.Input))))
return nil, nil
}
-func opCallDataCopy(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCallDataCopy(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- memOffset = stack.pop()
- dataOffset = stack.pop()
- length = stack.pop()
+ memOffset = callContext.stack.pop()
+ dataOffset = callContext.stack.pop()
+ length = callContext.stack.pop()
)
- memory.Set(memOffset.Uint64(), length.Uint64(), getDataBig(contract.Input, dataOffset, length))
-
- interpreter.intPool.put(memOffset, dataOffset, length)
+ dataOffset64, overflow := dataOffset.Uint64WithOverflow()
+ if overflow {
+ dataOffset64 = 0xffffffffffffffff
+ }
+ // These values are checked for overflow during energy cost calculation
+ memOffset64 := memOffset.Uint64()
+ length64 := length.Uint64()
+ callContext.memory.Set(memOffset64, length64, getData(callContext.contract.Input, dataOffset64, length64))
return nil, nil
}
-func opReturnDataSize(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetUint64(uint64(len(interpreter.returnData))))
+func opReturnDataSize(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData))))
return nil, nil
}
-func opReturnDataCopy(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opReturnDataCopy(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- memOffset = stack.pop()
- dataOffset = stack.pop()
- length = stack.pop()
-
- end = interpreter.intPool.get().Add(dataOffset, length)
+ memOffset = callContext.stack.pop()
+ dataOffset = callContext.stack.pop()
+ length = callContext.stack.pop()
)
- defer interpreter.intPool.put(memOffset, dataOffset, length, end)
- if !end.IsUint64() || uint64(len(interpreter.returnData)) < end.Uint64() {
- return nil, errReturnDataOutOfBounds
+ offset64, overflow := dataOffset.Uint64WithOverflow()
+ if overflow {
+ return nil, ErrReturnDataOutOfBounds
}
- memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[dataOffset.Uint64():end.Uint64()])
-
+ // we can reuse dataOffset now (aliasing it for clarity)
+ var end = dataOffset
+ end.Add(&dataOffset, &length)
+ end64, overflow := end.Uint64WithOverflow()
+ if overflow || uint64(len(interpreter.returnData)) < end64 {
+ return nil, ErrReturnDataOutOfBounds
+ }
+ callContext.memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64])
return nil, nil
}
-func opExtCodeSize(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- slot := stack.peek()
- slot.SetUint64(uint64(interpreter.cvm.StateDB.GetCodeSize(common.BigToAddress(slot))))
+func opExtCodeSize(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ slot := callContext.stack.peek()
+ slot.SetUint64(uint64(interpreter.cvm.StateDB.GetCodeSize(common.Address(slot.Bytes22()))))
return nil, nil
}
-func opCodeSize(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- l := interpreter.intPool.get().SetInt64(int64(len(contract.Code)))
- stack.push(l)
+func opCodeSize(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ l := new(uint256.Int)
+ l.SetUint64(uint64(len(callContext.contract.Code)))
+ callContext.stack.push(l)
return nil, nil
}
-func opCodeCopy(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCodeCopy(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- memOffset = stack.pop()
- codeOffset = stack.pop()
- length = stack.pop()
+ memOffset = callContext.stack.pop()
+ codeOffset = callContext.stack.pop()
+ length = callContext.stack.pop()
)
- codeCopy := getDataBig(contract.Code, codeOffset, length)
- memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+ uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
+ if overflow {
+ uint64CodeOffset = 0xffffffffffffffff
+ }
+ codeCopy := getData(callContext.contract.Code, uint64CodeOffset, length.Uint64())
+ callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
- interpreter.intPool.put(memOffset, codeOffset, length)
return nil, nil
}
-func opExtCodeCopy(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opExtCodeCopy(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- addr = common.BigToAddress(stack.pop())
- memOffset = stack.pop()
- codeOffset = stack.pop()
- length = stack.pop()
+ a = callContext.stack.pop()
+ memOffset = callContext.stack.pop()
+ codeOffset = callContext.stack.pop()
+ length = callContext.stack.pop()
)
- codeCopy := getDataBig(interpreter.cvm.StateDB.GetCode(addr), codeOffset, length)
- memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+ uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
+ if overflow {
+ uint64CodeOffset = 0xffffffffffffffff
+ }
+ addr := common.Address(a.Bytes22())
+ codeCopy := getData(interpreter.cvm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
+ callContext.memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
- interpreter.intPool.put(memOffset, codeOffset, length)
return nil, nil
}
// opExtCodeHash returns the code hash of a specified account.
// There are several cases when the function is called, while we can relay everything
// to `state.GetCodeHash` function to ensure the correctness.
-// (1) Caller tries to get the code hash of a normal contract account, state
+// (1) Caller tries to get the code hash of a normal contract account, state
// should return the relative code hash and set it as the result.
//
// (2) Caller tries to get the code hash of a non-existent account, state should
// return common.Hash{} and zero will be set as the result.
//
-// (3) Caller tries to get the code hash for an account without contract code,
+// (3) Caller tries to get the code hash for an account without contract code,
// state should return emptyCodeHash(0xc5d246...) as the result.
//
// (4) Caller tries to get the code hash of a precompiled account, the result
@@ -542,340 +419,401 @@ func opExtCodeCopy(pc *uint64, interpreter *CVMInterpreter, contract *Contract,
//
// (6) Caller tries to get the code hash for an account which is marked as deleted,
// this account should be regarded as a non-existent account and zero should be returned.
-func opExtCodeHash(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- slot := stack.peek()
- address := common.BigToAddress(slot)
+func opExtCodeHash(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ slot := callContext.stack.peek()
+ address := common.Address(slot.Bytes22())
if interpreter.cvm.StateDB.Empty(address) {
- slot.SetUint64(0)
+ slot.Clear()
} else {
slot.SetBytes(interpreter.cvm.StateDB.GetCodeHash(address).Bytes())
}
return nil, nil
}
-func opEnergyprice(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().Set(interpreter.cvm.EnergyPrice))
+func opEnergyprice(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v, _ := uint256.FromBig(interpreter.cvm.EnergyPrice)
+ callContext.stack.push(v)
return nil, nil
}
-func opBlockhash(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- num := stack.pop()
-
- n := interpreter.intPool.get().Sub(interpreter.cvm.BlockNumber, common.Big257)
- if num.Cmp(n) > 0 && num.Cmp(interpreter.cvm.BlockNumber) < 0 {
- stack.push(interpreter.cvm.GetHash(num.Uint64()).Big())
+func opBlockhash(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ num := callContext.stack.peek()
+ num64, overflow := num.Uint64WithOverflow()
+ if overflow {
+ num.Clear()
+ return nil, nil
+ }
+ var upper, lower uint64
+ upper = interpreter.cvm.BlockNumber.Uint64()
+ if upper < 257 {
+ lower = 0
+ } else {
+ lower = upper - 256
+ }
+ if num64 >= lower && num64 < upper {
+ num.SetBytes(interpreter.cvm.GetHash(num64).Bytes())
} else {
- stack.push(interpreter.intPool.getZero())
+ num.Clear()
}
- interpreter.intPool.put(num, n)
return nil, nil
}
-func opCoinbase(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetBytes(interpreter.cvm.Coinbase.Bytes()))
+func opCoinbase(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetBytes(interpreter.cvm.Coinbase.Bytes()))
return nil, nil
}
-func opTimestamp(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(math.U256(interpreter.intPool.get().Set(interpreter.cvm.Time)))
+func opTimestamp(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v, _ := uint256.FromBig(interpreter.cvm.Time)
+ callContext.stack.push(v)
return nil, nil
}
-func opNumber(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(math.U256(interpreter.intPool.get().Set(interpreter.cvm.BlockNumber)))
+func opNumber(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v, _ := uint256.FromBig(interpreter.cvm.BlockNumber)
+ callContext.stack.push(v)
return nil, nil
}
-func opDifficulty(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(math.U256(interpreter.intPool.get().Set(interpreter.cvm.Difficulty)))
+func opDifficulty(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v, _ := uint256.FromBig(interpreter.cvm.Difficulty)
+ callContext.stack.push(v)
return nil, nil
}
-func opEnergyLimit(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(math.U256(interpreter.intPool.get().SetUint64(interpreter.cvm.EnergyLimit)))
+func opEnergyLimit(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(interpreter.cvm.EnergyLimit))
return nil, nil
}
-func opPop(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- interpreter.intPool.put(stack.pop())
+func opPop(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.pop()
return nil, nil
}
-func opMload(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- v := stack.peek()
- offset := v.Int64()
- v.SetBytes(memory.GetPtr(offset, 32))
+func opMload(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ v := callContext.stack.peek()
+ offset := int64(v.Uint64())
+ v.SetBytes(callContext.memory.GetPtr(offset, 32))
return nil, nil
}
-func opMstore(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- // pop value of the stack
- mStart, val := stack.pop(), stack.pop()
- memory.Set32(mStart.Uint64(), val)
-
- interpreter.intPool.put(mStart, val)
+func opMstore(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ // pop value of the stack
+ mStart, val := callContext.stack.pop(), callContext.stack.pop()
+ callContext.memory.Set32(mStart.Uint64(), &val)
return nil, nil
}
-func opMstore8(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- off, val := stack.pop().Int64(), stack.pop().Int64()
- memory.store[off] = byte(val & 0xff)
-
+func opMstore8(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ off, val := callContext.stack.pop(), callContext.stack.pop()
+ callContext.memory.store[off.Uint64()] = byte(val.Uint64())
return nil, nil
}
-func opSload(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- loc := stack.peek()
- val := interpreter.cvm.StateDB.GetState(contract.Address(), common.BigToHash(loc))
+func opSload(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ loc := callContext.stack.peek()
+ hash := common.Hash(loc.Bytes32())
+ val := interpreter.cvm.StateDB.GetState(callContext.contract.Address(), hash)
loc.SetBytes(val.Bytes())
return nil, nil
}
-func opSstore(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- loc := common.BigToHash(stack.pop())
- val := stack.pop()
- interpreter.cvm.StateDB.SetState(contract.Address(), loc, common.BigToHash(val))
-
- interpreter.intPool.put(val)
+func opSstore(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ loc := callContext.stack.pop()
+ val := callContext.stack.pop()
+ interpreter.cvm.StateDB.SetState(callContext.contract.Address(),
+ common.Hash(loc.Bytes32()), common.Hash(val.Bytes32()))
return nil, nil
}
-func opJump(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- pos := stack.pop()
- if !contract.validJumpdest(pos) {
- return nil, errInvalidJump
+func opJump(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ pos := callContext.stack.pop()
+ if !callContext.contract.validJumpdest(&pos) {
+ return nil, ErrInvalidJump
}
*pc = pos.Uint64()
- interpreter.intPool.put(pos)
return nil, nil
}
-func opJumpi(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- pos, cond := stack.pop(), stack.pop()
- if cond.Sign() != 0 {
- if !contract.validJumpdest(pos) {
- return nil, errInvalidJump
+func opJumpi(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ pos, cond := callContext.stack.pop(), callContext.stack.pop()
+ if !cond.IsZero() {
+ if !callContext.contract.validJumpdest(&pos) {
+ return nil, ErrInvalidJump
}
*pc = pos.Uint64()
} else {
*pc++
}
- interpreter.intPool.put(pos, cond)
return nil, nil
}
-func opJumpdest(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opJumpdest(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
return nil, nil
}
-func opPc(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetUint64(*pc))
+func opBeginSub(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ return nil, ErrInvalidSubroutineEntry
+}
+
+func opJumpSub(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ if len(callContext.rstack.data) >= 1023 {
+ return nil, ErrReturnStackExceeded
+ }
+ pos := callContext.stack.pop()
+ if !pos.IsUint64() {
+ return nil, ErrInvalidJump
+ }
+ posU64 := pos.Uint64()
+ if !callContext.contract.validJumpSubdest(posU64) {
+ return nil, ErrInvalidJump
+ }
+ callContext.rstack.push(uint32(*pc))
+ *pc = posU64 + 1
+ return nil, nil
+}
+
+func opReturnSub(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ if len(callContext.rstack.data) == 0 {
+ return nil, ErrInvalidRetsub
+ }
+ // Other than the check that the return stack is not empty, there is no
+ // need to validate the pc from 'returns', since we only ever push valid
+ // values onto it via jumpsub.
+ *pc = uint64(callContext.rstack.pop()) + 1
+ return nil, nil
+}
+
+func opPc(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(*pc))
return nil, nil
}
-func opMsize(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetInt64(int64(memory.Len())))
+func opMsize(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(uint64(callContext.memory.Len())))
return nil, nil
}
-func opEnergy(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(interpreter.intPool.get().SetUint64(contract.Energy))
+func opEnergy(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.push(new(uint256.Int).SetUint64(callContext.contract.Energy))
return nil, nil
}
-func opCreate(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCreate(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- value = stack.pop()
- offset, size = stack.pop(), stack.pop()
- input = memory.GetCopy(offset.Int64(), size.Int64())
- energy = contract.Energy
+ value = callContext.stack.pop()
+ offset, size = callContext.stack.pop(), callContext.stack.pop()
+ input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
+ energy = callContext.contract.Energy
)
energy -= energy / 64
+ // reuse size int for stackvalue
+ stackvalue := size
- contract.UseEnergy(energy)
- res, addr, returnEnergy, suberr := interpreter.cvm.Create(contract, input, energy, value)
- // Push item on the stack based on the returned error. We must
+ callContext.contract.UseEnergy(energy)
+ //TODO: use uint256.Int instead of converting with toBig()
+ var bigVal = big0
+ if !value.IsZero() {
+ bigVal = value.ToBig()
+ }
+
+ res, addr, returnEnergy, suberr := interpreter.cvm.Create(callContext.contract, input, energy, bigVal)
+ // Push item on the stack based on the returned error. We must
// ignore this error and pretend the operation was successful.
if suberr == ErrCodeStoreOutOfEnergy {
- stack.push(interpreter.intPool.getZero())
+ stackvalue.Clear()
} else if suberr != nil && suberr != ErrCodeStoreOutOfEnergy {
- stack.push(interpreter.intPool.getZero())
+ stackvalue.Clear()
} else {
- stack.push(interpreter.intPool.get().SetBytes(addr.Bytes()))
+ stackvalue.SetBytes(addr.Bytes())
}
- contract.Energy += returnEnergy
- interpreter.intPool.put(value, offset, size)
+ callContext.stack.push(&stackvalue)
+ callContext.contract.Energy += returnEnergy
- if suberr == errExecutionReverted {
+ if suberr == ErrExecutionReverted {
return res, nil
}
return nil, nil
}
-func opCreate2(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCreate2(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- endowment = stack.pop()
- offset, size = stack.pop(), stack.pop()
- salt = stack.pop()
- input = memory.GetCopy(offset.Int64(), size.Int64())
- energy = contract.Energy
+ endowment = callContext.stack.pop()
+ offset, size = callContext.stack.pop(), callContext.stack.pop()
+ salt = callContext.stack.pop()
+ input = callContext.memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
+ energy = callContext.contract.Energy
)
// Apply CIP150
energy -= energy / 64
- contract.UseEnergy(energy)
- res, addr, returnEnergy, suberr := interpreter.cvm.Create2(contract, input, energy, endowment, salt)
- // Push item on the stack based on the returned error.
+ callContext.contract.UseEnergy(energy)
+ // reuse size int for stackvalue
+ stackvalue := size
+ //TODO: use uint256.Int instead of converting with toBig()
+ bigEndowment := big0
+ if !endowment.IsZero() {
+ bigEndowment = endowment.ToBig()
+ }
+ res, addr, returnEnergy, suberr := interpreter.cvm.Create2(callContext.contract, input, energy,
+ bigEndowment, &salt)
+ // Push item on the stack based on the returned error.
if suberr != nil {
- stack.push(interpreter.intPool.getZero())
+ stackvalue.Clear()
} else {
- stack.push(interpreter.intPool.get().SetBytes(addr.Bytes()))
+ stackvalue.SetBytes(addr.Bytes())
}
- contract.Energy += returnEnergy
- interpreter.intPool.put(endowment, offset, size, salt)
+ callContext.stack.push(&stackvalue)
+ callContext.contract.Energy += returnEnergy
- if suberr == errExecutionReverted {
+ if suberr == ErrExecutionReverted {
return res, nil
}
return nil, nil
}
-func opCall(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCall(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
// Pop energy. The actual energy in interpreter.cvm.callEnergyTemp.
- interpreter.intPool.put(stack.pop())
+ // We can use this as a temporary value
+ temp := callContext.stack.pop()
energy := interpreter.cvm.callEnergyTemp
// Pop other call parameters.
- addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
- toAddr := common.BigToAddress(addr)
- value = math.U256(value)
- // Get the arguments from the memory.
- args := memory.GetPtr(inOffset.Int64(), inSize.Int64())
-
- if value.Sign() != 0 {
+ addr, value, inOffset, inSize, retOffset, retSize := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop()
+ toAddr := common.Address(addr.Bytes22())
+ // Get the arguments from the memory.
+ args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
+
+ var bigVal = big0
+ //TODO: use uint256.Int instead of converting with toBig()
+ // By using big0 here, we save an alloc for the most common case (non-ether-transferring contract calls),
+ // but it would make more sense to extend the usage of uint256.Int
+ if !value.IsZero() {
energy += params.CallStipend
+ bigVal = value.ToBig()
}
- ret, returnEnergy, err := interpreter.cvm.Call(contract, toAddr, args, energy, value)
+
+ ret, returnEnergy, err := interpreter.cvm.Call(callContext.contract, toAddr, args, energy, bigVal)
if err != nil {
- stack.push(interpreter.intPool.getZero())
+ temp.Clear()
} else {
- stack.push(interpreter.intPool.get().SetUint64(1))
+ temp.SetOne()
}
- if err == nil || err == errExecutionReverted {
- memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ callContext.stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
+ callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- contract.Energy += returnEnergy
-
- interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
+ callContext.contract.Energy += returnEnergy
return ret, nil
}
-func opCallCode(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCallCode(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
// Pop energy. The actual energy is in interpreter.cvm.callEnergyTemp.
- interpreter.intPool.put(stack.pop())
+ // We use it as a temporary value
+ temp := callContext.stack.pop()
energy := interpreter.cvm.callEnergyTemp
// Pop other call parameters.
- addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
- toAddr := common.BigToAddress(addr)
- value = math.U256(value)
- // Get arguments from the memory.
- args := memory.GetPtr(inOffset.Int64(), inSize.Int64())
-
- if value.Sign() != 0 {
+ addr, value, inOffset, inSize, retOffset, retSize := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop()
+ toAddr := common.Address(addr.Bytes22())
+ // Get arguments from the callContext.memory.
+ args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
+
+ //TODO: use uint256.Int instead of converting with toBig()
+ var bigVal = big0
+ if !value.IsZero() {
energy += params.CallStipend
+ bigVal = value.ToBig()
}
- ret, returnEnergy, err := interpreter.cvm.CallCode(contract, toAddr, args, energy, value)
+
+ ret, returnEnergy, err := interpreter.cvm.CallCode(callContext.contract, toAddr, args, energy, bigVal)
if err != nil {
- stack.push(interpreter.intPool.getZero())
+ temp.Clear()
} else {
- stack.push(interpreter.intPool.get().SetUint64(1))
+ temp.SetOne()
}
- if err == nil || err == errExecutionReverted {
- memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ callContext.stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
+ callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- contract.Energy += returnEnergy
-
- interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
+ callContext.contract.Energy += returnEnergy
return ret, nil
}
-func opDelegateCall(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opDelegateCall(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
// Pop energy. The actual energy is in interpreter.cvm.callEnergyTemp.
- interpreter.intPool.put(stack.pop())
+ // We use it as a temporary value
+ temp := callContext.stack.pop()
energy := interpreter.cvm.callEnergyTemp
// Pop other call parameters.
- addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
- toAddr := common.BigToAddress(addr)
- // Get arguments from the memory.
- args := memory.GetPtr(inOffset.Int64(), inSize.Int64())
+ addr, inOffset, inSize, retOffset, retSize := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop()
+ toAddr := common.Address(addr.Bytes22())
+ // Get arguments from the callContext.memory.
+ args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
- ret, returnEnergy, err := interpreter.cvm.DelegateCall(contract, toAddr, args, energy)
+ ret, returnEnergy, err := interpreter.cvm.DelegateCall(callContext.contract, toAddr, args, energy)
if err != nil {
- stack.push(interpreter.intPool.getZero())
+ temp.Clear()
} else {
- stack.push(interpreter.intPool.get().SetUint64(1))
+ temp.SetOne()
}
- if err == nil || err == errExecutionReverted {
- memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ callContext.stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
+ callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- contract.Energy += returnEnergy
-
- interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize)
+ callContext.contract.Energy += returnEnergy
return ret, nil
}
-func opStaticCall(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opStaticCall(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
// Pop energy. The actual energy is in interpreter.cvm.callEnergyTemp.
- interpreter.intPool.put(stack.pop())
+ // We use it as a temporary value
+ temp := callContext.stack.pop()
energy := interpreter.cvm.callEnergyTemp
// Pop other call parameters.
- addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
- toAddr := common.BigToAddress(addr)
- // Get arguments from the memory.
- args := memory.GetPtr(inOffset.Int64(), inSize.Int64())
+ addr, inOffset, inSize, retOffset, retSize := callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop(), callContext.stack.pop()
+ toAddr := common.Address(addr.Bytes22())
+ // Get arguments from the callContext.memory.
+ args := callContext.memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
- ret, returnEnergy, err := interpreter.cvm.StaticCall(contract, toAddr, args, energy)
+ ret, returnEnergy, err := interpreter.cvm.StaticCall(callContext.contract, toAddr, args, energy)
if err != nil {
- stack.push(interpreter.intPool.getZero())
+ temp.Clear()
} else {
- stack.push(interpreter.intPool.get().SetUint64(1))
+ temp.SetOne()
}
- if err == nil || err == errExecutionReverted {
- memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ callContext.stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
+ callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- contract.Energy += returnEnergy
-
- interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize)
+ callContext.contract.Energy += returnEnergy
return ret, nil
}
-func opReturn(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- offset, size := stack.pop(), stack.pop()
- ret := memory.GetPtr(offset.Int64(), size.Int64())
-
- interpreter.intPool.put(offset, size)
+func opReturn(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ offset, size := callContext.stack.pop(), callContext.stack.pop()
+ ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
return ret, nil
}
-func opRevert(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- offset, size := stack.pop(), stack.pop()
- ret := memory.GetPtr(offset.Int64(), size.Int64())
-
- interpreter.intPool.put(offset, size)
+func opRevert(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ offset, size := callContext.stack.pop(), callContext.stack.pop()
+ ret := callContext.memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
return ret, nil
}
-func opStop(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opStop(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
return nil, nil
}
-func opSuicide(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- balance := interpreter.cvm.StateDB.GetBalance(contract.Address())
- interpreter.cvm.StateDB.AddBalance(common.BigToAddress(stack.pop()), balance)
+func opSuicide(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ beneficiary := callContext.stack.pop()
+ balance := interpreter.cvm.StateDB.GetBalance(callContext.contract.Address())
+ interpreter.cvm.StateDB.AddBalance(common.Address(beneficiary.Bytes22()), balance)
- interpreter.cvm.StateDB.Suicide(contract.Address())
+ interpreter.cvm.StateDB.Suicide(callContext.contract.Address())
return nil, nil
}
@@ -883,16 +821,17 @@ func opSuicide(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memo
// make log instruction function
func makeLog(size int) executionFunc {
- return func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ return func(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
topics := make([]common.Hash, size)
- mStart, mSize := stack.pop(), stack.pop()
+ mStart, mSize := callContext.stack.pop(), callContext.stack.pop()
for i := 0; i < size; i++ {
- topics[i] = common.BigToHash(stack.pop())
+ addr := callContext.stack.pop()
+ topics[i] = common.Hash(addr.Bytes32())
}
- d := memory.GetCopy(mStart.Int64(), mSize.Int64())
+ d := callContext.memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64()))
interpreter.cvm.StateDB.AddLog(&types.Log{
- Address: contract.Address(),
+ Address: callContext.contract.Address(),
Topics: topics,
Data: d,
// This is a non-consensus field, but assigned here because
@@ -900,30 +839,29 @@ func makeLog(size int) executionFunc {
BlockNumber: interpreter.cvm.BlockNumber.Uint64(),
})
- interpreter.intPool.put(mStart, mSize)
return nil, nil
}
}
// opPush1 is a specialized version of pushN
-func opPush1(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opPush1(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
var (
- codeLen = uint64(len(contract.Code))
- integer = interpreter.intPool.get()
+ codeLen = uint64(len(callContext.contract.Code))
+ integer = new(uint256.Int)
)
*pc += 1
if *pc < codeLen {
- stack.push(integer.SetUint64(uint64(contract.Code[*pc])))
+ callContext.stack.push(integer.SetUint64(uint64(callContext.contract.Code[*pc])))
} else {
- stack.push(integer.SetUint64(0))
+ callContext.stack.push(integer.Clear())
}
return nil, nil
}
// make push instruction function
func makePush(size uint64, pushByteSize int) executionFunc {
- return func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- codeLen := len(contract.Code)
+ return func(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ codeLen := len(callContext.contract.Code)
startMin := codeLen
if int(*pc+1) < startMin {
@@ -935,8 +873,9 @@ func makePush(size uint64, pushByteSize int) executionFunc {
endMin = startMin + pushByteSize
}
- integer := interpreter.intPool.get()
- stack.push(integer.SetBytes(common.RightPadBytes(contract.Code[startMin:endMin], pushByteSize)))
+ integer := new(uint256.Int)
+ callContext.stack.push(integer.SetBytes(common.RightPadBytes(
+ callContext.contract.Code[startMin:endMin], pushByteSize)))
*pc += size
return nil, nil
@@ -945,8 +884,8 @@ func makePush(size uint64, pushByteSize int) executionFunc {
// make dup instruction function
func makeDup(size int64) executionFunc {
- return func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.dup(interpreter.intPool, int(size))
+ return func(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.dup(int(size))
return nil, nil
}
}
@@ -955,8 +894,8 @@ func makeDup(size int64) executionFunc {
func makeSwap(size int64) executionFunc {
// switch n + 1 otherwise n would be swapped with n
size++
- return func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.swap(int(size))
+ return func(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error) {
+ callContext.stack.swap(int(size))
return nil, nil
}
}
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 14301c537..1c70ac314 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -20,8 +20,8 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "github.com/core-coin/uint256"
"io/ioutil"
- "math/big"
"testing"
"github.com/core-coin/go-core/common"
@@ -94,45 +94,26 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
var (
env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
stack = newstack()
+ rstack = newReturnStack()
pc = uint64(0)
cvmInterpreter = env.interpreter.(*CVMInterpreter)
)
- // Stuff a couple of nonzero bigints into pool, to ensure that ops do not rely on pooled integers to be zero
- cvmInterpreter.intPool = poolOfIntPools.get()
- cvmInterpreter.intPool.put(big.NewInt(-1337))
- cvmInterpreter.intPool.put(big.NewInt(-1337))
- cvmInterpreter.intPool.put(big.NewInt(-1337))
-
for i, test := range tests {
- x := new(big.Int).SetBytes(common.Hex2Bytes(test.X))
- y := new(big.Int).SetBytes(common.Hex2Bytes(test.Y))
- expected := new(big.Int).SetBytes(common.Hex2Bytes(test.Expected))
+ x := new(uint256.Int).SetBytes(common.Hex2Bytes(test.X))
+ y := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Y))
+ expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
stack.push(x)
stack.push(y)
- opFn(&pc, cvmInterpreter, nil, nil, stack)
+ opFn(&pc, cvmInterpreter, &callCtx{nil, stack, rstack, nil})
+ if len(stack.data) != 1 {
+ t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data))
+ }
actual := stack.pop()
if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %v %d, %v(%x, %x): expected %x, got %x", name, i, name, x, y, expected, actual)
}
- // Check pool usage
- // 1.pool is not allowed to contain anything on the stack
- // 2.pool is not allowed to contain the same pointers twice
- if cvmInterpreter.intPool.pool.len() > 0 {
-
- poolvals := make(map[*big.Int]struct{})
- poolvals[actual] = struct{}{}
-
- for cvmInterpreter.intPool.pool.len() > 0 {
- key := cvmInterpreter.intPool.get()
- if _, exist := poolvals[key]; exist {
- t.Errorf("Testcase %v %d, pool contains double-entry", name, i)
- }
- poolvals[key] = struct{}{}
- }
- }
}
- poolOfIntPools.put(cvmInterpreter.intPool)
}
func TestByteOp(t *testing.T) {
@@ -166,6 +147,45 @@ func TestSHL(t *testing.T) {
testTwoOperandOp(t, tests, opSHL, "shl")
}
+func TestAddMod(t *testing.T) {
+ var (
+ env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ rstack = newReturnStack()
+ cvmInterpreter = NewCVMInterpreter(env, env.vmConfig)
+ pc = uint64(0)
+ )
+ tests := []struct {
+ x string
+ y string
+ z string
+ expected string
+ }{
+ {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+ },
+ }
+ // x + y = 0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd
+ // in 256 bit repr, fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd
+
+ for i, test := range tests {
+ x := new(uint256.Int).SetBytes(common.Hex2Bytes(test.x))
+ y := new(uint256.Int).SetBytes(common.Hex2Bytes(test.y))
+ z := new(uint256.Int).SetBytes(common.Hex2Bytes(test.z))
+ expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.expected))
+ stack.push(z)
+ stack.push(y)
+ stack.push(x)
+ opAddmod(&pc, cvmInterpreter, &callCtx{nil, stack, rstack, nil})
+ actual := stack.pop()
+ if actual.Cmp(expected) != 0 {
+ t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual)
+ }
+ }
+}
+
func TestSHR(t *testing.T) {
// Testcases from https://github.com/core-coin/CIPs/blob/master/CIPS/cip-145.md#shr-logical-shift-right
tests := []TwoOperandTestcase{
@@ -211,19 +231,18 @@ func TestSAR(t *testing.T) {
// getResult is a convenience function to generate the expected values
func getResult(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase {
var (
- env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
- stack = newstack()
- pc = uint64(0)
- interpreter = env.interpreter.(*CVMInterpreter)
+ env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
+ stack, rstack = newstack(), newReturnStack()
+ pc = uint64(0)
+ interpreter = env.interpreter.(*CVMInterpreter)
)
- interpreter.intPool = poolOfIntPools.get()
result := make([]TwoOperandTestcase, len(args))
for i, param := range args {
- x := new(big.Int).SetBytes(common.Hex2Bytes(param.x))
- y := new(big.Int).SetBytes(common.Hex2Bytes(param.y))
+ x := new(uint256.Int).SetBytes(common.Hex2Bytes(param.x))
+ y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
stack.push(x)
stack.push(y)
- opFn(&pc, interpreter, nil, nil, stack)
+ opFn(&pc, interpreter, &callCtx{nil, stack, rstack, nil})
actual := stack.pop()
result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
}
@@ -260,15 +279,14 @@ func TestJsonTestcases(t *testing.T) {
}
}
-func opBenchmark(bench *testing.B, op func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) {
+func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
var (
env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
- stack = newstack()
+ stack, rstack = newstack(), newReturnStack()
cvmInterpreter = NewCVMInterpreter(env, env.vmConfig)
)
env.interpreter = cvmInterpreter
- cvmInterpreter.intPool = poolOfIntPools.get()
// convert args
byteArgs := make([][]byte, len(args))
for i, arg := range args {
@@ -278,13 +296,12 @@ func opBenchmark(bench *testing.B, op func(pc *uint64, interpreter *CVMInterpret
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
for _, arg := range byteArgs {
- a := new(big.Int).SetBytes(arg)
- stack.push(a)
+ a := new(uint256.Int)
+ a.SetBytes(arg)
}
- op(&pc, cvmInterpreter, nil, nil, stack)
+ op(&pc, cvmInterpreter, &callCtx{nil, stack, rstack, nil})
stack.pop()
}
- poolOfIntPools.put(cvmInterpreter.intPool)
}
func BenchmarkOpAdd64(b *testing.B) {
@@ -498,71 +515,65 @@ func BenchmarkOpIsZero(b *testing.B) {
func TestOpMstore(t *testing.T) {
var (
env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
- stack = newstack()
+ stack, rstack = newstack(), newReturnStack()
mem = NewMemory()
cvmInterpreter = NewCVMInterpreter(env, env.vmConfig)
)
env.interpreter = cvmInterpreter
- cvmInterpreter.intPool = poolOfIntPools.get()
mem.Resize(64)
pc := uint64(0)
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
- stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0))
- opMstore(&pc, cvmInterpreter, nil, mem, stack)
+ stack.pushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int))
+ opMstore(&pc, cvmInterpreter, &callCtx{mem, stack, rstack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
}
- stack.pushN(big.NewInt(0x1), big.NewInt(0))
- opMstore(&pc, cvmInterpreter, nil, mem, stack)
+ stack.pushN(*new(uint256.Int).SetUint64(0x1), *new(uint256.Int))
+ opMstore(&pc, cvmInterpreter, &callCtx{mem, stack, rstack, nil})
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value")
}
- poolOfIntPools.put(cvmInterpreter.intPool)
}
func BenchmarkOpMstore(bench *testing.B) {
var (
env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
- stack = newstack()
+ stack, rstack = newstack(), newReturnStack()
mem = NewMemory()
cvmInterpreter = NewCVMInterpreter(env, env.vmConfig)
)
env.interpreter = cvmInterpreter
- cvmInterpreter.intPool = poolOfIntPools.get()
mem.Resize(64)
pc := uint64(0)
- memStart := big.NewInt(0)
- value := big.NewInt(0x1337)
+ memStart := new(uint256.Int)
+ value := new(uint256.Int).SetUint64(0x1337)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
- stack.pushN(value, memStart)
- opMstore(&pc, cvmInterpreter, nil, mem, stack)
+ stack.pushN(*value, *memStart)
+ opMstore(&pc, cvmInterpreter, &callCtx{mem, stack, rstack, nil})
}
- poolOfIntPools.put(cvmInterpreter.intPool)
}
func BenchmarkOpSHA3(bench *testing.B) {
var (
env = NewCVM(Context{}, nil, params.TestChainConfig, Config{})
- stack = newstack()
+ stack, rstack = newstack(), newReturnStack()
mem = NewMemory()
cvmInterpreter = NewCVMInterpreter(env, env.vmConfig)
)
env.interpreter = cvmInterpreter
- cvmInterpreter.intPool = poolOfIntPools.get()
mem.Resize(32)
pc := uint64(0)
- start := big.NewInt(0)
+ start := uint256.NewInt(0)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
- stack.pushN(big.NewInt(32), start)
- opSha3(&pc, cvmInterpreter, nil, mem, stack)
+ stack.pushN(*uint256.NewInt(32), *start)
+ opSha3(&pc, cvmInterpreter, &callCtx{mem, stack, rstack, nil})
}
- poolOfIntPools.put(cvmInterpreter.intPool)
}
func TestCreate2Addreses(t *testing.T) { //TODO: TEST
diff --git a/core/vm/int_pool_verifier_empty.go b/core/vm/int_pool_verifier_empty.go
deleted file mode 100644
index f20ea3294..000000000
--- a/core/vm/int_pool_verifier_empty.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2017 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-// +build !VERIFY_CVM_INTEGER_POOL
-
-package vm
-
-const verifyPool = false
-
-func verifyIntegerPool(ip *intPool) {}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 51a40ae5a..bfe782ea2 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -17,7 +17,6 @@
package vm
import (
- "fmt"
"hash"
"sync/atomic"
@@ -32,7 +31,7 @@ type Config struct {
NoRecursion bool // Disables call, callcode, delegate call and create
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
- JumpTable [256]operation // CVM instruction table, automatically populated if unset
+ JumpTable [256]*operation // CVM instruction table, automatically populated if unset
EWASMInterpreter string // External EWASM interpreter options
CVMInterpreter string // External CVM interpreter options
@@ -62,6 +61,15 @@ type Interpreter interface {
CanRun([]byte) bool
}
+// callCtx contains the things that are per-call, such as stack and memory,
+// but not transients like pc and energy
+type callCtx struct {
+ memory *Memory
+ stack *Stack
+ rstack *ReturnStack
+ contract *Contract
+}
+
// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
// Read to get a variable amount of data from the hash state. Read is faster than Sum
// because it doesn't copy the internal state, but also modifies the internal state.
@@ -75,8 +83,6 @@ type CVMInterpreter struct {
cvm *CVM
cfg Config
- intPool *intPool
-
hasher keccakState // SHA3 hasher instance shared across opcodes
hasherBuf common.Hash // SHA3 hasher result array shared aross opcodes
@@ -89,7 +95,7 @@ func NewCVMInterpreter(cvm *CVM, cfg Config) *CVMInterpreter {
// We use the STOP instruction whether to see
// the jump table was initialised. If it was not
// we'll set the default jump table.
- if !cfg.JumpTable[STOP].valid {
+ if cfg.JumpTable[STOP] == nil {
var jt JumpTable
switch {
default:
@@ -109,16 +115,8 @@ func NewCVMInterpreter(cvm *CVM, cfg Config) *CVMInterpreter {
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-energy operation except for
-// errExecutionReverted which means revert-and-keep-energy-left.
+// ErrExecutionReverted which means revert-and-keep-energy-left.
func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
- if in.intPool == nil {
- in.intPool = poolOfIntPools.get()
- defer func() {
- poolOfIntPools.put(in.intPool)
- in.intPool = nil
- }()
- }
-
// Increment the call depth which is restricted to 1024
in.cvm.depth++
defer func() { in.cvm.depth-- }()
@@ -140,9 +138,16 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
}
var (
- op OpCode // current opcode
- mem = NewMemory() // bound memory
- stack = newstack() // local stack
+ op OpCode // current opcode
+ mem = NewMemory() // bound memory
+ stack = newstack() // local stack
+ returns = newReturnStack() // local returns stack
+ callContext = &callCtx{
+ memory: mem,
+ stack: stack,
+ rstack: returns,
+ contract: contract,
+ }
// For optimisation reason we're using uint64 as the program counter.
// It's theoretically possible to go above 2^64. The YP defines the PC
// to be uint256. Practically much less so feasible.
@@ -154,18 +159,22 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
logged bool // deferred Tracer should ignore already logged steps
res []byte // result of the opcode execution function
)
+ // Don't move this deferred function, it's placed before the capturestate-deferred method,
+ // so that it gets executed _after_: the capturestate needs the stacks before
+ // they are returned to the pools
+ defer func() {
+ returnStack(stack)
+ returnRStack(returns)
+ }()
contract.Input = input
- // Reclaim the stack as an int pool when the execution stops
- defer func() { in.intPool.put(stack.data...) }()
-
if in.cfg.Debug {
defer func() {
if err != nil {
if !logged {
- in.cfg.Tracer.CaptureState(in.cvm, pcCopy, op, energyCopy, cost, mem, stack, contract, in.cvm.depth, err)
+ in.cfg.Tracer.CaptureState(in.cvm, pcCopy, op, energyCopy, cost, mem, stack, returns, in.returnData, contract, in.cvm.depth, err)
} else {
- in.cfg.Tracer.CaptureFault(in.cvm, pcCopy, op, energyCopy, cost, mem, stack, contract, in.cvm.depth, err)
+ in.cfg.Tracer.CaptureFault(in.cvm, pcCopy, op, energyCopy, cost, mem, stack, returns, contract, in.cvm.depth, err)
}
}
}()
@@ -174,7 +183,12 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
// the execution of one of the operations or until the done flag is set by the
// parent context.
- for atomic.LoadInt32(&in.cvm.abort) == 0 {
+ steps := 0
+ for {
+ steps++
+ if steps%1000 == 0 && atomic.LoadInt32(&in.cvm.abort) != 0 {
+ break
+ }
if in.cfg.Debug {
// Capture pre-execution values for tracing.
logged, pcCopy, energyCopy = false, pc, contract.Energy
@@ -184,14 +198,14 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// enough stack items available to perform the operation.
op = contract.GetOp(pc)
operation := in.cfg.JumpTable[op]
- if !operation.valid {
- return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
+ if operation == nil {
+ return nil, &ErrInvalidOpCode{opcode: op}
}
// Validate stack
if sLen := stack.len(); sLen < operation.minStack {
- return nil, fmt.Errorf("stack underflow (%d <=> %d)", sLen, operation.minStack)
+ return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack}
} else if sLen > operation.maxStack {
- return nil, fmt.Errorf("stack limit reached %d (%d)", sLen, operation.maxStack)
+ return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
}
// If the operation is valid, enforce and write restrictions
if in.readOnly {
@@ -201,7 +215,7 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// account to the others means the state is modified and should also
// return with an error.
if operation.writes || (op == CALL && stack.Back(2).Sign() != 0) {
- return nil, errWriteProtection
+ return nil, ErrWriteProtection
}
}
// Static portion of energy
@@ -218,12 +232,12 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if operation.memorySize != nil {
memSize, overflow := operation.memorySize(stack)
if overflow {
- return nil, errEnergyUintOverflow
+ return nil, ErrEnergyUintOverflow
}
// memory is expanded in words of 32 bytes. Energy
// is also calculated in words.
if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
- return nil, errEnergyUintOverflow
+ return nil, ErrEnergyUintOverflow
}
}
// Dynamic portion of energy
@@ -242,28 +256,23 @@ func (in *CVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
}
if in.cfg.Debug {
- in.cfg.Tracer.CaptureState(in.cvm, pc, op, energyCopy, cost, mem, stack, contract, in.cvm.depth, err)
+ in.cfg.Tracer.CaptureState(in.cvm, pc, op, energyCopy, cost, mem, stack, returns, in.returnData, contract, in.cvm.depth, err)
logged = true
}
// execute the operation
- res, err = operation.execute(&pc, in, contract, mem, stack)
- // verifyPool is a build flag. Pool verification makes sure the integrity
- // of the integer pool by comparing values to a default value.
- if verifyPool {
- verifyIntegerPool(in.intPool)
- }
+ res, err = operation.execute(&pc, in, callContext)
// if the operation clears the return data (e.g. it has returning data)
// set the last return to the result of the operation.
if operation.returns {
- in.returnData = res
+ in.returnData = common.CopyBytes(res)
}
switch {
case err != nil:
return nil, err
case operation.reverts:
- return res, errExecutionReverted
+ return res, ErrExecutionReverted
case operation.halts:
return res, nil
case !operation.jumps:
diff --git a/core/vm/intpool.go b/core/vm/intpool.go
deleted file mode 100644
index e6f5ccc5e..000000000
--- a/core/vm/intpool.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package vm
-
-import (
- "math/big"
- "sync"
-)
-
-var checkVal = big.NewInt(-42)
-
-const poolLimit = 256
-
-// intPool is a pool of big integers that
-// can be reused for all big.Int operations.
-type intPool struct {
- pool *Stack
-}
-
-func newIntPool() *intPool {
- return &intPool{pool: newstack()}
-}
-
-// get retrieves a big int from the pool, allocating one if the pool is empty.
-// Note, the returned int's value is arbitrary and will not be zeroed!
-func (p *intPool) get() *big.Int {
- if p.pool.len() > 0 {
- return p.pool.pop()
- }
- return new(big.Int)
-}
-
-// getZero retrieves a big int from the pool, setting it to zero or allocating
-// a new one if the pool is empty.
-func (p *intPool) getZero() *big.Int {
- if p.pool.len() > 0 {
- return p.pool.pop().SetUint64(0)
- }
- return new(big.Int)
-}
-
-// put returns an allocated big int to the pool to be later reused by get calls.
-// Note, the values as saved as is; neither put nor get zeroes the ints out!
-func (p *intPool) put(is ...*big.Int) {
- if len(p.pool.data) > poolLimit {
- return
- }
- for _, i := range is {
- // verifyPool is a build flag. Pool verification makes sure the integrity
- // of the integer pool by comparing values to a default value.
- if verifyPool {
- i.Set(checkVal)
- }
- p.pool.push(i)
- }
-}
-
-// The intPool pool's default capacity
-const poolDefaultCap = 25
-
-// intPoolPool manages a pool of intPools.
-type intPoolPool struct {
- pools []*intPool
- lock sync.Mutex
-}
-
-var poolOfIntPools = &intPoolPool{
- pools: make([]*intPool, 0, poolDefaultCap),
-}
-
-// get is looking for an available pool to return.
-func (ipp *intPoolPool) get() *intPool {
- ipp.lock.Lock()
- defer ipp.lock.Unlock()
-
- if len(poolOfIntPools.pools) > 0 {
- ip := ipp.pools[len(ipp.pools)-1]
- ipp.pools = ipp.pools[:len(ipp.pools)-1]
- return ip
- }
- return newIntPool()
-}
-
-// put a pool that has been allocated with get.
-func (ipp *intPoolPool) put(ip *intPool) {
- ipp.lock.Lock()
- defer ipp.lock.Unlock()
-
- if len(ipp.pools) < cap(ipp.pools) {
- ipp.pools = append(ipp.pools, ip)
- }
-}
diff --git a/core/vm/intpool_test.go b/core/vm/intpool_test.go
deleted file mode 100644
index 31b85eebd..000000000
--- a/core/vm/intpool_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package vm
-
-import (
- "testing"
-)
-
-func TestIntPoolPoolGet(t *testing.T) {
- poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
-
- nip := poolOfIntPools.get()
- if nip == nil {
- t.Fatalf("Invalid pool allocation")
- }
-}
-
-func TestIntPoolPoolPut(t *testing.T) {
- poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
-
- nip := poolOfIntPools.get()
- if len(poolOfIntPools.pools) != 0 {
- t.Fatalf("Pool got added to list when none should have been")
- }
-
- poolOfIntPools.put(nip)
- if len(poolOfIntPools.pools) == 0 {
- t.Fatalf("Pool did not get added to list when one should have been")
- }
-}
-
-func TestIntPoolPoolReUse(t *testing.T) {
- poolOfIntPools.pools = make([]*intPool, 0, poolDefaultCap)
- nip := poolOfIntPools.get()
- poolOfIntPools.put(nip)
- poolOfIntPools.get()
-
- if len(poolOfIntPools.pools) != 0 {
- t.Fatalf("Invalid number of pools. Got %d, expected %d", len(poolOfIntPools.pools), 0)
- }
-}
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 51615af21..a6e36aac3 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -17,20 +17,16 @@
package vm
import (
- "errors"
-
"github.com/core-coin/go-core/params"
)
type (
- executionFunc func(pc *uint64, interpreter *CVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error)
+ executionFunc func(pc *uint64, interpreter *CVMInterpreter, callContext *callCtx) ([]byte, error)
energyFunc func(*CVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*Stack) (size uint64, overflow bool)
)
-var errEnergyUintOverflow = errors.New("energy uint64 overflow")
-
type operation struct {
// execute is the operation function
execute executionFunc
@@ -48,7 +44,6 @@ type operation struct {
halts bool // indicates whether the operation should halt further execution
jumps bool // indicates whether the program counter should not increment
writes bool // determines whether this a state modifying operation
- valid bool // indication whether the retrieved operation is valid and known
reverts bool // determines whether the operation reverts state (implicitly halts)
returns bool // determines whether the operations sets the return data content
}
@@ -58,7 +53,7 @@ var (
)
// JumpTable contains the CVM opcodes supported at a given fork.
-type JumpTable [256]operation
+type JumpTable [256]*operation
// newInstructionSet returns the instructions.
func newInstructionSet() JumpTable {
@@ -69,161 +64,138 @@ func newInstructionSet() JumpTable {
minStack: minStack(0, 0),
maxStack: maxStack(0, 0),
halts: true,
- valid: true,
},
ADD: {
execute: opAdd,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
MUL: {
execute: opMul,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SUB: {
execute: opSub,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
DIV: {
execute: opDiv,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SDIV: {
execute: opSdiv,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
MOD: {
execute: opMod,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SMOD: {
execute: opSmod,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
ADDMOD: {
execute: opAddmod,
constantEnergy: EnergyMidStep,
minStack: minStack(3, 1),
maxStack: maxStack(3, 1),
- valid: true,
},
MULMOD: {
execute: opMulmod,
constantEnergy: EnergyMidStep,
minStack: minStack(3, 1),
maxStack: maxStack(3, 1),
- valid: true,
},
EXP: {
execute: opExp,
dynamicEnergy: energyExp,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SIGNEXTEND: {
execute: opSignExtend,
constantEnergy: EnergyFastStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
LT: {
execute: opLt,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
GT: {
execute: opGt,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SLT: {
execute: opSlt,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SGT: {
execute: opSgt,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
EQ: {
execute: opEq,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
ISZERO: {
execute: opIszero,
constantEnergy: EnergyFastestStep,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
AND: {
execute: opAnd,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
XOR: {
execute: opXor,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
OR: {
execute: opOr,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
NOT: {
execute: opNot,
constantEnergy: EnergyFastestStep,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
BYTE: {
execute: opByte,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SHA3: {
execute: opSha3,
@@ -232,56 +204,48 @@ func newInstructionSet() JumpTable {
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
memorySize: memorySha3,
- valid: true,
},
ADDRESS: {
execute: opAddress,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
BALANCE: {
execute: opBalance,
constantEnergy: params.BalanceEnergy,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
ORIGIN: {
execute: opOrigin,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
CALLER: {
execute: opCaller,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
CALLVALUE: {
execute: opCallValue,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
CALLDATALOAD: {
execute: opCallDataLoad,
constantEnergy: EnergyFastestStep,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
CALLDATASIZE: {
execute: opCallDataSize,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
CALLDATACOPY: {
execute: opCallDataCopy,
@@ -290,14 +254,12 @@ func newInstructionSet() JumpTable {
minStack: minStack(3, 0),
maxStack: maxStack(3, 0),
memorySize: memoryCallDataCopy,
- valid: true,
},
CODESIZE: {
execute: opCodeSize,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
CODECOPY: {
execute: opCodeCopy,
@@ -306,21 +268,18 @@ func newInstructionSet() JumpTable {
minStack: minStack(3, 0),
maxStack: maxStack(3, 0),
memorySize: memoryCodeCopy,
- valid: true,
},
ENERGYPRICE: {
execute: opEnergyprice,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
EXTCODESIZE: {
execute: opExtCodeSize,
constantEnergy: params.ExtcodeSizeEnergy,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
EXTCODECOPY: {
execute: opExtCodeCopy,
@@ -329,56 +288,48 @@ func newInstructionSet() JumpTable {
minStack: minStack(4, 0),
maxStack: maxStack(4, 0),
memorySize: memoryExtCodeCopy,
- valid: true,
},
BLOCKHASH: {
execute: opBlockhash,
constantEnergy: EnergyExtStep,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
COINBASE: {
execute: opCoinbase,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
TIMESTAMP: {
execute: opTimestamp,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
NUMBER: {
execute: opNumber,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
DIFFICULTY: {
execute: opDifficulty,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
ENERGYLIMIT: {
execute: opEnergyLimit,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
POP: {
execute: opPop,
constantEnergy: EnergyQuickStep,
minStack: minStack(1, 0),
maxStack: maxStack(1, 0),
- valid: true,
},
MLOAD: {
execute: opMload,
@@ -387,7 +338,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
memorySize: memoryMLoad,
- valid: true,
},
MSTORE: {
execute: opMstore,
@@ -396,7 +346,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
memorySize: memoryMStore,
- valid: true,
},
MSTORE8: {
execute: opMstore8,
@@ -405,22 +354,18 @@ func newInstructionSet() JumpTable {
memorySize: memoryMStore8,
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
-
- valid: true,
},
SLOAD: {
execute: opSload,
constantEnergy: params.SloadEnergy,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
SSTORE: {
execute: opSstore,
dynamicEnergy: energySStore,
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
- valid: true,
writes: true,
},
JUMP: {
@@ -429,7 +374,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(1, 0),
maxStack: maxStack(1, 0),
jumps: true,
- valid: true,
},
JUMPI: {
execute: opJumpi,
@@ -437,483 +381,414 @@ func newInstructionSet() JumpTable {
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
jumps: true,
- valid: true,
},
PC: {
execute: opPc,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
MSIZE: {
execute: opMsize,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
ENERGY: {
execute: opEnergy,
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
JUMPDEST: {
execute: opJumpdest,
constantEnergy: params.JumpdestEnergy,
minStack: minStack(0, 0),
maxStack: maxStack(0, 0),
- valid: true,
},
PUSH1: {
execute: opPush1,
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH2: {
execute: makePush(2, 2),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH3: {
execute: makePush(3, 3),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH4: {
execute: makePush(4, 4),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH5: {
execute: makePush(5, 5),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH6: {
execute: makePush(6, 6),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH7: {
execute: makePush(7, 7),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH8: {
execute: makePush(8, 8),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH9: {
execute: makePush(9, 9),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH10: {
execute: makePush(10, 10),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH11: {
execute: makePush(11, 11),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH12: {
execute: makePush(12, 12),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH13: {
execute: makePush(13, 13),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH14: {
execute: makePush(14, 14),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH15: {
execute: makePush(15, 15),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH16: {
execute: makePush(16, 16),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH17: {
execute: makePush(17, 17),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH18: {
execute: makePush(18, 18),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH19: {
execute: makePush(19, 19),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH20: {
execute: makePush(20, 20),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH21: {
execute: makePush(21, 21),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH22: {
execute: makePush(22, 22),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH23: {
execute: makePush(23, 23),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH24: {
execute: makePush(24, 24),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH25: {
execute: makePush(25, 25),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH26: {
execute: makePush(26, 26),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH27: {
execute: makePush(27, 27),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH28: {
execute: makePush(28, 28),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH29: {
execute: makePush(29, 29),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH30: {
execute: makePush(30, 30),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH31: {
execute: makePush(31, 31),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
PUSH32: {
execute: makePush(32, 32),
constantEnergy: EnergyFastestStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
DUP1: {
execute: makeDup(1),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(1),
maxStack: maxDupStack(1),
- valid: true,
},
DUP2: {
execute: makeDup(2),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(2),
maxStack: maxDupStack(2),
- valid: true,
},
DUP3: {
execute: makeDup(3),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(3),
maxStack: maxDupStack(3),
- valid: true,
},
DUP4: {
execute: makeDup(4),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(4),
maxStack: maxDupStack(4),
- valid: true,
},
DUP5: {
execute: makeDup(5),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(5),
maxStack: maxDupStack(5),
- valid: true,
},
DUP6: {
execute: makeDup(6),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(6),
maxStack: maxDupStack(6),
- valid: true,
},
DUP7: {
execute: makeDup(7),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(7),
maxStack: maxDupStack(7),
- valid: true,
},
DUP8: {
execute: makeDup(8),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(8),
maxStack: maxDupStack(8),
- valid: true,
},
DUP9: {
execute: makeDup(9),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(9),
maxStack: maxDupStack(9),
- valid: true,
},
DUP10: {
execute: makeDup(10),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(10),
maxStack: maxDupStack(10),
- valid: true,
},
DUP11: {
execute: makeDup(11),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(11),
maxStack: maxDupStack(11),
- valid: true,
},
DUP12: {
execute: makeDup(12),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(12),
maxStack: maxDupStack(12),
- valid: true,
},
DUP13: {
execute: makeDup(13),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(13),
maxStack: maxDupStack(13),
- valid: true,
},
DUP14: {
execute: makeDup(14),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(14),
maxStack: maxDupStack(14),
- valid: true,
},
DUP15: {
execute: makeDup(15),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(15),
maxStack: maxDupStack(15),
- valid: true,
},
DUP16: {
execute: makeDup(16),
constantEnergy: EnergyFastestStep,
minStack: minDupStack(16),
maxStack: maxDupStack(16),
- valid: true,
},
SWAP1: {
execute: makeSwap(1),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(2),
maxStack: maxSwapStack(2),
- valid: true,
},
SWAP2: {
execute: makeSwap(2),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(3),
maxStack: maxSwapStack(3),
- valid: true,
},
SWAP3: {
execute: makeSwap(3),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(4),
maxStack: maxSwapStack(4),
- valid: true,
},
SWAP4: {
execute: makeSwap(4),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(5),
maxStack: maxSwapStack(5),
- valid: true,
},
SWAP5: {
execute: makeSwap(5),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(6),
maxStack: maxSwapStack(6),
- valid: true,
},
SWAP6: {
execute: makeSwap(6),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(7),
maxStack: maxSwapStack(7),
- valid: true,
},
SWAP7: {
execute: makeSwap(7),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(8),
maxStack: maxSwapStack(8),
- valid: true,
},
SWAP8: {
execute: makeSwap(8),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(9),
maxStack: maxSwapStack(9),
- valid: true,
},
SWAP9: {
execute: makeSwap(9),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(10),
maxStack: maxSwapStack(10),
- valid: true,
},
SWAP10: {
execute: makeSwap(10),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(11),
maxStack: maxSwapStack(11),
- valid: true,
},
SWAP11: {
execute: makeSwap(11),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(12),
maxStack: maxSwapStack(12),
- valid: true,
},
SWAP12: {
execute: makeSwap(12),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(13),
maxStack: maxSwapStack(13),
- valid: true,
},
SWAP13: {
execute: makeSwap(13),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(14),
maxStack: maxSwapStack(14),
- valid: true,
},
SWAP14: {
execute: makeSwap(14),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(15),
maxStack: maxSwapStack(15),
- valid: true,
},
SWAP15: {
execute: makeSwap(15),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(16),
maxStack: maxSwapStack(16),
- valid: true,
},
SWAP16: {
execute: makeSwap(16),
constantEnergy: EnergyFastestStep,
minStack: minSwapStack(17),
maxStack: maxSwapStack(17),
- valid: true,
},
LOG0: {
execute: makeLog(0),
@@ -921,7 +796,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
memorySize: memoryLog,
- valid: true,
writes: true,
},
LOG1: {
@@ -930,7 +804,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(3, 0),
maxStack: maxStack(3, 0),
memorySize: memoryLog,
- valid: true,
writes: true,
},
LOG2: {
@@ -939,7 +812,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(4, 0),
maxStack: maxStack(4, 0),
memorySize: memoryLog,
- valid: true,
writes: true,
},
LOG3: {
@@ -948,7 +820,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(5, 0),
maxStack: maxStack(5, 0),
memorySize: memoryLog,
- valid: true,
writes: true,
},
LOG4: {
@@ -957,7 +828,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(6, 0),
maxStack: maxStack(6, 0),
memorySize: memoryLog,
- valid: true,
writes: true,
},
CREATE: {
@@ -967,7 +837,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(3, 1),
maxStack: maxStack(3, 1),
memorySize: memoryCreate,
- valid: true,
writes: true,
returns: true,
},
@@ -978,7 +847,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(7, 1),
maxStack: maxStack(7, 1),
memorySize: memoryCall,
- valid: true,
returns: true,
},
CALLCODE: {
@@ -988,7 +856,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(7, 1),
maxStack: maxStack(7, 1),
memorySize: memoryCall,
- valid: true,
returns: true,
},
RETURN: {
@@ -998,7 +865,6 @@ func newInstructionSet() JumpTable {
maxStack: maxStack(2, 0),
memorySize: memoryReturn,
halts: true,
- valid: true,
},
SELFDESTRUCT: {
execute: opSuicide,
@@ -1006,7 +872,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(1, 0),
maxStack: maxStack(1, 0),
halts: true,
- valid: true,
writes: true,
},
DELEGATECALL: {
@@ -1016,7 +881,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(6, 1),
maxStack: maxStack(6, 1),
memorySize: memoryDelegateCall,
- valid: true,
returns: true,
},
STATICCALL: {
@@ -1026,7 +890,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(6, 1),
maxStack: maxStack(6, 1),
memorySize: memoryStaticCall,
- valid: true,
returns: true,
},
RETURNDATASIZE: {
@@ -1034,7 +897,6 @@ func newInstructionSet() JumpTable {
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
RETURNDATACOPY: {
execute: opReturnDataCopy,
@@ -1043,7 +905,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(3, 0),
maxStack: maxStack(3, 0),
memorySize: memoryReturnDataCopy,
- valid: true,
},
REVERT: {
execute: opRevert,
@@ -1051,7 +912,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(2, 0),
maxStack: maxStack(2, 0),
memorySize: memoryRevert,
- valid: true,
reverts: true,
returns: true,
},
@@ -1060,28 +920,24 @@ func newInstructionSet() JumpTable {
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SHR: {
execute: opSHR,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
SAR: {
execute: opSAR,
constantEnergy: EnergyFastestStep,
minStack: minStack(2, 1),
maxStack: maxStack(2, 1),
- valid: true,
},
EXTCODEHASH: {
execute: opExtCodeHash,
constantEnergy: params.ExtcodeHashEnergy,
minStack: minStack(1, 1),
maxStack: maxStack(1, 1),
- valid: true,
},
CREATE2: {
execute: opCreate2,
@@ -1090,7 +946,6 @@ func newInstructionSet() JumpTable {
minStack: minStack(4, 1),
maxStack: maxStack(4, 1),
memorySize: memoryCreate2,
- valid: true,
writes: true,
returns: true,
},
@@ -1099,14 +954,34 @@ func newInstructionSet() JumpTable {
constantEnergy: EnergyQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
},
SELFBALANCE: {
execute: opSelfBalance,
constantEnergy: EnergyFastStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
- valid: true,
+ },
+ BEGINSUB: {
+ execute: opBeginSub,
+ constantEnergy: EnergyQuickStep,
+ minStack: minStack(0, 0),
+ maxStack: maxStack(0, 0),
+ },
+ // New opcode
+ JUMPSUB: {
+ execute: opJumpSub,
+ constantEnergy: EnergySlowStep,
+ minStack: minStack(1, 0),
+ maxStack: maxStack(1, 0),
+ jumps: true,
+ },
+ // New opcode
+ RETURNSUB: {
+ execute: opReturnSub,
+ constantEnergy: EnergyFastStep,
+ minStack: minStack(0, 0),
+ maxStack: maxStack(0, 0),
+ jumps: true,
},
}
}
diff --git a/core/vm/logger.go b/core/vm/logger.go
index 7e6790717..866294390 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -18,9 +18,11 @@ package vm
import (
"encoding/hex"
+ "errors"
"fmt"
"io"
"math/big"
+ "strings"
"time"
"github.com/core-coin/go-core/common"
@@ -29,6 +31,8 @@ import (
"github.com/core-coin/go-core/core/types"
)
+var errTraceLimitReached = errors.New("the number of logs reached the specified limit")
+
// Storage represents a contract's storage.
type Storage map[common.Hash]common.Hash
@@ -44,11 +48,12 @@ func (s Storage) Copy() Storage {
// LogConfig are the configuration options for structured logger the CVM
type LogConfig struct {
- DisableMemory bool // disable memory capture
- DisableStack bool // disable stack capture
- DisableStorage bool // disable storage capture
- Debug bool // print output during capture end
- Limit int // maximum length of output, but zero means unlimited
+ DisableMemory bool // disable memory capture
+ DisableStack bool // disable stack capture
+ DisableStorage bool // disable storage capture
+ DisableReturnData bool // disable return data capture
+ Debug bool // print output during capture end
+ Limit int // maximum length of output, but zero means unlimited
}
//go:generate gencodec -type StructLog -field-override structLogMarshaling -out gen_structlog.go
@@ -58,11 +63,13 @@ type LogConfig struct {
type StructLog struct {
Pc uint64 `json:"pc"`
Op OpCode `json:"op"`
- Energy uint64 `json:"energy"`
- EnergyCost uint64 `json:"energyCost"`
+ Energy uint64 `json:"energy"`
+ EnergyCost uint64 `json:"energyCost"`
Memory []byte `json:"memory"`
MemorySize int `json:"memSize"`
Stack []*big.Int `json:"stack"`
+ ReturnStack []uint32 `json:"returnStack"`
+ ReturnData []byte `json:"returnData"`
Storage map[common.Hash]common.Hash `json:"-"`
Depth int `json:"depth"`
RefundCounter uint64 `json:"refund"`
@@ -72,8 +79,9 @@ type StructLog struct {
// overrides for gencodec
type structLogMarshaling struct {
Stack []*math.HexOrDecimal256
- Energy math.HexOrDecimal64
- EnergyCost math.HexOrDecimal64
+ ReturnStack []math.HexOrDecimal64
+ Energy math.HexOrDecimal64
+ EnergyCost math.HexOrDecimal64
Memory hexutil.Bytes
OpName string `json:"opName"` // adds call to OpName() in MarshalJSON
ErrorString string `json:"error"` // adds call to ErrorString() in MarshalJSON
@@ -99,8 +107,8 @@ func (s *StructLog) ErrorString() string {
// if you need to retain them beyond the current call.
type Tracer interface {
CaptureStart(from common.Address, to common.Address, create bool, input []byte, energy uint64, value *big.Int) error
- CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error
- CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error
+ CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, rData []byte, contract *Contract, depth int, err error) error
+ CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, contract *Contract, depth int, err error) error
CaptureEnd(output []byte, energyUsed uint64, t time.Duration, err error) error
}
@@ -137,10 +145,10 @@ func (l *StructLogger) CaptureStart(from common.Address, to common.Address, crea
// CaptureState logs a new structured log message and pushes it out to the environment
//
// CaptureState also tracks SSTORE ops to track dirty values.
-func (l *StructLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+func (l *StructLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, rData []byte, contract *Contract, depth int, err error) error {
// check if already accumulated the specified number of logs
if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) {
- return ErrTraceLimitReached
+ return errTraceLimitReached
}
// initialise new changed values storage container for this contract
@@ -153,8 +161,8 @@ func (l *StructLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost
// it in the local storage container.
if op == SSTORE && stack.len() >= 2 {
var (
- value = common.BigToHash(stack.data[stack.len()-2])
- address = common.BigToHash(stack.data[stack.len()-1])
+ value = common.Hash(stack.data[stack.len()-2].Bytes32())
+ address = common.Hash(stack.data[stack.len()-1].Bytes32())
)
l.changedValues[contract.Address()][address] = value
}
@@ -169,7 +177,7 @@ func (l *StructLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost
if !l.cfg.DisableStack {
stck = make([]*big.Int, len(stack.Data()))
for i, item := range stack.Data() {
- stck[i] = new(big.Int).Set(item)
+ stck[i] = new(big.Int).Set(item.ToBig())
}
}
// Copy a snapshot of the current storage to a new container
@@ -177,16 +185,25 @@ func (l *StructLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost
if !l.cfg.DisableStorage {
storage = l.changedValues[contract.Address()].Copy()
}
- // create a new snapshot of the CVM.
- log := StructLog{pc, op, energy, cost, mem, memory.Len(), stck, storage, depth, env.StateDB.GetRefund(), err}
-
+ var rstack []uint32
+ if !l.cfg.DisableStack && rStack != nil {
+ rstack = make([]uint32, len(rStack.data))
+ copy(rstack, rStack.data)
+ }
+ var rdata []byte
+ if !l.cfg.DisableReturnData {
+ rdata = make([]byte, len(rData))
+ copy(rdata, rData)
+ }
+ // create a new snapshot of the CVM.
+ log := StructLog{pc, op, energy, cost, mem, memory.Len(), stck, rstack, rdata, storage, depth, env.StateDB.GetRefund(), err}
l.logs = append(l.logs, log)
return nil
}
// CaptureFault implements the Tracer interface to trace an execution fault
// while running an opcode.
-func (l *StructLogger) CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+func (l *StructLogger) CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, contract *Contract, depth int, err error) error {
return nil
}
@@ -227,6 +244,12 @@ func WriteTrace(writer io.Writer, logs []StructLog) {
fmt.Fprintf(writer, "%08d %x\n", len(log.Stack)-i-1, math.PaddedBigBytes(log.Stack[i], 32))
}
}
+ if len(log.ReturnStack) > 0 {
+ fmt.Fprintln(writer, "ReturnStack:")
+ for i := len(log.ReturnStack) - 1; i >= 0; i-- {
+ fmt.Fprintf(writer, "%08d 0x%x (%d)\n", len(log.ReturnStack)-i-1, log.ReturnStack[i], log.ReturnStack[i])
+ }
+ }
if len(log.Memory) > 0 {
fmt.Fprintln(writer, "Memory:")
fmt.Fprint(writer, hex.Dump(log.Memory))
@@ -237,6 +260,10 @@ func WriteTrace(writer io.Writer, logs []StructLog) {
fmt.Fprintf(writer, "%x: %x\n", h, item)
}
}
+ if len(log.ReturnData) > 0 {
+ fmt.Fprintln(writer, "ReturnData:")
+ fmt.Fprint(writer, hex.Dump(log.ReturnData))
+ }
fmt.Fprintln(writer)
}
}
@@ -254,3 +281,79 @@ func WriteLogs(writer io.Writer, logs []*types.Log) {
fmt.Fprintln(writer)
}
}
+
+type mdLogger struct {
+ out io.Writer
+ cfg *LogConfig
+}
+
+// NewMarkdownLogger creates a logger which outputs information in a format adapted
+// for human readability, and is also a valid markdown table
+func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger {
+ l := &mdLogger{writer, cfg}
+ if l.cfg == nil {
+ l.cfg = &LogConfig{}
+ }
+ return l
+}
+
+func (t *mdLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
+ if !create {
+ fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
+ from.String(), to.String(),
+ input, gas, value)
+ } else {
+ fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
+ from.String(), to.String(),
+ input, gas, value)
+ }
+
+ fmt.Fprintf(t.out, `
+| Pc | Op | Cost | Stack | RStack |
+|-------|-------------|------|-----------|-----------|
+`)
+ return nil
+}
+
+func (t *mdLogger) CaptureState(env *CVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, rData []byte, contract *Contract, depth int, err error) error {
+ fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost)
+
+ if !t.cfg.DisableStack { // format stack
+ var a []string
+ for _, elem := range stack.data {
+ a = append(a, fmt.Sprintf("%d", elem))
+ }
+ b := fmt.Sprintf("[%v]", strings.Join(a, ","))
+ fmt.Fprintf(t.out, "%10v |", b)
+ }
+ if !t.cfg.DisableStack { // format return stack
+ var a []string
+ for _, elem := range rStack.data {
+ a = append(a, fmt.Sprintf("%2d", elem))
+ }
+ b := fmt.Sprintf("[%v]", strings.Join(a, ","))
+ fmt.Fprintf(t.out, "%10v |", b)
+ }
+ fmt.Fprintln(t.out, "")
+ if err != nil {
+ fmt.Fprintf(t.out, "Error: %v\n", err)
+ }
+ return nil
+}
+
+func (t *mdLogger) CaptureFault(env *CVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, contract *Contract, depth int, err error) error {
+
+ fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err)
+
+ return nil
+}
+
+func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) error {
+ fmt.Fprintf(t.out, `
+Output: 0x%x
+Consumed gas: %d
+Error: %v
+`,
+ output, gasUsed, err)
+ return nil
+}
diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go
index 0c9ad474a..af22b1b0d 100644
--- a/core/vm/logger_json.go
+++ b/core/vm/logger_json.go
@@ -46,12 +46,12 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
}
// CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, rData []byte, contract *Contract, depth int, err error) error {
log := StructLog{
Pc: pc,
Op: op,
- Energy: energy,
- EnergyCost: cost,
+ Energy: energy,
+ EnergyCost: cost,
MemorySize: memory.Len(),
Storage: nil,
Depth: depth,
@@ -62,23 +62,31 @@ func (l *JSONLogger) CaptureState(env *CVM, pc uint64, op OpCode, energy, cost u
log.Memory = memory.Data()
}
if !l.cfg.DisableStack {
- log.Stack = stack.Data()
+ //TODO(@holiman) improve this
+ logstack := make([]*big.Int, len(stack.Data()))
+ for i, item := range stack.Data() {
+ logstack[i] = item.ToBig()
+ }
+ log.Stack = logstack
+ }
+ if !l.cfg.DisableReturnData {
+ log.ReturnData = rData
}
return l.encoder.Encode(log)
}
// CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureFault(env *CVM, pc uint64, op OpCode, energy, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, contract *Contract, depth int, err error) error {
return nil
}
// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, energyUsed uint64, t time.Duration, err error) error {
type endLog struct {
- Output string `json:"output"`
+ Output string `json:"output"`
EnergyUsed math.HexOrDecimal64 `json:"energyUsed"`
- Time time.Duration `json:"time"`
- Err string `json:"error,omitempty"`
+ Time time.Duration `json:"time"`
+ Err string `json:"error,omitempty"`
}
if err != nil {
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(energyUsed), t, err.Error()})
diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go
index 9eb698364..e04a296ee 100644
--- a/core/vm/logger_test.go
+++ b/core/vm/logger_test.go
@@ -17,6 +17,7 @@
package vm
import (
+ "github.com/core-coin/uint256"
"math/big"
"testing"
@@ -29,7 +30,7 @@ type dummyContractRef struct {
calledForEach bool
}
-func (dummyContractRef) ReturnEnergy(*big.Int) {}
+func (dummyContractRef) ReturnEnergy(*big.Int) {}
func (dummyContractRef) Address() common.Address { return common.Address{} }
func (dummyContractRef) Value() *big.Int { return new(big.Int) }
func (dummyContractRef) SetCode(common.Hash, []byte) {}
@@ -54,12 +55,13 @@ func TestStoreCapture(t *testing.T) {
logger = NewStructLogger(nil)
mem = NewMemory()
stack = newstack()
+ rstack = newReturnStack()
contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 0)
)
- stack.push(big.NewInt(1))
- stack.push(big.NewInt(0))
+ stack.push(uint256.NewInt(1))
+ stack.push(new(uint256.Int))
var index common.Hash
- logger.CaptureState(env, 0, SSTORE, 0, 0, mem, stack, contract, 0, nil)
+ logger.CaptureState(env, 0, SSTORE, 0, 0, mem, stack, rstack, nil, contract, 0, nil)
if len(logger.changedValues[contract.Address()]) == 0 {
t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.changedValues[contract.Address()]))
}
diff --git a/core/vm/memory.go b/core/vm/memory.go
index 144b3481c..5f223bcbd 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -18,14 +18,12 @@ package vm
import (
"fmt"
- "math/big"
-
- "github.com/core-coin/go-core/common/math"
+ "github.com/core-coin/uint256"
)
// Memory implements a simple memory model for the Core Virtual Machine.
type Memory struct {
- store []byte
+ store []byte
lastEnergyCost uint64
}
@@ -50,7 +48,7 @@ func (m *Memory) Set(offset, size uint64, value []byte) {
// Set32 sets the 32 bytes starting at offset to the value of val, left-padded with zeroes to
// 32 bytes.
-func (m *Memory) Set32(offset uint64, val *big.Int) {
+func (m *Memory) Set32(offset uint64, val *uint256.Int) {
// length of store may never be less than offset + size.
// The store should be resized PRIOR to setting the memory
if offset+32 > uint64(len(m.store)) {
@@ -59,7 +57,7 @@ func (m *Memory) Set32(offset uint64, val *big.Int) {
// Zero the memory area
copy(m.store[offset:offset+32], []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
// Fill in relevant bits
- math.ReadBits(val, m.store[offset:offset+32])
+ val.WriteToSlice(m.store[offset:])
}
// Resize resizes the memory to size
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 976690163..66791431a 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -107,18 +107,21 @@ const (
// 0x50 range - 'storage' and execution.
const (
- POP OpCode = 0x50 + iota
- MLOAD
- MSTORE
- MSTORE8
- SLOAD
- SSTORE
- JUMP
- JUMPI
- PC
- MSIZE
- ENERGY
- JUMPDEST
+ POP OpCode = 0x50
+ MLOAD OpCode = 0x51
+ MSTORE OpCode = 0x52
+ MSTORE8 OpCode = 0x53
+ SLOAD OpCode = 0x54
+ SSTORE OpCode = 0x55
+ JUMP OpCode = 0x56
+ JUMPI OpCode = 0x57
+ PC OpCode = 0x58
+ MSIZE OpCode = 0x59
+ ENERGY OpCode = 0x5a
+ JUMPDEST OpCode = 0x5b
+ BEGINSUB OpCode = 0x5c
+ RETURNSUB OpCode = 0x5d
+ JUMPSUB OpCode = 0x5e
)
// 0x60 range.
@@ -297,6 +300,10 @@ var opCodeToString = map[OpCode]string{
ENERGY: "ENERGY",
JUMPDEST: "JUMPDEST",
+ BEGINSUB: "BEGINSUB",
+ JUMPSUB: "JUMPSUB",
+ RETURNSUB: "RETURNSUB",
+
// 0x60 range - push.
PUSH1: "PUSH1",
PUSH2: "PUSH2",
@@ -389,7 +396,7 @@ var opCodeToString = map[OpCode]string{
func (op OpCode) String() string {
str := opCodeToString[op]
if len(str) == 0 {
- return fmt.Sprintf("Missing opcode 0x%x", int(op))
+ return fmt.Sprintf("opcode 0x%x not defined", int(op))
}
return str
@@ -461,6 +468,9 @@ var stringToOp = map[string]OpCode{
"MSIZE": MSIZE,
"ENERGY": ENERGY,
"JUMPDEST": JUMPDEST,
+ "BEGINSUB": BEGINSUB,
+ "RETURNSUB": RETURNSUB,
+ "JUMPSUB": JUMPSUB,
"PUSH1": PUSH1,
"PUSH2": PUSH2,
"PUSH3": PUSH3,
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 303ad4a77..6c28187f0 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -17,10 +17,14 @@
package runtime
import (
+ "fmt"
+ "github.com/core-coin/go-core/core/asm"
"github.com/hpcloud/tail/util"
"math/big"
+ "os"
"strings"
"testing"
+ "time"
"github.com/core-coin/go-core/accounts/abi"
"github.com/core-coin/go-core/common"
@@ -316,3 +320,401 @@ func TestBlockhash(t *testing.T) {
t.Errorf("suboptimal; too much chain iteration, expected %d, got %d", exp, got)
}
}
+
+type stepCounter struct {
+ inner *vm.JSONLogger
+ steps int
+}
+
+func (s *stepCounter) CaptureStart(from common.Address, to common.Address, create bool, input []byte, energy uint64, value *big.Int) error {
+ return nil
+}
+
+func (s *stepCounter) CaptureState(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, rStack *vm.ReturnStack, rData []byte, contract *vm.Contract, depth int, err error) error {
+ s.steps++
+ // Enable this for more output
+ //s.inner.CaptureState(env, pc, op, energy, cost, memory, stack, rStack, rData, contract, depth, err)
+ return nil
+}
+
+func (s *stepCounter) CaptureFault(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, rStack *vm.ReturnStack, contract *vm.Contract, depth int, err error) error {
+ return nil
+}
+
+func (s *stepCounter) CaptureEnd(output []byte, energyUsed uint64, t time.Duration, err error) error {
+ return nil
+}
+
+func TestJumpSub1024Limit(t *testing.T) {
+ state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ address, _ := common.HexToAddress("cb270000000000000000000000000000000000000001")
+ // Code is
+ // 0 beginsub
+ // 1 push 0
+ // 3 jumpsub
+ //
+ // The code recursively calls itself. It should error when the returns-stack
+ // grows above 1023
+ state.SetCode(address, []byte{
+ byte(vm.PUSH1), 3,
+ byte(vm.JUMPSUB),
+ byte(vm.BEGINSUB),
+ byte(vm.PUSH1), 3,
+ byte(vm.JUMPSUB),
+ })
+ tracer := stepCounter{inner: vm.NewJSONLogger(nil, os.Stdout)}
+ // Enable 2315
+ _, _, err := Call(address, nil, &Config{State: state,
+ EnergyLimit: 20000,
+ ChainConfig: params.AllCryptoreProtocolChanges,
+ CVMConfig: vm.Config{
+ Debug: true,
+ //Tracer: vm.NewJSONLogger(nil, os.Stdout),
+ Tracer: &tracer,
+ }})
+ exp := "return stack limit reached"
+ if err.Error() != exp {
+ t.Fatalf("expected %v, got %v", exp, err)
+ }
+ if exp, got := 2048, tracer.steps; exp != got {
+ t.Fatalf("expected %d steps, got %d", exp, got)
+ }
+}
+
+func TestReturnSubShallow(t *testing.T) {
+ state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ address, _ := common.HexToAddress("cb270000000000000000000000000000000000000001")
+ // The code does returnsub without having anything on the returnstack.
+ // It should not panic, but just fail after one step
+ state.SetCode(address, []byte{
+ byte(vm.PUSH1), 5,
+ byte(vm.JUMPSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ })
+ tracer := stepCounter{}
+
+ // Enable 2315
+ _, _, err := Call(address, nil, &Config{State: state,
+ EnergyLimit: 10000,
+ ChainConfig: params.AllCryptoreProtocolChanges,
+ CVMConfig: vm.Config{
+ Debug: true,
+ Tracer: &tracer,
+ }})
+
+ exp := "invalid retsub"
+ if err.Error() != exp {
+ t.Fatalf("expected %v, got %v", exp, err)
+ }
+ if exp, got := 4, tracer.steps; exp != got {
+ t.Fatalf("expected %d steps, got %d", exp, got)
+ }
+}
+
+// disabled -- only used for generating markdown
+func DisabledTestReturnCases(t *testing.T) {
+ cfg := &Config{
+ CVMConfig: vm.Config{
+ Debug: true,
+ Tracer: vm.NewMarkdownLogger(nil, os.Stdout),
+ },
+ }
+ // This should fail at first opcode
+ Execute([]byte{
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ byte(vm.PC),
+ }, nil, cfg)
+
+ // Should also fail
+ Execute([]byte{
+ byte(vm.PUSH1), 5,
+ byte(vm.JUMPSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ }, nil, cfg)
+
+ // This should complete
+ Execute([]byte{
+ byte(vm.PUSH1), 0x4,
+ byte(vm.JUMPSUB),
+ byte(vm.STOP),
+ byte(vm.BEGINSUB),
+ byte(vm.PUSH1), 0x9,
+ byte(vm.JUMPSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ }, nil, cfg)
+}
+
+// DisabledTestEipExampleCases contains various testcases that are used for the
+// EIP examples
+// This test is disabled, as it's only used for generating markdown
+func DisabledTestEipExampleCases(t *testing.T) {
+ cfg := &Config{
+ CVMConfig: vm.Config{
+ Debug: true,
+ Tracer: vm.NewMarkdownLogger(nil, os.Stdout),
+ },
+ }
+ prettyPrint := func(comment string, code []byte) {
+ instrs := make([]string, 0)
+ it := asm.NewInstructionIterator(code)
+ for it.Next() {
+ if it.Arg() != nil && 0 < len(it.Arg()) {
+ instrs = append(instrs, fmt.Sprintf("%v 0x%x", it.Op(), it.Arg()))
+ } else {
+ instrs = append(instrs, fmt.Sprintf("%v", it.Op()))
+ }
+ }
+ ops := strings.Join(instrs, ", ")
+
+ fmt.Printf("%v\nBytecode: `0x%x` (`%v`)\n",
+ comment,
+ code, ops)
+ Execute(code, nil, cfg)
+ }
+
+ { // First eip testcase
+ code := []byte{
+ byte(vm.PUSH1), 4,
+ byte(vm.JUMPSUB),
+ byte(vm.STOP),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ }
+ prettyPrint("This should jump into a subroutine, back out and stop.", code)
+ }
+
+ {
+ code := []byte{
+ byte(vm.PUSH9), 0x00, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, (4 + 8),
+ byte(vm.JUMPSUB),
+ byte(vm.STOP),
+ byte(vm.BEGINSUB),
+ byte(vm.PUSH1), 8 + 9,
+ byte(vm.JUMPSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ }
+ prettyPrint("This should execute fine, going into one two depths of subroutines", code)
+ }
+ // TODO(@holiman) move this test into an actual test, which not only prints
+ // out the trace.
+ {
+ code := []byte{
+ byte(vm.PUSH9), 0x01, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, (4 + 8),
+ byte(vm.JUMPSUB),
+ byte(vm.STOP),
+ byte(vm.BEGINSUB),
+ byte(vm.PUSH1), 8 + 9,
+ byte(vm.JUMPSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ }
+ prettyPrint("This should fail, since the given location is outside of the "+
+ "code-range. The code is the same as previous example, except that the "+
+ "pushed location is `0x01000000000000000c` instead of `0x0c`.", code)
+ }
+ {
+ // This should fail at first opcode
+ code := []byte{
+ byte(vm.RETURNSUB),
+ byte(vm.PC),
+ byte(vm.PC),
+ }
+ prettyPrint("This should fail at first opcode, due to shallow `return_stack`", code)
+
+ }
+ {
+ code := []byte{
+ byte(vm.PUSH1), 5, // Jump past the subroutine
+ byte(vm.JUMP),
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.JUMPDEST),
+ byte(vm.PUSH1), 3, // Now invoke the subroutine
+ byte(vm.JUMPSUB),
+ }
+ prettyPrint("In this example. the JUMPSUB is on the last byte of code. When the "+
+ "subroutine returns, it should hit the 'virtual stop' _after_ the bytecode, "+
+ "and not exit with error", code)
+ }
+
+ {
+ code := []byte{
+ byte(vm.BEGINSUB),
+ byte(vm.RETURNSUB),
+ byte(vm.STOP),
+ }
+ prettyPrint("In this example, the code 'walks' into a subroutine, which is not "+
+ "allowed, and causes an error", code)
+ }
+}
+
+// benchmarkNonModifyingCode benchmarks code, but if the code modifies the
+// state, this should not be used, since it does not reset the state between runs.
+func benchmarkNonModifyingCode(energy uint64, code []byte, name string, b *testing.B) {
+ cfg := new(Config)
+ setDefaults(cfg)
+ cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ cfg.EnergyLimit = energy
+ var (
+ destination = common.BytesToAddress([]byte("contract"))
+ vmenv = NewEnv(cfg)
+ sender = vm.AccountRef(cfg.Origin)
+ )
+ cfg.State.CreateAccount(destination)
+ eoa, _ := common.HexToAddress("cb270000000000000000000000000000000000000001")
+ {
+ cfg.State.CreateAccount(eoa)
+ cfg.State.SetNonce(eoa, 100)
+ }
+ reverting, _ := common.HexToAddress("cb970000000000000000000000000000000000000002")
+ {
+ cfg.State.CreateAccount(reverting)
+ cfg.State.SetCode(reverting, []byte{
+ byte(vm.PUSH1), 0x00,
+ byte(vm.PUSH1), 0x00,
+ byte(vm.REVERT),
+ })
+ }
+
+ //cfg.State.CreateAccount(cfg.Origin)
+ // set the receiver's (the executing contract) code for execution.
+ cfg.State.SetCode(destination, code)
+ vmenv.Call(sender, destination, nil, energy, cfg.Value)
+
+ b.Run(name, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ vmenv.Call(sender, destination, nil, energy, cfg.Value)
+ }
+ })
+}
+
+// BenchmarkSimpleLoop test a pretty simple loop which loops until OOG
+// 55 ms
+func BenchmarkSimpleLoop(b *testing.B) {
+
+ staticCallIdentity := []byte{
+ byte(vm.JUMPDEST), // [ count ]
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.DUP1), // out insize
+ byte(vm.DUP1), // in offset
+ byte(vm.PUSH1), 0x4, // address of identity
+ byte(vm.ENERGY), // energy
+ byte(vm.STATICCALL),
+ byte(vm.POP), // pop return value
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ callIdentity := []byte{
+ byte(vm.JUMPDEST), // [ count ]
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.DUP1), // out insize
+ byte(vm.DUP1), // in offset
+ byte(vm.DUP1), // value
+ byte(vm.PUSH1), 0x4, // address of identity
+ byte(vm.ENERGY), // energy
+ byte(vm.CALL),
+ byte(vm.POP), // pop return value
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ callInexistant := []byte{
+ byte(vm.JUMPDEST), // [ count ]
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.DUP1), // out insize
+ byte(vm.DUP1), // in offset
+ byte(vm.DUP1), // value
+ byte(vm.PUSH1), 0xff, // address of existing contract
+ byte(vm.ENERGY), // energy
+ byte(vm.CALL),
+ byte(vm.POP), // pop return value
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ callEOA := []byte{
+ byte(vm.JUMPDEST), // [ count ]
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.DUP1), // out insize
+ byte(vm.DUP1), // in offset
+ byte(vm.DUP1), // value
+ byte(vm.PUSH1), 0xE0, // address of EOA
+ byte(vm.ENERGY), // energy
+ byte(vm.CALL),
+ byte(vm.POP), // pop return value
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ loopingCode := []byte{
+ byte(vm.JUMPDEST), // [ count ]
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.DUP1), // out insize
+ byte(vm.DUP1), // in offset
+ byte(vm.PUSH1), 0x4, // address of identity
+ byte(vm.ENERGY), // energy
+
+ byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP), byte(vm.POP),
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ calllRevertingContractWithInput := []byte{
+ byte(vm.JUMPDEST), //
+ // push args for the call
+ byte(vm.PUSH1), 0, // out size
+ byte(vm.DUP1), // out offset
+ byte(vm.PUSH1), 0x20, // in size
+ byte(vm.PUSH1), 0x00, // in offset
+ byte(vm.PUSH1), 0x00, // value
+ byte(vm.PUSH1), 0xEE, // address of reverting contract
+ byte(vm.ENERGY), // energy
+ byte(vm.CALL),
+ byte(vm.POP), // pop return value
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ //tracer := vm.NewJSONLogger(nil, os.Stdout)
+ //Execute(loopingCode, nil, &Config{
	// CVMConfig: vm.Config{
+ // Debug: true,
+ // Tracer: tracer,
+ // }})
+ // 100M energy
+ benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", b)
+ benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", b)
+ benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", b)
+ benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", b)
+ benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", b)
+ benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", b)
+
+ //benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
+ //benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
+}
diff --git a/core/vm/stack.go b/core/vm/stack.go
index 90ac3ca5a..487e94437 100644
--- a/core/vm/stack.go
+++ b/core/vm/stack.go
@@ -18,36 +18,47 @@ package vm
import (
"fmt"
- "math/big"
+ "github.com/core-coin/uint256"
+ "sync"
)
+var stackPool = sync.Pool{
+ New: func() interface{} {
+ return &Stack{data: make([]uint256.Int, 0, 16)}
+ },
+}
+
// Stack is an object for basic stack operations. Items popped to the stack are
// expected to be changed and modified. stack does not take care of adding newly
// initialised objects.
type Stack struct {
- data []*big.Int
+ data []uint256.Int
}
func newstack() *Stack {
- return &Stack{data: make([]*big.Int, 0, 1024)}
+ return stackPool.Get().(*Stack)
+}
+
+func returnStack(s *Stack) {
+ s.data = s.data[:0]
+ stackPool.Put(s)
}
-// Data returns the underlying big.Int array.
-func (st *Stack) Data() []*big.Int {
+// Data returns the underlying uint256 array.
+func (st *Stack) Data() []uint256.Int {
return st.data
}
-func (st *Stack) push(d *big.Int) {
+func (st *Stack) push(d *uint256.Int) {
// NOTE push limit (1024) is checked in baseCheck
- //stackItem := new(big.Int).Set(d)
- //st.data = append(st.data, stackItem)
- st.data = append(st.data, d)
+ st.data = append(st.data, *d)
}
-func (st *Stack) pushN(ds ...*big.Int) {
+func (st *Stack) pushN(ds ...uint256.Int) {
+ // FIXME: Is there a way to pass args by pointers.
st.data = append(st.data, ds...)
}
-func (st *Stack) pop() (ret *big.Int) {
+func (st *Stack) pop() (ret uint256.Int) {
ret = st.data[len(st.data)-1]
st.data = st.data[:len(st.data)-1]
return
@@ -61,17 +72,17 @@ func (st *Stack) swap(n int) {
st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n]
}
-func (st *Stack) dup(pool *intPool, n int) {
- st.push(pool.get().Set(st.data[st.len()-n]))
+func (st *Stack) dup(n int) {
+ st.push(&st.data[st.len()-n])
}
-func (st *Stack) peek() *big.Int {
- return st.data[st.len()-1]
+func (st *Stack) peek() *uint256.Int {
+ return &st.data[st.len()-1]
}
// Back returns the n'th item in stack
-func (st *Stack) Back(n int) *big.Int {
- return st.data[st.len()-n-1]
+func (st *Stack) Back(n int) *uint256.Int {
+ return &st.data[st.len()-n-1]
}
// Print dumps the content of the stack
@@ -86,3 +97,34 @@ func (st *Stack) Print() {
}
fmt.Println("#############")
}
+
+var rStackPool = sync.Pool{
+ New: func() interface{} {
+ return &ReturnStack{data: make([]uint32, 0, 10)}
+ },
+}
+
+// ReturnStack is an object for basic return stack operations.
+type ReturnStack struct {
+ data []uint32
+}
+
+func newReturnStack() *ReturnStack {
+ return rStackPool.Get().(*ReturnStack)
+}
+
+func returnRStack(rs *ReturnStack) {
+ rs.data = rs.data[:0]
+ rStackPool.Put(rs)
+}
+
+func (st *ReturnStack) push(d uint32) {
+ st.data = append(st.data, d)
+}
+
+// A uint32 is sufficient as for code below 4.2G
+func (st *ReturnStack) pop() (ret uint32) {
+ ret = st.data[len(st.data)-1]
+ st.data = st.data[:len(st.data)-1]
+ return
+}
diff --git a/go.mod b/go.mod
index 37d013b6d..be1283a7a 100644
--- a/go.mod
+++ b/go.mod
@@ -11,8 +11,9 @@ require (
github.com/aws/aws-sdk-go v1.25.48
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
- github.com/core-coin/go-goldilocks v1.0.9
- github.com/core-coin/go-randomy v0.0.14
+ github.com/core-coin/go-goldilocks v1.0.12
+ github.com/core-coin/go-randomy v0.0.18
+ github.com/core-coin/uint256 v1.0.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
github.com/dlclark/regexp2 v1.4.0 // indirect
@@ -24,6 +25,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-stack/stack v1.8.0
+ github.com/golang-jwt/jwt/v4 v4.3.0
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.3
github.com/gorilla/websocket v1.4.2
@@ -54,9 +56,12 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
- golang.org/x/text v0.3.6
+ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
+ golang.org/x/text v0.3.7
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
+ google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
diff --git a/go.sum b/go.sum
index 831b1a463..05f057370 100644
--- a/go.sum
+++ b/go.sum
@@ -71,10 +71,12 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
-github.com/core-coin/go-goldilocks v1.0.9 h1:NEX5+LXeLpj3yJPKQgEffj2ly33rOvANsWDcQR8jBUs=
-github.com/core-coin/go-goldilocks v1.0.9/go.mod h1:r6mSidt/OMBXorR8jBJYJttsver3m2EBAcSuf+m2Js0=
-github.com/core-coin/go-randomy v0.0.14 h1:MIzErlW7djO5IcoMAPNYLOC4YZV47TPmofusu01TGZY=
-github.com/core-coin/go-randomy v0.0.14/go.mod h1:7YzU3Hrss60CzXlzziTZNAQUN6u+eLAHH1cL1JRGLBE=
+github.com/core-coin/go-goldilocks v1.0.12 h1:wEoAtrgDGrM09pfyAvrcWHQn9dy+7afDkK9TDRTAccA=
+github.com/core-coin/go-goldilocks v1.0.12/go.mod h1:r6mSidt/OMBXorR8jBJYJttsver3m2EBAcSuf+m2Js0=
+github.com/core-coin/go-randomy v0.0.18 h1:m0IW81uirjpT0fCAnS+5PxPrbI+s/zviFK8hfQbhRcs=
+github.com/core-coin/go-randomy v0.0.18/go.mod h1:7YzU3Hrss60CzXlzziTZNAQUN6u+eLAHH1cL1JRGLBE=
+github.com/core-coin/uint256 v1.0.0 h1:AzgINl9YCnYDRJBlqFlWsVb4sr814LcvPAvkPyVBFVo=
+github.com/core-coin/uint256 v1.0.0/go.mod h1:rrinB/+X6+31MswTK2xwoFGi8i75SH1ILSsRSoOFa1I=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -125,8 +127,9 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -390,8 +393,9 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -403,8 +407,9 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -432,9 +437,10 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -442,8 +448,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -523,8 +530,9 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 81bf58d3b..053d2e8df 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -764,16 +764,19 @@ func (b *Block) Call(ctx context.Context, args struct {
return nil, err
}
}
- result, energy, failed, err := xcbapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCEnergyCap())
+ result, err := xcbapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCEnergyCap())
+ if err != nil {
+ return nil, err
+ }
status := hexutil.Uint64(1)
- if failed {
+ if result.Failed() {
status = 0
}
return &CallResult{
- data: hexutil.Bytes(result),
- energyUsed: hexutil.Uint64(energy),
+ data: result.Return(),
+ energyUsed: hexutil.Uint64(result.UsedEnergy),
status: status,
- }, err
+ }, nil
}
func (b *Block) EstimateEnergy(ctx context.Context, args struct {
@@ -830,16 +833,19 @@ func (p *Pending) Call(ctx context.Context, args struct {
Data xcbapi.CallArgs
}) (*CallResult, error) {
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- result, energy, failed, err := xcbapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCEnergyCap())
+ result, err := xcbapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCEnergyCap())
+ if err != nil {
+ return nil, err
+ }
status := hexutil.Uint64(1)
- if failed {
+ if result.Failed() {
status = 0
}
return &CallResult{
- data: hexutil.Bytes(result),
- energyUsed: hexutil.Uint64(energy),
+ data: result.Return(),
+ energyUsed: hexutil.Uint64(result.UsedEnergy),
status: status,
- }, err
+ }, nil
}
func (p *Pending) EstimateEnergy(ctx context.Context, args struct {
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 76ccc8a01..095d6f8b8 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -17,12 +17,113 @@
package graphql
import (
+ "fmt"
+ "github.com/core-coin/go-core/node"
+ "github.com/core-coin/go-core/xcb"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "net/http"
+ "strings"
"testing"
)
func TestBuildSchema(t *testing.T) {
// Make sure the schema can be parsed and matched up to the object model.
- if _, err := newHandler(nil); err != nil {
+ stack, err := node.New(&node.DefaultConfig)
+ if err != nil {
+ t.Fatalf("could not create new node: %v", err)
+ }
+ // Make sure the schema can be parsed and matched up to the object model.
+ if err := newHandler(stack, nil, []string{}, []string{}); err != nil {
t.Errorf("Could not construct GraphQL handler: %v", err)
}
}
+
+// Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint
+func TestGraphQLHTTPOnSamePort_GQLRequest_Successful(t *testing.T) {
+ stack := createNode(t, true)
+ defer stack.Close()
+ // start node
+ if err := stack.Start(); err != nil {
+ t.Fatalf("could not start node: %v", err)
+ }
+ // create http request
+ body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}")
+ gqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body)
+ if err != nil {
+ t.Error("could not issue new http request ", err)
+ }
+ gqlReq.Header.Set("Content-Type", "application/json")
+ // read from response
+ resp := doHTTPRequest(t, gqlReq)
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatalf("could not read from response body: %v", err)
+ }
+ expected := "{\"data\":{\"block\":{\"number\":\"0x0\"}}}"
+ assert.Equal(t, expected, string(bodyBytes))
+}
+
+// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint
+func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {
+ stack := createNode(t, false)
+ defer stack.Close()
+ if err := stack.Start(); err != nil {
+ t.Fatalf("could not start node: %v", err)
+ }
+
+ // create http request
+ body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}")
+ gqlReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body)
+ if err != nil {
+ t.Error("could not issue new http request ", err)
+ }
+ gqlReq.Header.Set("Content-Type", "application/json")
+ // read from response
+ resp := doHTTPRequest(t, gqlReq)
+ // make sure the request is not handled successfully
+ assert.Equal(t, http.StatusNotFound, resp.StatusCode)
+}
+
+func createNode(t *testing.T, gqlEnabled bool) *node.Node {
+ stack, err := node.New(&node.Config{
+ HTTPHost: "127.0.0.1",
+ HTTPPort: 9393,
+ WSHost: "127.0.0.1",
+ WSPort: 9393,
+ })
+ if err != nil {
+ t.Fatalf("could not create node: %v", err)
+ }
+ if !gqlEnabled {
+ return stack
+ }
+
+ createGQLService(t, stack, "127.0.0.1:9393")
+
+ return stack
+}
+
+func createGQLService(t *testing.T, stack *node.Node, endpoint string) {
+ // create backend
+ xcbBackend, err := xcb.New(stack, &xcb.DefaultConfig)
+ if err != nil {
+ t.Fatalf("could not create xcb backend: %v", err)
+ }
+
+ // create gql service
+ err = New(stack, xcbBackend.APIBackend, []string{}, []string{})
+ if err != nil {
+ t.Fatalf("could not create graphql service: %v", err)
+ }
+}
+
+func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ t.Fatal("could not issue a GET request to the given endpoint", err)
+
+ }
+ return resp
+}
diff --git a/graphql/service.go b/graphql/service.go
index eed48a5b1..24d043db6 100644
--- a/graphql/service.go
+++ b/graphql/service.go
@@ -17,87 +17,36 @@
package graphql
import (
- "fmt"
- "net"
- "net/http"
-
"github.com/core-coin/go-core/internal/xcbapi"
- "github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/rpc"
+ "github.com/core-coin/go-core/node"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
)
-// Service encapsulates a GraphQL service.
-type Service struct {
- endpoint string // The host:port endpoint for this service.
- cors []string // Allowed CORS domains
- vhosts []string // Recognised vhosts
- timeouts rpc.HTTPTimeouts // Timeout settings for HTTP requests.
- backend xcbapi.Backend // The backend that queries will operate on.
- handler http.Handler // The `http.Handler` used to answer queries.
- listener net.Listener // The listening socket.
-}
-
// New constructs a new GraphQL service instance.
-func New(backend xcbapi.Backend, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) (*Service, error) {
- return &Service{
- endpoint: endpoint,
- cors: cors,
- vhosts: vhosts,
- timeouts: timeouts,
- backend: backend,
- }, nil
-}
-
-// Protocols returns the list of protocols exported by this service.
-func (s *Service) Protocols() []p2p.Protocol { return nil }
-
-// APIs returns the list of APIs exported by this service.
-func (s *Service) APIs() []rpc.API { return nil }
-
-// Start is called after all services have been constructed and the networking
-// layer was also initialized to spawn any goroutines required by the service.
-func (s *Service) Start(server *p2p.Server) error {
- var err error
- s.handler, err = newHandler(s.backend)
- if err != nil {
- return err
- }
- if s.listener, err = net.Listen("tcp", s.endpoint); err != nil {
- return err
+func New(stack *node.Node, backend xcbapi.Backend, cors, vhosts []string) error {
+ if backend == nil {
+ panic("missing backend")
}
- go rpc.NewHTTPServer(s.cors, s.vhosts, s.timeouts, s.handler).Serve(s.listener)
- log.Info("GraphQL endpoint opened", "url", fmt.Sprintf("http://%s", s.endpoint))
- return nil
+ // Check if an HTTP server with the given endpoint exists and enable GraphQL on it.
+ return newHandler(stack, backend, cors, vhosts)
}
// newHandler returns a new `http.Handler` that will answer GraphQL queries.
// It additionally exports an interactive query browser on the / endpoint.
-func newHandler(backend xcbapi.Backend) (http.Handler, error) {
+func newHandler(stack *node.Node, backend xcbapi.Backend, cors, vhosts []string) error {
q := Resolver{backend}
s, err := graphql.ParseSchema(schema, &q)
if err != nil {
- return nil, err
+ return err
}
h := &relay.Handler{Schema: s}
+ handler := node.NewHTTPHandlerStack(h, cors, vhosts, nil)
- mux := http.NewServeMux()
- mux.Handle("/", GraphiQL{})
- mux.Handle("/graphql", h)
- mux.Handle("/graphql/", h)
- return mux, nil
-}
+ stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
+ stack.RegisterHandler("GraphQL", "/graphql", handler)
+ stack.RegisterHandler("GraphQL", "/graphql/", handler)
-// Stop terminates all goroutines belonging to the service, blocking until they
-// are all terminated.
-func (s *Service) Stop() error {
- if s.listener != nil {
- s.listener.Close()
- s.listener = nil
- log.Info("GraphQL endpoint closed", "url", fmt.Sprintf("http://%s", s.endpoint))
- }
return nil
}
diff --git a/internal/build/env.go b/internal/build/env.go
index dc3b0cb4a..3777bdd5d 100644
--- a/internal/build/env.go
+++ b/internal/build/env.go
@@ -41,6 +41,7 @@ type Environment struct {
Name string // name of the environment
Repo string // name of GitHub repo
Commit, Date, Branch, Tag string // Git info
+ Type string
Buildnum string
IsPullRequest bool
IsCronJob bool
@@ -61,9 +62,10 @@ func Env() Environment {
Repo: os.Getenv("GITHUB_REPOSITORY"),
Commit: os.Getenv("GITHUB_SHA"),
Date: getDate(commit),
- Branch: os.Getenv("GITHUB_REF"),
- Tag: os.Getenv("GITHUB_REF"),
- Buildnum: "",
+ Branch: os.Getenv("GITHUB_REF_NAME"),
+ Tag: os.Getenv("GITHUB_REF_NAME"),
+ Buildnum: os.Getenv("GITHUB_RUN_ID"),
+ Type: os.Getenv("GITHUB_REF_TYPE"),
IsPullRequest: false,
IsCronJob: false,
}
@@ -99,7 +101,7 @@ func LocalEnv() Environment {
}
}
if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" {
- env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD"))
+ env.Tag = firstLine(RunGit("describe", "--tags", "--abbrev=0"))
}
return env
}
diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go
index abe83e0e8..9d94ed3cc 100644
--- a/internal/jsre/deps/bindata.go
+++ b/internal/jsre/deps/bindata.go
@@ -85,7 +85,7 @@ func bignumberJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "bignumber.js", size: 17314, mode: os.FileMode(0664), modTime: time.Unix(1603198464, 0)}
+ info := bindataFileInfo{name: "bignumber.js", size: 17314, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5b, 0x75, 0xfc, 0x15, 0x5e, 0x7d, 0x27, 0x1a, 0x9a, 0xb5, 0xfb, 0x16, 0x90, 0xf4, 0x93, 0xac, 0xcb, 0x6c, 0x9c, 0xcd, 0x68, 0xe6, 0xd0, 0x3a, 0xcf, 0xa3, 0x83, 0x5c, 0x20, 0x34, 0x66, 0x45}}
return a, nil
}
@@ -105,7 +105,7 @@ func web3Js() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "web3.js", size: 551362, mode: os.FileMode(0664), modTime: time.Unix(1633331980, 0)}
+ info := bindataFileInfo{name: "web3.js", size: 551362, mode: os.FileMode(0664), modTime: time.Unix(1655287089, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0x40, 0x45, 0x91, 0x90, 0xaf, 0x9d, 0xce, 0xcc, 0x67, 0x77, 0x6d, 0x4a, 0xd, 0x1f, 0x6, 0x41, 0x9f, 0x0, 0xf2, 0x94, 0xa0, 0x93, 0xf0, 0xe2, 0xd7, 0xcd, 0x2, 0x6e, 0x8d, 0x56, 0xa}}
return a, nil
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index dc6358141..961f042df 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -33,6 +33,7 @@ var Modules = map[string]string{
"swarmfs": SwarmfsJs,
"txpool": TxpoolJs,
"les": LESJs,
+ "lespay": LESPayJs,
}
const ChequebookJs = `
@@ -870,3 +871,33 @@ web3._extend({
]
});
`
+
+const LESPayJs = `
+web3._extend({
+ property: 'lespay',
+ methods:
+ [
+ new web3._extend.Method({
+ name: 'distribution',
+ call: 'lespay_distribution',
+ params: 2
+ }),
+ new web3._extend.Method({
+ name: 'timeout',
+ call: 'lespay_timeout',
+ params: 2
+ }),
+ new web3._extend.Method({
+ name: 'value',
+ call: 'lespay_value',
+ params: 2
+ }),
+ ],
+ properties:
+ [
+ new web3._extend.Property({
+ name: 'requestStats',
+ getter: 'lespay_requestStats'
+ }),
+ ]
+});`
diff --git a/internal/xcbapi/api.go b/internal/xcbapi/api.go
index 44b44c7db..7ed49347e 100644
--- a/internal/xcbapi/api.go
+++ b/internal/xcbapi/api.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
+ "github.com/core-coin/go-core/accounts/abi"
"math/big"
"strings"
"time"
@@ -440,7 +441,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c
}
// EcRecover returns the address for the account that was used to create the signature.
-// Note, this function is compatible with eth_sign and personal_sign. As such it recovers
+// Note, this function is compatible with xcb_sign and personal_sign. As such it recovers
// the address of:
// hash = keccak256("\x19Core Signed Message:\n"${message length}${message})
// addr = ecrecover(hash, signature)
@@ -748,7 +749,7 @@ func (args *CallArgs) ToMessage(globalEnergyCap *big.Int) types.Message {
addr = *args.From
}
- // Set default gas & gas price if none were set
+ // Set default energy & energy price if none were set
energy := globalEnergyCap.Uint64()
if energy == 0 {
energy = uint64(math.MaxUint64 / 2)
@@ -793,12 +794,12 @@ type account struct {
StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
}
-func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, vmCfg vm.Config, timeout time.Duration, globalEnergyCap *big.Int) ([]byte, uint64, bool, error) {
+func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, vmCfg vm.Config, timeout time.Duration, globalEnergyCap *big.Int) (*core.ExecutionResult, error) {
defer func(start time.Time) { log.Debug("Executing CVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
- return nil, 0, false, err
+ return nil, err
}
// Override the fields of specified contracts before execution.
@@ -816,7 +817,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
state.SetBalance(addr, (*big.Int)(*account.Balance))
}
if account.State != nil && account.StateDiff != nil {
- return nil, 0, false, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
+ return nil, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
}
// Replace entire state if caller requires.
if account.State != nil {
@@ -846,7 +847,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
msg := args.ToMessage(globalEnergyCap)
cvm, vmError, err := b.GetCVM(ctx, msg, state, header)
if err != nil {
- return nil, 0, false, err
+ return nil, err
}
// Wait for the context to be done and cancel the cvm. Even if the
// CVM has finished, cancelling may be done (repeatedly)
@@ -858,15 +859,15 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
// Setup the energy pool (also for unmetered requests)
// and apply the message.
gp := new(core.EnergyPool).AddEnergy(math.MaxUint64)
- res, energy, failed, err := core.ApplyMessage(cvm, msg, gp)
+ result, err := core.ApplyMessage(cvm, msg, gp)
if err := vmError(); err != nil {
- return nil, 0, false, err
+ return nil, err
}
// If the timer caused an abort, return an appropriate error message
if cvm.Cancelled() {
- return nil, 0, false, fmt.Errorf("execution aborted (timeout = %v)", timeout)
+ return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout)
}
- return res, energy, failed, err
+ return result, err
}
// Call executes the given transaction on the state for the given block number.
@@ -880,10 +881,29 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOr
if overrides != nil {
accounts = *overrides
}
- result, _, _, err := DoCall(ctx, s.b, args, blockNrOrHash, accounts, vm.Config{}, 5*time.Second, s.b.RPCEnergyCap())
- return (hexutil.Bytes)(result), err
+ result, err := DoCall(ctx, s.b, args, blockNrOrHash, accounts, vm.Config{}, 5*time.Second, s.b.RPCEnergyCap())
+ if err != nil {
+ return nil, err
+ }
+ return result.Return(), nil
}
+type estimateEnergyError struct {
+ error string // Concrete error type if it's failed to estimate energy usage
+ vmerr error // Additional field, it's non-nil if the given transaction is invalid
+ revert string // Additional field, it's non-empty if the transaction is reverted and reason is provided
+}
+
+func (e estimateEnergyError) Error() string {
+ errMsg := e.error
+ if e.vmerr != nil {
+ errMsg += fmt.Sprintf(" (%v)", e.vmerr)
+ }
+ if e.revert != "" {
+ errMsg += fmt.Sprintf(" (%s)", e.revert)
+ }
+ return errMsg
+}
func DoEstimateEnergy(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, energyCap *big.Int) (hexutil.Uint64, error) {
// Binary search the energy requirement, as it may be higher than the amount used
var (
@@ -914,20 +934,31 @@ func DoEstimateEnergy(ctx context.Context, b Backend, args CallArgs, blockNrOrHa
if args.From == nil {
args.From = new(common.Address)
}
- // Create a helper to check if a energy allowance results in an executable transaction
- executable := func(energy uint64) bool {
+ // Create a helper to check if an energy allowance results in an executable transaction
+ executable := func(energy uint64) (bool, *core.ExecutionResult, error) {
args.Energy = (*hexutil.Uint64)(&energy)
- _, _, failed, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, energyCap)
- if err != nil || failed {
- return false
+ result, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, energyCap)
+ if err != nil {
+ if err == core.ErrIntrinsicEnergy {
+ return true, nil, nil // Special case, raise energy limit
+ }
+ return true, nil, err // Bail out
}
- return true
+ return result.Failed(), result, nil
}
// Execute the binary search and hone in on an executable energy limit
for lo+1 < hi {
mid := (hi + lo) / 2
- if !executable(mid) {
+ failed, _, err := executable(mid)
+
+ // If the error is not nil (consensus error), it means the provided message
+ // call or transaction will never be accepted no matter how much energy it is
+ // assigned. Return the error directly, don't struggle any more.
+ if err != nil {
+ return 0, err
+ }
+ if failed {
lo = mid
} else {
hi = mid
@@ -935,8 +966,29 @@ func DoEstimateEnergy(ctx context.Context, b Backend, args CallArgs, blockNrOrHa
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- if !executable(hi) {
- return 0, fmt.Errorf("energy required exceeds allowance (%d) or always failing transaction", cap)
+ failed, result, err := executable(hi)
+ if err != nil {
+ return 0, err
+ }
+ if failed {
+ if result != nil && result.Err != vm.ErrOutOfEnergy {
+ var revert string
+ if len(result.Revert()) > 0 {
+ ret, err := abi.UnpackRevert(result.Revert())
+ if err != nil {
+ revert = hexutil.Encode(result.Revert())
+ } else {
+ revert = ret
+ }
+ }
+ return 0, estimateEnergyError{
+ error: "always failing transaction",
+ vmerr: result.Err,
+ revert: revert,
+ }
+ }
+ // Otherwise, the specified energy cap is too low
+ return 0, estimateEnergyError{error: fmt.Sprintf("energy required exceeds allowance (%d)", cap)}
}
}
return hexutil.Uint64(hi), nil
@@ -1437,6 +1489,13 @@ func (args *SendTxArgs) toTransaction() *types.Transaction {
// SubmitTransaction is a helper function that submits tx to txPool and logs a message.
func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) {
+ // If the transaction fee cap is already specified, ensure the
+ // fee of the given transaction is _reasonable_.
+ feeCore := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(tx.EnergyPrice(), new(big.Int).SetUint64(tx.Energy()))), new(big.Float).SetInt(big.NewInt(params.Core)))
+ feeFloat, _ := feeCore.Float64()
+ if b.RPCTxFeeCap() != 0 && feeFloat > b.RPCTxFeeCap() {
+ return common.Hash{}, fmt.Errorf("tx fee (%.2f xcb) exceeds the configured cap (%.2f core)", feeFloat, b.RPCTxFeeCap())
+ }
if err := b.SendTx(ctx, tx); err != nil {
return common.Hash{}, err
}
diff --git a/internal/xcbapi/backend.go b/internal/xcbapi/backend.go
index 312515bec..e585f43d1 100644
--- a/internal/xcbapi/backend.go
+++ b/internal/xcbapi/backend.go
@@ -19,6 +19,7 @@ package xcbapi
import (
"context"
+ "github.com/core-coin/go-core/consensus"
"math/big"
"github.com/core-coin/go-core/accounts"
@@ -46,12 +47,15 @@ type Backend interface {
AccountManager() *accounts.Manager
ExtRPCEnabled() bool
RPCEnergyCap() *big.Int // global energy cap for xcb_call over rpc: DoS protection
+ RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
// Blockchain API
SetHead(number uint64)
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)
HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error)
+ CurrentHeader() *types.Header
+ CurrentBlock() *types.Block
BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)
BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error)
@@ -83,7 +87,7 @@ type Backend interface {
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
ChainConfig() *params.ChainConfig
- CurrentBlock() *types.Block
+ Engine() consensus.Engine
}
func GetAPIs(apiBackend Backend) []rpc.API {
diff --git a/les/api_backend.go b/les/api_backend.go
index 15c8c9449..d04e63066 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -19,6 +19,7 @@ package les
import (
"context"
"errors"
+ "github.com/core-coin/go-core/consensus"
"math/big"
"github.com/core-coin/go-core/accounts"
@@ -265,6 +266,10 @@ func (b *LesApiBackend) RPCEnergyCap() *big.Int {
return b.xcb.config.RPCEnergyCap
}
+func (b *LesApiBackend) RPCTxFeeCap() float64 {
+ return b.xcb.config.RPCTxFeeCap
+}
+
func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
if b.xcb.bloomIndexer == nil {
return 0, 0
@@ -278,3 +283,11 @@ func (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.Ma
go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.xcb.bloomRequests)
}
}
+
+func (b *LesApiBackend) Engine() consensus.Engine {
+ return b.xcb.engine
+}
+
+func (b *LesApiBackend) CurrentHeader() *types.Header {
+ return b.xcb.blockchain.CurrentHeader()
+}
diff --git a/les/api_test.go b/les/api_test.go
index 9f32a6def..4d0c8cd8e 100644
--- a/les/api_test.go
+++ b/les/api_test.go
@@ -55,7 +55,7 @@ func TestMain(m *testing.M) {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
// register the Delivery service which will run as a devp2p
// protocol when using the exec adapter
- adapters.RegisterServices(services)
+ adapters.RegisterLifecycles(services)
os.Exit(m.Run())
}
@@ -392,7 +392,7 @@ func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (min
return
}
-var services = adapters.Services{
+var services = adapters.LifecycleConstructors{
"lesclient": newLesClientService,
"lesserver": newLesServerService,
}
@@ -414,7 +414,7 @@ func NewNetwork() (*simulations.Network, func(), error) {
return net, teardown, nil
}
-func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) {
+func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
teardown = func() {}
switch adapterType {
case "sim":
@@ -454,7 +454,7 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
for i := range clients {
clientconf := adapters.RandomNodeConfig()
- clientconf.Services = []string{"lesclient"}
+ clientconf.Lifecycles = []string{"lesclient"}
if len(clientDir) == clientCount {
clientconf.DataDir = clientDir[i]
}
@@ -467,7 +467,7 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
for i := range servers {
serverconf := adapters.RandomNodeConfig()
- serverconf.Services = []string{"lesserver"}
+ serverconf.Lifecycles = []string{"lesserver"}
if len(serverDir) == serverCount {
serverconf.DataDir = serverDir[i]
}
@@ -492,26 +492,25 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
return test(ctx, net, servers, clients)
}
-func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) {
+func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := xcb.DefaultConfig
config.SyncMode = downloader.LightSync
config.Cryptore.PowMode = cryptore.ModeFake
- return New(ctx.NodeContext, &config)
+ return New(stack, &config)
}
-func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) {
+func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := xcb.DefaultConfig
config.SyncMode = downloader.FullSync
config.LightServ = testServerCapacity
config.LightPeers = testMaxClients
- core, err := xcb.New(ctx.NodeContext, &config)
+ core, err := xcb.New(stack, &config)
if err != nil {
return nil, err
}
- server, err := NewLesServer(core, &config)
+ _, err = NewLesServer(stack, core, &config)
if err != nil {
return nil, err
}
- core.AddLesServer(server)
return core, nil
}
diff --git a/les/benchmark.go b/les/benchmark.go
index c9d981e89..fba22d976 100644
--- a/les/benchmark.go
+++ b/les/benchmark.go
@@ -194,7 +194,7 @@ func (b *benchmarkTxSend) init(h *serverHandler, count int) error {
func (b *benchmarkTxSend) request(peer *serverPeer, index int) error {
enc, _ := rlp.EncodeToBytes(types.Transactions{b.txs[index]})
- return peer.sendTxs(0, enc)
+ return peer.sendTxs(0, 1, enc)
}
// benchmarkTxStatus implements requestBenchmark
diff --git a/les/checkpointoracle/oracle.go b/les/checkpointoracle/oracle.go
index 65b83aa11..ba6f6104a 100644
--- a/les/checkpointoracle/oracle.go
+++ b/les/checkpointoracle/oracle.go
@@ -51,16 +51,6 @@ type CheckpointOracle struct {
// New creates a checkpoint oracle handler with given configs and callback.
func New(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *CheckpointOracle {
- if config == nil {
- log.Info("Checkpoint registrar is not enabled")
- return nil
- }
- if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {
- log.Warn("Invalid checkpoint registrar config")
- return nil
- }
- log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
-
return &CheckpointOracle{
config: config,
getLocal: getLocal,
diff --git a/les/client.go b/les/client.go
index cfb550fff..67b586735 100644
--- a/les/client.go
+++ b/les/client.go
@@ -19,10 +19,12 @@ package les
import (
"fmt"
+ lpc "github.com/core-coin/go-core/les/lespay/client"
+ "github.com/core-coin/go-core/xcb/filters"
"math/big"
+ "time"
"github.com/core-coin/go-core/accounts"
- "github.com/core-coin/go-core/accounts/abi/bind"
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/common/hexutil"
"github.com/core-coin/go-core/common/mclock"
@@ -33,7 +35,6 @@ import (
"github.com/core-coin/go-core/core/types"
"github.com/core-coin/go-core/event"
"github.com/core-coin/go-core/internal/xcbapi"
- "github.com/core-coin/go-core/les/checkpointoracle"
"github.com/core-coin/go-core/light"
"github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/node"
@@ -44,22 +45,23 @@ import (
"github.com/core-coin/go-core/xcb"
"github.com/core-coin/go-core/xcb/downloader"
"github.com/core-coin/go-core/xcb/energyprice"
- "github.com/core-coin/go-core/xcb/filters"
)
type LightCore struct {
lesCommons
- peers *serverPeerSet
- reqDist *requestDistributor
- retriever *retrieveManager
- odr *LesOdr
- relay *lesTxRelay
- handler *clientHandler
- txPool *light.TxPool
- blockchain *light.LightChain
- serverPool *serverPool
- pruner *pruner
+ peers *serverPeerSet
+ reqDist *requestDistributor
+ retriever *retrieveManager
+ odr *LesOdr
+ relay *lesTxRelay
+ handler *clientHandler
+ txPool *light.TxPool
+ blockchain *light.LightChain
+ serverPool *serverPool
+ valueTracker *lpc.ValueTracker
+ dialCandidates enode.Iterator
+ pruner *pruner
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports
@@ -69,10 +71,17 @@ type LightCore struct {
engine consensus.Engine
accountManager *accounts.Manager
netRPCService *xcbapi.PublicNetAPI
+
+ p2pServer *p2p.Server
}
-func New(ctx *node.ServiceContext, config *xcb.Config) (*LightCore, error) {
- chainDb, err := ctx.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "xcb/db/chaindata/")
+// New creates an instance of the light client.
+func New(stack *node.Node, config *xcb.Config) (*LightCore, error) {
+ chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "xcb/db/chaindata/")
+ if err != nil {
+ return nil, err
+ }
+ lespayDb, err := stack.OpenDatabase("lespay", 0, 0, "xcb/db/lespay")
if err != nil {
return nil, err
}
@@ -96,15 +105,26 @@ func New(ctx *node.ServiceContext, config *xcb.Config) (*LightCore, error) {
closeCh: make(chan struct{}),
},
peers: peers,
- eventMux: ctx.EventMux,
+ eventMux: stack.EventMux(),
reqDist: newRequestDistributor(peers, &mclock.System{}),
- accountManager: ctx.AccountManager,
- engine: xcb.CreateConsensusEngine(ctx, chainConfig, &config.Cryptore, nil, false, chainDb),
+ accountManager: stack.AccountManager(),
+ engine: xcb.CreateConsensusEngine(stack, chainConfig, &config.Cryptore, nil, false, chainDb),
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: xcb.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
- serverPool: newServerPool(chainDb, config.UltraLightServers),
+ valueTracker: lpc.NewValueTracker(lespayDb, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
+ p2pServer: stack.Server(),
+ }
+ peers.subscribe((*vtSubscription)(lxcb.valueTracker))
+
+ dnsdisc, err := lxcb.setupDiscovery(&stack.Config().P2P)
+ if err != nil {
+ return nil, err
}
- lxcb.retriever = newRetrieveManager(peers, lxcb.reqDist, lxcb.serverPool)
+ lxcb.serverPool = newServerPool(lespayDb, []byte("serverpool:"), lxcb.valueTracker, dnsdisc, time.Second, nil, &mclock.System{}, config.UltraLightServers)
+ peers.subscribe(lxcb.serverPool)
+ lxcb.dialCandidates = lxcb.serverPool.dialIterator
+
+ lxcb.retriever = newRetrieveManager(peers, lxcb.reqDist, lxcb.serverPool.getTimeout)
lxcb.relay = newLesTxRelay(peers, lxcb.retriever)
lxcb.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, lxcb.retriever)
@@ -124,27 +144,13 @@ func New(ctx *node.ServiceContext, config *xcb.Config) (*LightCore, error) {
lxcb.chainReader = lxcb.blockchain
lxcb.txPool = light.NewTxPool(lxcb.chainConfig, lxcb.blockchain, lxcb.relay)
- // Set up checkpoint oracle.
- oracle := config.CheckpointOracle
- if oracle == nil {
- oracle = params.CheckpointOracles[genesisHash]
- }
- lxcb.oracle = checkpointoracle.New(oracle, lxcb.localCheckpoint)
+ lxcb.oracle = lxcb.setupOracle(stack, genesisHash, config)
// Note: AddChildIndexer starts the update process for the child
lxcb.bloomIndexer.AddChildIndexer(lxcb.bloomTrieIndexer)
lxcb.chtIndexer.Start(lxcb.blockchain)
lxcb.bloomIndexer.Start(lxcb.blockchain)
- lxcb.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, checkpoint, lxcb)
- if lxcb.handler.ulc != nil {
- log.Warn("Ultra light client is enabled", "trustedNodes", len(lxcb.handler.ulc.keys), "minTrustedFraction", lxcb.handler.ulc.fraction)
- lxcb.blockchain.DisableCheckFreq()
- }
-
- // Start a light chain pruner to delete useless historical data.
- lxcb.pruner = newPruner(chainDb, lxcb.chtIndexer, lxcb.bloomTrieIndexer)
-
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
@@ -152,16 +158,45 @@ func New(ctx *node.ServiceContext, config *xcb.Config) (*LightCore, error) {
rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
- lxcb.ApiBackend = &LesApiBackend{ctx.ExtRPCEnabled(), lxcb, nil}
+ lxcb.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), lxcb, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.Miner.EnergyPrice
}
lxcb.ApiBackend.gpo = energyprice.NewOracle(lxcb.ApiBackend, gpoParams)
+ lxcb.netRPCService = xcbapi.NewPublicNetAPI(lxcb.p2pServer, lxcb.config.NetworkId)
+
+ // Register the backend on the node
+ stack.RegisterAPIs(lxcb.APIs())
+ stack.RegisterProtocols(lxcb.Protocols())
+ stack.RegisterLifecycle(lxcb)
+
+ lxcb.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, checkpoint, lxcb)
+ if lxcb.handler.ulc != nil {
+ log.Warn("Ultra light client is enabled", "trustedNodes", len(lxcb.handler.ulc.keys), "minTrustedFraction", lxcb.handler.ulc.fraction)
+ lxcb.blockchain.DisableCheckFreq()
+ }
return lxcb, nil
}
+// vtSubscription implements serverPeerSubscriber
+type vtSubscription lpc.ValueTracker
+
+// registerPeer implements serverPeerSubscriber
+func (v *vtSubscription) registerPeer(p *serverPeer) {
+ vt := (*lpc.ValueTracker)(v)
+ p.setValueTracker(vt, vt.Register(p.ID()))
+ p.updateVtParams()
+}
+
+// unregisterPeer implements serverPeerSubscriber
+func (v *vtSubscription) unregisterPeer(p *serverPeer) {
+ vt := (*lpc.ValueTracker)(v)
+ vt.Unregister(p.ID())
+ p.setValueTracker(nil, nil)
+}
+
type LightDummyAPI struct{}
// Corebase is the address that mining rewards will be send to
@@ -215,6 +250,11 @@ func (s *LightCore) APIs() []rpc.API {
Version: "1.0",
Service: NewPrivateLightAPI(&s.lesCommons),
Public: false,
+ }, {
+ Namespace: "lespay",
+ Version: "1.0",
+ Service: lpc.NewPrivateClientAPI(s.valueTracker),
+ Public: false,
},
}...)
}
@@ -230,38 +270,36 @@ func (s *LightCore) LesVersion() int { return int(ClientProto
func (s *LightCore) Downloader() *downloader.Downloader { return s.handler.downloader }
func (s *LightCore) EventMux() *event.TypeMux { return s.eventMux }
-// Protocols implements node.Service, returning all the currently configured
-// network protocols to start.
+// Protocols returns all the currently configured network protocols to start.
func (s *LightCore) Protocols() []p2p.Protocol {
return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
- if p := s.peers.peer(peerIdToString(id)); p != nil {
+ if p := s.peers.peer(id.String()); p != nil {
return p.Info()
}
return nil
- })
+ }, s.dialCandidates)
}
-// Start implements node.Service, starting all internal goroutines needed by the
+// Start implements node.Lifecycle, starting all internal goroutines needed by the
// light core protocol implementation.
-func (s *LightCore) Start(srvr *p2p.Server) error {
+func (s *LightCore) Start() error {
log.Warn("Light client mode is an experimental feature")
+ s.serverPool.start()
// Start bloom request workers.
s.wg.Add(bloomServiceThreads)
s.startBloomHandlers(params.BloomBitsBlocksClient)
+ s.handler.start()
- s.netRPCService = xcbapi.NewPublicNetAPI(srvr, s.config.NetworkId)
-
- // clients are searching for the first advertised protocol in the list
- protocolVersion := AdvertiseProtocolVersions[0]
- s.serverPool.start(srvr, lesTopic(s.blockchain.Genesis().Hash(), protocolVersion))
return nil
}
-// Stop implements node.Service, terminating all internal goroutines used by the
+// Stop implements node.Lifecycle, terminating all internal goroutines used by the
// Core protocol.
func (s *LightCore) Stop() error {
close(s.closeCh)
+ s.serverPool.stop()
+ s.valueTracker.Stop()
s.peers.close()
s.reqDist.close()
s.odr.Stop()
@@ -280,11 +318,3 @@ func (s *LightCore) Stop() error {
log.Info("Light core stopped")
return nil
}
-
-// SetClient sets the rpc client and binds the registrar contract.
-func (s *LightCore) SetContractBackend(backend bind.ContractBackend) {
- if s.oracle == nil {
- return
- }
- s.oracle.Start(backend)
-}
diff --git a/les/client_handler.go b/les/client_handler.go
index 7b909fc7a..658a51e58 100644
--- a/les/client_handler.go
+++ b/les/client_handler.go
@@ -64,16 +64,20 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T
if checkpoint != nil {
height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
}
- handler.fetcher = newLightFetcher(handler)
+ handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise)
handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer)
handler.backend.peers.subscribe((*downloaderPeerNotify)(handler))
return handler
}
+func (h *clientHandler) start() {
+ h.fetcher.start()
+}
+
func (h *clientHandler) stop() {
close(h.closeCh)
h.downloader.Terminate()
- h.fetcher.close()
+ h.fetcher.stop()
h.wg.Wait()
}
@@ -85,14 +89,9 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter)
}
peer := newServerPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version)))
defer peer.close()
- peer.poolEntry = h.backend.serverPool.connect(peer, peer.Node())
- if peer.poolEntry == nil {
- return p2p.DiscRequested
- }
h.wg.Add(1)
defer h.wg.Done()
err := h.handle(peer)
- h.backend.serverPool.disconnect(peer.poolEntry)
return err
}
@@ -129,10 +128,6 @@ func (h *clientHandler) handle(p *serverPeer) error {
h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})
- // pool entry can be nil during the unit test.
- if p.poolEntry != nil {
- h.backend.serverPool.registered(p.poolEntry)
- }
// Mark the peer starts to be served.
atomic.StoreUint32(&p.serving, 1)
defer atomic.StoreUint32(&p.serving, 0)
@@ -180,6 +175,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrRequestRejected, "")
}
p.updateFlowControl(update)
+ p.updateVtParams()
if req.Hash != (common.Hash{}) {
if p.announceType == announceTypeNone {
@@ -193,6 +189,9 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
p.Log().Trace("Valid announcement signature")
}
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
+
+ // Update peer head information first and then notify the announcement
+ p.updateHead(req.Hash, req.Number, req.Td)
h.fetcher.announce(p, &req)
}
case BlockHeadersMsg:
@@ -204,11 +203,17 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
+ headers := resp.Headers
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
- if h.fetcher.requestedID(resp.ReqID) {
- h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
- } else {
- if err := h.downloader.DeliverHeaders(p.id, resp.Headers); err != nil {
+ p.answeredRequest(resp.ReqID)
+
+ // Filter out any explicitly requested headers, deliver the rest to the downloader
+ filter := len(headers) == 1
+ if filter {
+ headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
+ }
+ if len(headers) != 0 || !filter {
+ if err := h.downloader.DeliverHeaders(p.id, headers); err != nil {
log.Debug("Failed to deliver headers", "err", err)
}
}
@@ -222,6 +227,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgBlockBodies,
ReqID: resp.ReqID,
@@ -237,6 +243,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgCode,
ReqID: resp.ReqID,
@@ -252,6 +259,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgReceipts,
ReqID: resp.ReqID,
@@ -267,6 +275,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgProofsV2,
ReqID: resp.ReqID,
@@ -282,6 +291,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgHelperTrieProofs,
ReqID: resp.ReqID,
@@ -297,6 +307,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
+ p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgTxStatus,
ReqID: resp.ReqID,
@@ -321,8 +332,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
// Deliver the received response to retriever.
if deliverMsg != nil {
if err := h.backend.retriever.deliver(p, deliverMsg); err != nil {
- p.errCount++
- if p.errCount > maxResponseErrors {
+ if val := p.errCount.Add(1, mclock.Now()); val > maxResponseErrors {
return err
}
}
diff --git a/les/clientpool.go b/les/clientpool.go
index 5e57c243d..85df88515 100644
--- a/les/clientpool.go
+++ b/les/clientpool.go
@@ -219,7 +219,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
id, freeID := peer.ID(), peer.freeClientId()
if _, ok := f.connectedMap[id]; ok {
clientRejectedMeter.Mark(1)
- log.Debug("Client already connected", "address", freeID, "id", peerIdToString(id))
+ log.Debug("Client already connected", "address", freeID, "id", id.String())
return false
}
// Create a clientInfo but do not add it yet
@@ -288,7 +288,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
f.connectedQueue.Push(c)
}
clientRejectedMeter.Mark(1)
- log.Debug("Client rejected", "address", freeID, "id", peerIdToString(id))
+ log.Debug("Client rejected", "address", freeID, "id", id.String())
return false
}
// accept new client, drop old ones
@@ -333,7 +333,7 @@ func (f *clientPool) disconnect(p clientPoolPeer) {
// Short circuit if the peer hasn't been registered.
e := f.connectedMap[p.ID()]
if e == nil {
- log.Debug("Client not connected", "address", p.freeClientId(), "id", peerIdToString(p.ID()))
+ log.Debug("Client not connected", "address", p.freeClientId(), "id", p.ID().String())
return
}
f.dropClient(e, f.clock.Now(), false)
@@ -691,6 +691,14 @@ func (db *nodeDB) close() {
close(db.closeCh)
}
+func (db *nodeDB) getPrefix(neg bool) []byte {
+ prefix := positiveBalancePrefix
+ if neg {
+ prefix = negativeBalancePrefix
+ }
+ return append(db.verbuf[:], prefix...)
+}
+
func (db *nodeDB) key(id []byte, neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
@@ -761,7 +769,8 @@ func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result [
if maxCount <= 0 {
return
}
- it := db.db.NewIteratorWithStart(db.key(start.Bytes(), false))
+ prefix := db.getPrefix(false)
+ it := db.db.NewIterator(prefix, start.Bytes())
defer it.Release()
for i := len(stop[:]) - 1; i >= 0; i-- {
stop[i]--
@@ -840,8 +849,9 @@ func (db *nodeDB) expireNodes() {
visited int
deleted int
start = time.Now()
+ prefix = db.getPrefix(true)
)
- iter := db.db.NewIteratorWithPrefix(append(db.verbuf[:], negativeBalancePrefix...))
+ iter := db.db.NewIterator(prefix, nil)
for iter.Next() {
visited += 1
var balance negBalance
diff --git a/les/commons.go b/les/commons.go
index 3ff2c2443..aa9e9ff64 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -18,6 +18,9 @@ package les
import (
"fmt"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/node"
+ "github.com/core-coin/go-core/xcbclient"
"math/big"
"sync"
@@ -81,7 +84,7 @@ type NodeInfo struct {
}
// makeProtocols creates protocol descriptors for the given LES versions.
-func (c *lesCommons) makeProtocols(versions []uint, runPeer func(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error, peerInfo func(id enode.ID) interface{}) []p2p.Protocol {
+func (c *lesCommons) makeProtocols(versions []uint, runPeer func(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error, peerInfo func(id enode.ID) interface{}, dialCandidates enode.Iterator) []p2p.Protocol {
protos := make([]p2p.Protocol, len(versions))
for i, version := range versions {
version := version
@@ -93,7 +96,8 @@ func (c *lesCommons) makeProtocols(versions []uint, runPeer func(version uint, p
Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
return runPeer(version, peer, rw)
},
- PeerInfo: peerInfo,
+ PeerInfo: peerInfo,
+ DialCandidates: dialCandidates,
}
}
return protos
@@ -144,3 +148,26 @@ func (c *lesCommons) localCheckpoint(index uint64) params.TrustedCheckpoint {
BloomRoot: light.GetBloomTrieRoot(c.chainDb, index, sectionHead),
}
}
+
+// setupOracle sets up the checkpoint oracle contract client.
+func (c *lesCommons) setupOracle(node *node.Node, genesis common.Hash, xcbconfig *xcb.Config) *checkpointoracle.CheckpointOracle {
+ config := xcbconfig.CheckpointOracle
+ if config == nil {
+ // Try loading default config.
+ config = params.CheckpointOracles[genesis]
+ }
+ if config == nil {
+ log.Info("Checkpoint registrar is not enabled")
+ return nil
+ }
+ if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {
+ log.Warn("Invalid checkpoint registrar config")
+ return nil
+ }
+ oracle := checkpointoracle.New(config, c.localCheckpoint)
+ rpcClient, _ := node.Attach()
+ client := xcbclient.NewClient(rpcClient)
+ oracle.Start(client)
+ log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
+ return oracle
+}
diff --git a/les/distributor.go b/les/distributor.go
index b4bbef3ac..7612ee170 100644
--- a/les/distributor.go
+++ b/les/distributor.go
@@ -180,12 +180,12 @@ func (d *requestDistributor) loop() {
type selectPeerItem struct {
peer distPeer
req *distReq
- weight int64
+ weight uint64
}
// Weight implements wrsItem interface
-func (sp selectPeerItem) Weight() int64 {
- return sp.weight
+func selectPeerWeight(i interface{}) uint64 {
+ return i.(selectPeerItem).weight
}
// nextRequest returns the next possible request from any peer, along with the
@@ -220,9 +220,9 @@ func (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {
wait, bufRemain := peer.waitBefore(cost)
if wait == 0 {
if sel == nil {
- sel = utils.NewWeightedRandomSelect()
+ sel = utils.NewWeightedRandomSelect(selectPeerWeight)
}
- sel.Update(selectPeerItem{peer: peer, req: req, weight: int64(bufRemain*1000000) + 1})
+ sel.Update(selectPeerItem{peer: peer, req: req, weight: uint64(bufRemain*1000000) + 1})
} else {
if bestWait == 0 || wait < bestWait {
bestWait = wait
diff --git a/les/enr_entry.go b/les/enr_entry.go
index 2bfa64030..3db7c674c 100644
--- a/les/enr_entry.go
+++ b/les/enr_entry.go
@@ -17,6 +17,9 @@
package les
import (
+ "github.com/core-coin/go-core/p2p"
+ "github.com/core-coin/go-core/p2p/dnsdisc"
+ "github.com/core-coin/go-core/p2p/enode"
"github.com/core-coin/go-core/rlp"
)
@@ -30,3 +33,12 @@ type lesEntry struct {
func (e lesEntry) ENRKey() string {
return "les"
}
+
+// setupDiscovery creates the node discovery source for the xcb protocol.
+func (xcb *LightCore) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) {
+ if /*cfg.NoDiscovery || */ len(xcb.config.DiscoveryURLs) == 0 {
+ return nil, nil
+ }
+ client := dnsdisc.NewClient(dnsdisc.Config{})
+ return client.NewIterator(xcb.config.DiscoveryURLs...)
+}
diff --git a/les/fetcher.go b/les/fetcher.go
index b221ee90e..bbcab4b1d 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -17,875 +17,547 @@
package les
import (
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/light"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/xcb/fetcher"
+ "github.com/core-coin/go-core/xcbdb"
"math/big"
+ "math/rand"
"sync"
"time"
"github.com/core-coin/go-core/common"
- "github.com/core-coin/go-core/common/mclock"
"github.com/core-coin/go-core/consensus"
"github.com/core-coin/go-core/core/rawdb"
"github.com/core-coin/go-core/core/types"
- "github.com/core-coin/go-core/light"
"github.com/core-coin/go-core/log"
)
const (
- blockDelayTimeout = time.Second * 30 // timeout for a peer to announce a head that has already been confirmed by others
- maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer
- serverStateAvailable = 100 // number of recent blocks where state availability is assumed
+ blockDelayTimeout = 10 * time.Second // Timeout for retrieving the headers from the peer
+ gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired requests
+ cachedAnnosThreshold = 64 // The maximum queued announcements
)
-// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
-// ODR system to ensure that we only request data related to a certain block from peers who have already processed
-// and announced that block.
-type lightFetcher struct {
- handler *clientHandler
- chain *light.LightChain
-
- lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
- maxConfirmedTd *big.Int
- peers map[*serverPeer]*fetcherPeerInfo
- lastUpdateStats *updateStatsEntry
- syncing bool
- syncDone chan *serverPeer
-
- reqMu sync.RWMutex // reqMu protects access to sent header fetch requests
- requested map[uint64]fetchRequest
- deliverChn chan fetchResponse
- timeoutChn chan uint64
- requestTriggered bool
- requestTrigger chan struct{}
- lastTrustedHeader *types.Header
-
- closeCh chan struct{}
- wg sync.WaitGroup
-}
-
-// fetcherPeerInfo holds fetcher-specific information about each active peer
-type fetcherPeerInfo struct {
- root, lastAnnounced *fetcherTreeNode
- nodeCnt int
- confirmedTd *big.Int
- bestConfirmed *fetcherTreeNode
- nodeByHash map[common.Hash]*fetcherTreeNode
- firstUpdateStats *updateStatsEntry
-}
-
-// fetcherTreeNode is a node of a tree that holds information about blocks recently
-// announced and confirmed by a certain peer. Each new announce message from a peer
-// adds nodes to the tree, based on the previous announced head and the reorg depth.
-// There are three possible states for a tree node:
-// - announced: not downloaded (known) yet, but we know its head, number and td
-// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
-// - known: both announced by this peer and downloaded (from any peer).
-// This structure makes it possible to always know which peer has a certain block,
-// which is necessary for selecting a suitable peer for ODR requests and also for
-// canonizing new heads. It also helps to always download the minimum necessary
-// amount of headers with a single request.
-type fetcherTreeNode struct {
- hash common.Hash
- number uint64
- td *big.Int
- known, requested bool
- parent *fetcherTreeNode
- children []*fetcherTreeNode
+// announce represents an new block announcement from the les server.
+type announce struct {
+ data *announceData
+ trust bool
+ peerid enode.ID
}
-// fetchRequest represents a header download request
-type fetchRequest struct {
- hash common.Hash
- amount uint64
- peer *serverPeer
- sent mclock.AbsTime
- timeout bool
+// request represents a record when the header request is sent.
+type request struct {
+ reqid uint64
+ peerid enode.ID
+ sendAt time.Time
+ hash common.Hash
}
-// fetchResponse represents a header download response
-type fetchResponse struct {
- reqID uint64
+// response represents a response packet from network as well as a channel
+// to return all un-requested data.
+type response struct {
+ reqid uint64
headers []*types.Header
- peer *serverPeer
+ peerid enode.ID
+ remain chan []*types.Header
}
-// newLightFetcher creates a new light fetcher
-func newLightFetcher(h *clientHandler) *lightFetcher {
- f := &lightFetcher{
- handler: h,
- chain: h.backend.blockchain,
- peers: make(map[*serverPeer]*fetcherPeerInfo),
- deliverChn: make(chan fetchResponse, 100),
- requested: make(map[uint64]fetchRequest),
- timeoutChn: make(chan uint64),
- requestTrigger: make(chan struct{}, 1),
- syncDone: make(chan *serverPeer),
- closeCh: make(chan struct{}),
- maxConfirmedTd: big.NewInt(0),
- }
- h.backend.peers.subscribe(f)
-
- f.wg.Add(1)
- go f.syncLoop()
- return f
-}
+// fetcherPeer holds the fetcher-specific information for each active peer
+type fetcherPeer struct {
+ latest *announceData // The latest announcement sent from the peer
-func (f *lightFetcher) close() {
- close(f.closeCh)
- f.wg.Wait()
+ // These following two fields can track the latest announces
+ // from the peer with limited size for caching. We hold the
+ // assumption that all enqueued announces are td-monotonic.
+ announces map[common.Hash]*announce // Announcement map
+ announcesList []common.Hash // FIFO announces list
}
-// syncLoop is the main event loop of the light fetcher
-func (f *lightFetcher) syncLoop() {
- defer f.wg.Done()
- for {
- select {
- case <-f.closeCh:
- return
- // request loop keeps running until no further requests are necessary or possible
- case <-f.requestTrigger:
- f.lock.Lock()
- var (
- rq *distReq
- reqID uint64
- syncing bool
- )
- if !f.syncing {
- rq, reqID, syncing = f.nextRequest()
- }
- f.requestTriggered = rq != nil
- f.lock.Unlock()
-
- if rq != nil {
- if _, ok := <-f.handler.backend.reqDist.queue(rq); ok {
- if syncing {
- f.lock.Lock()
- f.syncing = true
- f.lock.Unlock()
- } else {
- go func() {
- time.Sleep(softRequestTimeout)
- f.reqMu.Lock()
- req, ok := f.requested[reqID]
- if ok {
- req.timeout = true
- f.requested[reqID] = req
- }
- f.reqMu.Unlock()
- // keep starting new requests while possible
- f.requestTrigger <- struct{}{}
- }()
- }
- } else {
- f.requestTrigger <- struct{}{}
- }
- }
- case reqID := <-f.timeoutChn:
- f.reqMu.Lock()
- req, ok := f.requested[reqID]
- if ok {
- delete(f.requested, reqID)
- }
- f.reqMu.Unlock()
- if ok {
- f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
- req.peer.Log().Debug("Fetching data timed out hard")
- go f.handler.removePeer(req.peer.id)
- }
- case resp := <-f.deliverChn:
- f.reqMu.Lock()
- req, ok := f.requested[resp.reqID]
- if ok && req.peer != resp.peer {
- ok = false
- }
- if ok {
- delete(f.requested, resp.reqID)
- }
- f.reqMu.Unlock()
- if ok {
- f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
- }
- f.lock.Lock()
- if !ok || !(f.syncing || f.processResponse(req, resp)) {
- resp.peer.Log().Debug("Failed processing response")
- go f.handler.removePeer(resp.peer.id)
- }
- f.lock.Unlock()
- case p := <-f.syncDone:
- f.lock.Lock()
- p.Log().Debug("Done synchronising with peer")
- f.checkSyncedHeaders(p)
- f.syncing = false
- f.lock.Unlock()
- f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
- }
- }
-}
-
-// registerPeer adds a new peer to the fetcher's peer set
-func (f *lightFetcher) registerPeer(p *serverPeer) {
- p.lock.Lock()
- p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
- return f.peerHasBlock(p, hash, number, hasState)
- }
- p.lock.Unlock()
-
- f.lock.Lock()
- defer f.lock.Unlock()
- f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
-}
-
-// unregisterPeer removes a new peer from the fetcher's peer set
-func (f *lightFetcher) unregisterPeer(p *serverPeer) {
- p.lock.Lock()
- p.hasBlock = nil
- p.lock.Unlock()
-
- f.lock.Lock()
- defer f.lock.Unlock()
-
- // check for potential timed out block delay statistics
- f.checkUpdateStats(p, nil)
- delete(f.peers, p)
-}
-
-// announce processes a new announcement message received from a peer, adding new
-// nodes to the peer's block tree and removing old nodes if necessary
-func (f *lightFetcher) announce(p *serverPeer, head *announceData) {
- f.lock.Lock()
- defer f.lock.Unlock()
- p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)
-
- fp := f.peers[p]
- if fp == nil {
- p.Log().Debug("Announcement from unknown peer")
+// addAnno enqueues an new trusted announcement. If the queued announces overflow,
+// evict from the oldest.
+func (fp *fetcherPeer) addAnno(anno *announce) {
+ // Short circuit if the anno already exists. In normal case it should
+ // never happen since only monotonic anno is accepted. But the adversary
+ // may feed us fake announces with higher td but same hash. In this case,
+ // ignore the anno anyway.
+ hash := anno.data.Hash
+ if _, exist := fp.announces[hash]; exist {
return
}
-
- if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
- // announced tds should be strictly monotonic
- p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
- go f.handler.removePeer(p.id)
- return
- }
-
- n := fp.lastAnnounced
- for i := uint64(0); i < head.ReorgDepth; i++ {
- if n == nil {
- break
+ fp.announces[hash] = anno
+ fp.announcesList = append(fp.announcesList, hash)
+ // Evict oldest if the announces are oversized.
+ if len(fp.announcesList)-cachedAnnosThreshold > 0 {
+ for i := 0; i < len(fp.announcesList)-cachedAnnosThreshold; i++ {
+ delete(fp.announces, fp.announcesList[i])
}
- n = n.parent
- }
- // n is now the reorg common ancestor, add a new branch of nodes
- if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
- // if announced head block height is lower or same as n or too far from it to add
- // intermediate nodes then discard previous announcement info and trigger a resync
- n = nil
- fp.nodeCnt = 0
- fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
+ copy(fp.announcesList, fp.announcesList[len(fp.announcesList)-cachedAnnosThreshold:])
+ fp.announcesList = fp.announcesList[:cachedAnnosThreshold]
}
- // check if the node count is too high to add new nodes, discard oldest ones if necessary
- if n != nil {
- // n is now the reorg common ancestor, add a new branch of nodes
- // check if the node count is too high to add new nodes
- locked := false
- for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
- if !locked {
- f.chain.LockChain()
- defer f.chain.UnlockChain()
- locked = true
- }
- // if one of root's children is canonical, keep it, delete other branches and root itself
- var newRoot *fetcherTreeNode
- for i, nn := range fp.root.children {
- if rawdb.ReadCanonicalHash(f.handler.backend.chainDb, nn.number) == nn.hash {
- fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
- nn.parent = nil
- newRoot = nn
- break
- }
- }
- fp.deleteNode(fp.root)
- if n == fp.root {
- n = newRoot
- }
- fp.root = newRoot
- if newRoot == nil || !f.checkKnownNode(p, newRoot) {
- fp.bestConfirmed = nil
- fp.confirmedTd = nil
- }
+}
- if n == nil {
- break
- }
+// forwardAnno removes all announces from the map with a number lower than
+// the provided threshold.
+func (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce {
+ var (
+ cutset int
+ evicted []*announce
+ )
+ for ; cutset < len(fp.announcesList); cutset++ {
+ anno := fp.announces[fp.announcesList[cutset]]
+ if anno == nil {
+ continue // In theory it should never ever happen
}
- if n != nil {
- for n.number < head.Number {
- nn := &fetcherTreeNode{number: n.number + 1, parent: n}
- n.children = append(n.children, nn)
- n = nn
- fp.nodeCnt++
- }
- n.hash = head.Hash
- n.td = head.Td
- fp.nodeByHash[n.hash] = n
+ if anno.data.Td.Cmp(td) > 0 {
+ break
}
+ evicted = append(evicted, anno)
+ delete(fp.announces, anno.data.Hash)
}
-
- if n == nil {
- // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
- if fp.root != nil {
- fp.deleteNode(fp.root)
- }
- n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
- fp.root = n
- fp.nodeCnt++
- fp.nodeByHash[n.hash] = n
- fp.bestConfirmed = nil
- fp.confirmedTd = nil
- }
-
- f.checkKnownNode(p, n)
- p.lock.Lock()
- p.headInfo = blockInfo{Number: head.Number, Hash: head.Hash, Td: head.Td}
- fp.lastAnnounced = n
- p.lock.Unlock()
- f.checkUpdateStats(p, nil)
- if !f.requestTriggered {
- f.requestTriggered = true
- f.requestTrigger <- struct{}{}
+ if cutset > 0 {
+ copy(fp.announcesList, fp.announcesList[cutset:])
+ fp.announcesList = fp.announcesList[:len(fp.announcesList)-cutset]
}
+ return evicted
}
-// peerHasBlock returns true if we can assume the peer knows the given block
-// based on its announcements
-func (f *lightFetcher) peerHasBlock(p *serverPeer, hash common.Hash, number uint64, hasState bool) bool {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- fp := f.peers[p]
- if fp == nil || fp.root == nil {
- return false
- }
+// lightFetcher implements retrieval of newly announced headers. It reuses
+// the xcb.BlockFetcher as the underlying fetcher but adding more additional
+// rules: e.g. evict "timeout" peers.
+type lightFetcher struct {
+ // Various handlers
+ ulc *ulc
+ chaindb xcbdb.Database
+ reqDist *requestDistributor
+ peerset *serverPeerSet // The global peerset of light client which shared by all components
+ chain *light.LightChain // The local light chain which maintains the canonical header chain.
+ fetcher *fetcher.BlockFetcher // The underlying fetcher which takes care block header retrieval.
+
+ // Peerset maintained by fetcher
+ plock sync.RWMutex
+ peers map[enode.ID]*fetcherPeer
+
+ // Various channels
+ announceCh chan *announce
+ requestCh chan *request
+ deliverCh chan *response
+ syncDone chan *types.Header
- if hasState {
- if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
- return false
- }
- }
+ closeCh chan struct{}
+ wg sync.WaitGroup
- if f.syncing {
- // always return true when syncing
- // false positives are acceptable, a more sophisticated condition can be implemented later
- return true
- }
+ // Callback
+ synchronise func(peer *serverPeer)
- if number >= fp.root.number {
- // it is recent enough that if it is known, is should be in the peer's block tree
- return fp.nodeByHash[hash] != nil
- }
- f.chain.LockChain()
- defer f.chain.UnlockChain()
- // if it's older than the peer's block tree root but it's in the same canonical chain
- // as the root, we can still be sure the peer knows it
- //
- // when syncing, just check if it is part of the known chain, there is nothing better we
- // can do since we do not know the most recent block hash yet
- return rawdb.ReadCanonicalHash(f.handler.backend.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.handler.backend.chainDb, number) == hash
+ // Test fields or hooks
+ noAnnounce bool
+ newHeadHook func(*types.Header)
+ newAnnounce func(*serverPeer, *announceData)
}
-// requestAmount calculates the amount of headers to be downloaded starting
-// from a certain head backwards
-func (f *lightFetcher) requestAmount(p *serverPeer, n *fetcherTreeNode) uint64 {
- amount := uint64(0)
- nn := n
- for nn != nil && !f.checkKnownNode(p, nn) {
- nn = nn.parent
- amount++
- }
- if nn == nil {
- amount = n.number
+// newLightFetcher creates a light fetcher instance.
+func newLightFetcher(chain *light.LightChain, engine consensus.Engine, peers *serverPeerSet, ulc *ulc, chaindb xcbdb.Database, reqDist *requestDistributor, syncFn func(p *serverPeer)) *lightFetcher {
+ // Construct the fetcher by offering all necessary APIs
+ validator := func(header *types.Header) error {
+ // Disable seal verification explicitly if we are running in ulc mode.
+ return engine.VerifyHeader(chain, header, ulc == nil)
+ }
+ heighter := func() uint64 { return chain.CurrentHeader().Number.Uint64() }
+ dropper := func(id string) { peers.unregister(id) }
+ inserter := func(headers []*types.Header) (int, error) {
+ // Disable PoW checking explicitly if we are running in ulc mode.
+ checkFreq := 1
+ if ulc != nil {
+ checkFreq = 0
+ }
+ return chain.InsertHeaderChain(headers, checkFreq)
}
- return amount
+ f := &lightFetcher{
+ ulc: ulc,
+ peerset: peers,
+ chaindb: chaindb,
+ chain: chain,
+ reqDist: reqDist,
+ fetcher: fetcher.NewBlockFetcher(true, chain.GetHeaderByHash, nil, validator, nil, heighter, inserter, nil, dropper),
+ peers: make(map[enode.ID]*fetcherPeer),
+ synchronise: syncFn,
+ announceCh: make(chan *announce),
+ requestCh: make(chan *request),
+ deliverCh: make(chan *response),
+ syncDone: make(chan *types.Header),
+ closeCh: make(chan struct{}),
+ }
+ peers.subscribe(f)
+ return f
}
-// requestedID tells if a certain reqID has been requested by the fetcher
-func (f *lightFetcher) requestedID(reqID uint64) bool {
- f.reqMu.RLock()
- _, ok := f.requested[reqID]
- f.reqMu.RUnlock()
- return ok
+func (f *lightFetcher) start() {
+ f.wg.Add(1)
+ f.fetcher.Start()
+ go f.mainloop()
}
-// nextRequest selects the peer and announced head to be requested next, amount
-// to be downloaded starting from the head backwards is also returned
-func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
- var (
- bestHash common.Hash
- bestAmount uint64
- bestTd *big.Int
- bestSyncing bool
- )
- bestHash, bestAmount, bestTd, bestSyncing = f.findBestRequest()
-
- if bestTd == f.maxConfirmedTd {
- return nil, 0, false
- }
-
- var rq *distReq
- reqID := genReqID()
- if bestSyncing {
- rq = f.newFetcherDistReqForSync(bestHash)
- } else {
- rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
- }
- return rq, reqID, bestSyncing
+func (f *lightFetcher) stop() {
+ close(f.closeCh)
+ f.fetcher.Stop()
+ f.wg.Wait()
}
-// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
-// It also returns the announced Td (which should be verified after fetching the head),
-// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
-func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
- bestTd = f.maxConfirmedTd
- bestSyncing = false
+// registerPeer adds a new peer to the fetcher's peer set
+func (f *lightFetcher) registerPeer(p *serverPeer) {
+ f.plock.Lock()
+ defer f.plock.Unlock()
- for p, fp := range f.peers {
- for hash, n := range fp.nodeByHash {
- if f.checkKnownNode(p, n) || n.requested {
- continue
- }
- // if ulc mode is disabled, isTrustedHash returns true
- amount := f.requestAmount(p, n)
- if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
- bestHash = hash
- bestTd = n.td
- bestAmount = amount
- bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
- }
- }
- }
- return
+ f.peers[p.ID()] = &fetcherPeer{announces: make(map[common.Hash]*announce)}
}
-// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
-func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
- // If ultra light cliet mode is disabled, trust all hashes
- if f.handler.ulc == nil {
- return true
- }
- // Ultra light enabled, only trust after enough confirmations
- var agreed int
- for peer, info := range f.peers {
- if peer.trusted && info.nodeByHash[hash] != nil {
- agreed++
- }
- }
- return 100*agreed/len(f.handler.ulc.keys) >= f.handler.ulc.fraction
-}
+// unregisterPeer removes the specified peer from the fetcher's peer set
+func (f *lightFetcher) unregisterPeer(p *serverPeer) {
+ f.plock.Lock()
+ defer f.plock.Unlock()
-func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
- return &distReq{
- getCost: func(dp distPeer) uint64 {
- return 0
- },
- canSend: func(dp distPeer) bool {
- p := dp.(*serverPeer)
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if p.onlyAnnounce {
- return false
- }
- fp := f.peers[p]
- return fp != nil && fp.nodeByHash[bestHash] != nil
- },
- request: func(dp distPeer) func() {
- if f.handler.ulc != nil {
- // Keep last trusted header before sync
- f.setLastTrustedHeader(f.chain.CurrentHeader())
- }
- go func() {
- p := dp.(*serverPeer)
- p.Log().Debug("Synchronisation started")
- f.handler.synchronise(p)
- f.syncDone <- p
- }()
- return nil
- },
- }
+ delete(f.peers, p.ID())
}
-// newFetcherDistReq creates a new request for the distributor.
-func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
- return &distReq{
- getCost: func(dp distPeer) uint64 {
- p := dp.(*serverPeer)
- return p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
- },
- canSend: func(dp distPeer) bool {
- p := dp.(*serverPeer)
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if p.onlyAnnounce {
- return false
- }
- fp := f.peers[p]
- if fp == nil {
- return false
- }
- n := fp.nodeByHash[bestHash]
- return n != nil && !n.requested
- },
- request: func(dp distPeer) func() {
- p := dp.(*serverPeer)
- f.lock.Lock()
- fp := f.peers[p]
- if fp != nil {
- n := fp.nodeByHash[bestHash]
- if n != nil {
- n.requested = true
- }
- }
- f.lock.Unlock()
-
- cost := p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
- p.fcServer.QueuedRequest(reqID, cost)
- f.reqMu.Lock()
- f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
- f.reqMu.Unlock()
- go func() {
- time.Sleep(hardRequestTimeout)
- f.timeoutChn <- reqID
- }()
- return func() { p.requestHeadersByHash(reqID, bestHash, int(bestAmount), 0, true) }
- },
- }
-}
+// peer returns the peer from the fetcher peerset.
+func (f *lightFetcher) peer(id enode.ID) *fetcherPeer {
+ f.plock.RLock()
+ defer f.plock.RUnlock()
-// deliverHeaders delivers header download request responses for processing
-func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqID uint64, headers []*types.Header) {
- f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
+ return f.peers[id]
}
-// processResponse processes header download request responses, returns true if successful
-func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
- if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
- req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
- return false
- }
- headers := make([]*types.Header, req.amount)
- for i, header := range resp.headers {
- headers[int(req.amount)-1-i] = header
- }
+// forEachPeer iterates the fetcher peerset, abort the iteration if the
+// callback returns false.
+func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool) {
+ f.plock.RLock()
+ defer f.plock.RUnlock()
- if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
- if err == consensus.ErrFutureBlock {
- return true
- }
- log.Debug("Failed to insert header chain", "err", err)
- return false
- }
- tds := make([]*big.Int, len(headers))
- for i, header := range headers {
- td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
- if td == nil {
- log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
- return false
+ for id, peer := range f.peers {
+ if !check(id, peer) {
+ return
}
- tds[i] = td
}
- f.newHeaders(headers, tds)
- return true
}
-// newHeaders updates the block trees of all active peers according to a newly
-// downloaded and validated batch or headers
-func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
- var maxTd *big.Int
+// mainloop is the main event loop of the light fetcher, which is responsible for
+// - announcement maintenance (ulc)
+// If we are running in ultra light client mode, then all announcements from
+// the trusted servers are maintained. If the same announcements from trusted
+// servers reach the threshold, then the relevant header is requested for retrieval.
+//
+// - block header retrieval
+// Whenever we receive announce with higher td compared with local chain, the
+// request will be made for header retrieval.
+//
+// - re-sync trigger
+// If the local chain lags too much, then the fetcher will enter "synchronise"
+// mode to retrieve missing headers in batch.
+func (f *lightFetcher) mainloop() {
+ defer f.wg.Done()
- for p, fp := range f.peers {
- if !f.checkAnnouncedHeaders(fp, headers, tds) {
- p.Log().Debug("Inconsistent announcement")
- go f.handler.removePeer(p.id)
- }
- if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
- maxTd = fp.confirmedTd
- }
- }
+ var (
+ syncInterval = uint64(1) // Interval used to trigger a light resync.
+ syncing bool // Indicator whether the client is syncing
- if maxTd != nil {
- f.updateMaxConfirmedTd(maxTd)
- }
-}
+ ulc = f.ulc != nil
+ headCh = make(chan core.ChainHeadEvent, 100)
+ fetching = make(map[uint64]*request)
+ requestTimer = time.NewTimer(0)
-// checkAnnouncedHeaders updates peer's block tree if necessary after validating
-// a batch of headers. It searches for the latest header in the batch that has a
-// matching tree node (if any), and if it has not been marked as known already,
-// sets it and its parents to known (even those which are older than the currently
-// validated ones). Return value shows if all hashes, numbers and Tds matched
-// correctly to the announced values (otherwise the peer should be dropped).
-func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
- var (
- n *fetcherTreeNode
- header *types.Header
- td *big.Int
+ // Local status
+ localHead = f.chain.CurrentHeader()
+ localTd = f.chain.GetTd(localHead.Hash(), localHead.Number.Uint64())
)
+ sub := f.chain.SubscribeChainHeadEvent(headCh)
+ defer sub.Unsubscribe()
+
+ // reset updates the local status with given header.
+ reset := func(header *types.Header) {
+ localHead = header
+ localTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())
+ }
+ // trustedHeader returns an indicator whether the header is regarded as
+ // trusted. If we are running in the ulc mode, only when we receive enough
+ // same announcement from trusted server, the header will be trusted.
+ trustedHeader := func(hash common.Hash, number uint64) (bool, []enode.ID) {
+ var (
+ agreed []enode.ID
+ trusted bool
+ )
+ f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {
+ if anno := p.announces[hash]; anno != nil && anno.trust && anno.data.Number == number {
+ agreed = append(agreed, id)
+ if 100*len(agreed)/len(f.ulc.keys) >= f.ulc.fraction {
+ trusted = true
+ return false // abort iteration
+ }
+ }
+ return true
+ })
+ return trusted, agreed
+ }
+ for {
+ select {
+ case anno := <-f.announceCh:
+ peerid, data := anno.peerid, anno.data
+ log.Debug("Received new announce", "peer", peerid, "number", data.Number, "hash", data.Hash, "reorg", data.ReorgDepth)
- for i := len(headers) - 1; ; i-- {
- if i < 0 {
- if n == nil {
- // no more headers and nothing to match
- return true
+ peer := f.peer(peerid)
+ if peer == nil {
+ log.Debug("Receive announce from unknown peer", "peer", peerid)
+ continue
}
- // we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
- hash, number := header.ParentHash, header.Number.Uint64()-1
- td = f.chain.GetTd(hash, number)
- header = f.chain.GetHeader(hash, number)
- if header == nil || td == nil {
- log.Error("Missing parent of validated header", "hash", hash, "number", number)
- return false
+ // Announced tds should be strictly monotonic, drop the peer if
+ // the announce is out-of-order.
+ if peer.latest != nil && data.Td.Cmp(peer.latest.Td) <= 0 {
+ f.peerset.unregister(peerid.String())
+ log.Debug("Non-monotonic td", "peer", peerid, "current", data.Td, "previous", peer.latest.Td)
+ continue
}
- } else {
- header = headers[i]
- td = tds[i]
- }
- hash := header.Hash()
- number := header.Number.Uint64()
- if n == nil {
- n = fp.nodeByHash[hash]
- }
- if n != nil {
- if n.td == nil {
- // node was unannounced
- if nn := fp.nodeByHash[hash]; nn != nil {
- // if there was already a node with the same hash, continue there and drop this one
- nn.children = append(nn.children, n.children...)
- n.children = nil
- fp.deleteNode(n)
- n = nn
- } else {
- n.hash = hash
- n.td = td
- fp.nodeByHash[hash] = n
+ peer.latest = data
+
+ // Filter out any stale announce, the local chain is ahead of announce
+ if localTd != nil && data.Td.Cmp(localTd) <= 0 {
+ continue
+ }
+ peer.addAnno(anno)
+
+ // If we are not syncing, try to trigger a single retrieval or re-sync
+ if !ulc && !syncing {
+ // Two scenarios lead to re-sync:
+ // - reorg happens
+ // - local chain lags
+ // We can't retrieve the parent of the announce by single retrieval
+ // in both cases, so resync is necessary.
+ if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {
+ syncing = true
+ go f.startSync(peerid)
+ log.Debug("Trigger light sync", "peer", peerid, "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash)
+ continue
}
+ f.fetcher.Notify(peerid.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(peerid), nil)
+ log.Debug("Trigger header retrieval", "peer", peerid, "number", data.Number, "hash", data.Hash)
}
- // check if it matches the header
- if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
- // peer has previously made an invalid announcement
- return false
+ // Keep collecting announces from trusted server even if we are syncing.
+ if ulc && anno.trust {
+ // Notify underlying fetcher to retrieve header or trigger a resync if
+ // we have receive enough announcements from trusted server.
+ trusted, agreed := trustedHeader(data.Hash, data.Number)
+ if trusted && !syncing {
+ if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {
+ syncing = true
+ go f.startSync(peerid)
+ log.Debug("Trigger trusted light sync", "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash)
+ continue
+ }
+ p := agreed[rand.Intn(len(agreed))]
+ f.fetcher.Notify(p.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(p), nil)
+ log.Debug("Trigger trusted header retrieval", "number", data.Number, "hash", data.Hash)
+ }
}
- if n.known {
- // we reached a known node that matched our expectations, return with success
- return true
+
+ case req := <-f.requestCh:
+ fetching[req.reqid] = req // Tracking all in-flight requests for response latency statistic.
+ if len(fetching) == 1 {
+ f.rescheduleTimer(fetching, requestTimer)
}
- n.known = true
- if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
- fp.confirmedTd = td
- fp.bestConfirmed = n
+
+ case <-requestTimer.C:
+ for reqid, request := range fetching {
+ if time.Since(request.sendAt) > blockDelayTimeout-gatherSlack {
+ delete(fetching, reqid)
+ f.peerset.unregister(request.peerid.String())
+ log.Debug("Request timeout", "peer", request.peerid, "reqid", reqid)
+ }
}
- n = n.parent
- if n == nil {
+ f.rescheduleTimer(fetching, requestTimer)
+
+ case resp := <-f.deliverCh:
+ if req := fetching[resp.reqid]; req != nil {
+ delete(fetching, resp.reqid)
+ f.rescheduleTimer(fetching, requestTimer)
+
+ // The underlying fetcher does not check the consistency of request and response.
+ // The adversary can send the fake announces with invalid hash and number but always
+ // deliver some mismatched header. So it can't be punished by the underlying fetcher.
+ // We have to add two more rules here to detect.
+ if len(resp.headers) != 1 {
+ f.peerset.unregister(req.peerid.String())
+ log.Debug("Deliver more than requested", "peer", req.peerid, "reqid", req.reqid)
+ continue
+ }
+ if resp.headers[0].Hash() != req.hash {
+ f.peerset.unregister(req.peerid.String())
+ log.Debug("Deliver invalid header", "peer", req.peerid, "reqid", req.reqid)
+ continue
+ }
+ resp.remain <- f.fetcher.FilterHeaders(resp.peerid.String(), resp.headers, time.Now())
+ } else {
+ // Discard the entire packet whether it's a timeout response or an unexpected one.
+ resp.remain <- resp.headers
+ }
+
+ case ev := <-headCh:
+ // Short circuit if we are still syncing.
+ if syncing {
+ continue
+ }
+ reset(ev.Block.Header())
+
+ // Clean stale announcements from les-servers.
+ var droplist []enode.ID
+ f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {
+ removed := p.forwardAnno(localTd)
+ for _, anno := range removed {
+ if header := f.chain.GetHeaderByHash(anno.data.Hash); header != nil {
+ if header.Number.Uint64() != anno.data.Number {
+ droplist = append(droplist, id)
+ break
+ }
+ // In theory the td should exist.
+ td := f.chain.GetTd(anno.data.Hash, anno.data.Number)
+ if td != nil && td.Cmp(anno.data.Td) != 0 {
+ droplist = append(droplist, id)
+ break
+ }
+ }
+ }
return true
+ })
+ for _, id := range droplist {
+ f.peerset.unregister(id.String())
+ log.Debug("Kicked out peer for invalid announcement")
+ }
+ if f.newHeadHook != nil {
+ f.newHeadHook(localHead)
+ }
+
+ case origin := <-f.syncDone:
+ syncing = false // Reset the status
+
+ // Rewind all untrusted headers for ulc mode.
+ if ulc {
+ head := f.chain.CurrentHeader()
+ ancestor := rawdb.FindCommonAncestor(f.chaindb, origin, head)
+ var untrusted []common.Hash
+ for head.Number.Cmp(ancestor.Number) > 0 {
+ hash, number := head.Hash(), head.Number.Uint64()
+ if trusted, _ := trustedHeader(hash, number); trusted {
+ break
+ }
+ untrusted = append(untrusted, hash)
+ head = f.chain.GetHeader(head.ParentHash, number-1)
+ }
+ if len(untrusted) > 0 {
+ for i, j := 0, len(untrusted)-1; i < j; i, j = i+1, j-1 {
+ untrusted[i], untrusted[j] = untrusted[j], untrusted[i]
+ }
+ f.chain.Rollback(untrusted)
+ }
}
+ // Reset local status.
+ reset(f.chain.CurrentHeader())
+ if f.newHeadHook != nil {
+ f.newHeadHook(localHead)
+ }
+ log.Debug("light sync finished", "number", localHead.Number, "hash", localHead.Hash())
+
+ case <-f.closeCh:
+ return
}
}
}
-// checkSyncedHeaders updates peer's block tree after synchronisation by marking
-// downloaded headers as known. If none of the announced headers are found after
-// syncing, the peer is dropped.
-func (f *lightFetcher) checkSyncedHeaders(p *serverPeer) {
- fp := f.peers[p]
- if fp == nil {
- p.Log().Debug("Unknown peer to check sync headers")
- return
- }
- var (
- node = fp.lastAnnounced
- td *big.Int
- )
- if f.handler.ulc != nil {
- // Roll back untrusted blocks
- h, unapproved := f.lastTrustedTreeNode(p)
- f.chain.Rollback(unapproved)
- node = fp.nodeByHash[h.Hash()]
+// announce processes a new announcement message received from a peer.
+func (f *lightFetcher) announce(p *serverPeer, head *announceData) {
+ if f.newAnnounce != nil {
+ f.newAnnounce(p, head)
}
- // Find last valid block
- for node != nil {
- if td = f.chain.GetTd(node.hash, node.number); td != nil {
- break
- }
- node = node.parent
+ if f.noAnnounce {
+ return
}
- // Now node is the latest downloaded/approved header after syncing
- if node == nil {
- p.Log().Debug("Synchronisation failed")
- go f.handler.removePeer(p.id)
+ select {
+ case f.announceCh <- &announce{peerid: p.ID(), trust: p.trusted, data: head}:
+ case <-f.closeCh:
return
}
- header := f.chain.GetHeader(node.hash, node.number)
- f.newHeaders([]*types.Header{header}, []*big.Int{td})
}
-// lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes
-func (f *lightFetcher) lastTrustedTreeNode(p *serverPeer) (*types.Header, []common.Hash) {
- unapprovedHashes := make([]common.Hash, 0)
- current := f.chain.CurrentHeader()
-
- if f.lastTrustedHeader == nil {
- return current, unapprovedHashes
- }
-
- canonical := f.chain.CurrentHeader()
- if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
- canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
- }
- commonAncestor := rawdb.FindCommonAncestor(f.handler.backend.chainDb, canonical, f.lastTrustedHeader)
- if commonAncestor == nil {
- log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
- return current, unapprovedHashes
+// trackRequest sends a reqID to main loop for in-flight request tracking.
+func (f *lightFetcher) trackRequest(peerid enode.ID, reqid uint64, hash common.Hash) {
+ select {
+ case f.requestCh <- &request{reqid: reqid, peerid: peerid, sendAt: time.Now(), hash: hash}:
+ case <-f.closeCh:
}
+}
- for current.Hash() == commonAncestor.Hash() {
- if f.isTrustedHash(current.Hash()) {
- break
+// requestHeaderByHash constructs a header retrieval request and sends it to
+// local request distributor.
+//
+// Note, we rely on the underlying xcb/fetcher to retrieve and validate the
+// response, so we have to obey the rule of xcb/fetcher, which only accepts
+// the response from given peer.
+func (f *lightFetcher) requestHeaderByHash(peerid enode.ID) func(common.Hash) error {
+ return func(hash common.Hash) error {
+ req := &distReq{
+ getCost: func(dp distPeer) uint64 { return dp.(*serverPeer).getRequestCost(GetBlockHeadersMsg, 1) },
+ canSend: func(dp distPeer) bool { return dp.(*serverPeer).ID() == peerid },
+ request: func(dp distPeer) func() {
+ peer, id := dp.(*serverPeer), genReqID()
+ cost := peer.getRequestCost(GetBlockHeadersMsg, 1)
+ peer.fcServer.QueuedRequest(id, cost)
+
+ return func() {
+ f.trackRequest(peer.ID(), id, hash)
+ peer.requestHeadersByHash(id, hash, 1, 0, false)
+ }
+ },
}
- unapprovedHashes = append(unapprovedHashes, current.Hash())
- current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
+ f.reqDist.queue(req)
+ return nil
}
- return current, unapprovedHashes
-}
-
-func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.lastTrustedHeader = h
}
-// checkKnownNode checks if a block tree node is known (downloaded and validated)
-// If it was not known previously but found in the database, sets its known flag
-func (f *lightFetcher) checkKnownNode(p *serverPeer, n *fetcherTreeNode) bool {
- if n.known {
- return true
- }
- td := f.chain.GetTd(n.hash, n.number)
- if td == nil {
- return false
- }
- header := f.chain.GetHeader(n.hash, n.number)
- // check the availability of both header and td because reads are not protected by chain db mutex
- // Note: returning false is always safe here
- if header == nil {
- return false
- }
-
- fp := f.peers[p]
- if fp == nil {
- p.Log().Debug("Unknown peer to check known nodes")
- return false
- }
- if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
- p.Log().Debug("Inconsistent announcement")
- go f.handler.removePeer(p.id)
- }
- if fp.confirmedTd != nil {
- f.updateMaxConfirmedTd(fp.confirmedTd)
- }
- return n.known
-}
+// startSync invokes the synchronisation callback to start syncing.
+func (f *lightFetcher) startSync(id enode.ID) {
+ defer func(header *types.Header) {
+ f.syncDone <- header
+ }(f.chain.CurrentHeader())
-// deleteNode deletes a node and its child subtrees from a peer's block tree
-func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
- if n.parent != nil {
- for i, nn := range n.parent.children {
- if nn == n {
- n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
- break
- }
- }
- }
- for {
- if n.td != nil {
- delete(fp.nodeByHash, n.hash)
- }
- fp.nodeCnt--
- if len(n.children) == 0 {
- return
- }
- for i, nn := range n.children {
- if i == 0 {
- n = nn
- } else {
- fp.deleteNode(nn)
- }
- }
+ peer := f.peerset.peer(id.String())
+ if peer == nil || peer.onlyAnnounce {
+ return
}
+ f.synchronise(peer)
}
-// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
-// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
-// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
-// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
-// and it has also been downloaded from any peer, either before or after the given announcement).
-// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
-// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
-// the current global head).
-type updateStatsEntry struct {
- time mclock.AbsTime
- td *big.Int
- next *updateStatsEntry
-}
-
-// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
-// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
-// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
-// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
-// positive block delay value.
-func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
- if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
- f.maxConfirmedTd = td
- newEntry := &updateStatsEntry{
- time: mclock.Now(),
- td: td,
- }
- if f.lastUpdateStats != nil {
- f.lastUpdateStats.next = newEntry
- }
-
- f.lastUpdateStats = newEntry
- for p := range f.peers {
- f.checkUpdateStats(p, newEntry)
- }
- }
+// deliverHeaders delivers header download request responses for processing
+func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqid uint64, headers []*types.Header) []*types.Header {
+ remain := make(chan []*types.Header, 1)
+ select {
+ case f.deliverCh <- &response{reqid: reqid, headers: headers, peerid: peer.ID(), remain: remain}:
+ case <-f.closeCh:
+ return nil
+ }
+ return <-remain
}
-// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
-// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
-// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
-// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
-// items are removed from the head of the linked list.
-// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
-// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
-// it can set the new head to newEntry.
-func (f *lightFetcher) checkUpdateStats(p *serverPeer, newEntry *updateStatsEntry) {
- now := mclock.Now()
- fp := f.peers[p]
- if fp == nil {
- p.Log().Debug("Unknown peer to check update stats")
+// rescheduleTimer resets the specified timeout timer to the next request timeout.
+func (f *lightFetcher) rescheduleTimer(requests map[uint64]*request, timer *time.Timer) {
+ // Short circuit if no inflight requests
+ if len(requests) == 0 {
+ timer.Stop()
return
}
-
- if newEntry != nil && fp.firstUpdateStats == nil {
- fp.firstUpdateStats = newEntry
- }
- for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
- f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
- fp.firstUpdateStats = fp.firstUpdateStats.next
- }
- if fp.confirmedTd != nil {
- for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
- f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
- fp.firstUpdateStats = fp.firstUpdateStats.next
+ // Otherwise find the earliest expiring request
+ earliest := time.Now()
+ for _, req := range requests {
+ if earliest.After(req.sendAt) {
+ earliest = req.sendAt
}
}
+ timer.Reset(blockDelayTimeout - time.Since(earliest))
}
diff --git a/les/fetcher_test.go b/les/fetcher_test.go
new file mode 100644
index 000000000..1d148c1a6
--- /dev/null
+++ b/les/fetcher_test.go
@@ -0,0 +1,268 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+ "github.com/core-coin/go-core/consensus/cryptore"
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/p2p/enode"
+ "math/big"
+ "testing"
+ "time"
+)
+
+// verifyImportEvent verifies that one single event arrives on an import channel.
+func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
+ if arrive {
+ // Expect exactly one event; fail if nothing shows up within the timeout.
+ select {
+ case <-imported:
+ case <-time.After(3 * time.Second):
+ t.Fatalf("import timeout")
+ }
+ } else {
+ // Expect silence; any event within the short grace period is an error.
+ select {
+ case <-imported:
+ t.Fatalf("import invoked")
+ case <-time.After(20 * time.Millisecond):
+ }
+ }
+}
+
+// verifyImportDone verifies that no more events are arriving on an import channel.
+func verifyImportDone(t *testing.T, imported chan interface{}) {
+ select {
+ case <-imported:
+ t.Fatalf("extra block imported")
+ case <-time.After(50 * time.Millisecond):
+ }
+}
+
+// verifyChainHeight verifies the chain height is as expected.
+func verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) {
+ local := fetcher.chain.CurrentHeader().Number.Uint64()
+ if local != height {
+ t.Fatalf("chain height mismatch, got %d, want %d", local, height)
+ }
+}
+
+func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) }
+func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) }
+
+// testSequentialAnnouncements announces each server block in order and checks
+// that the light fetcher imports every announced header exactly once.
+func testSequentialAnnouncements(t *testing.T, protocol int) {
+ s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, false)
+ defer teardown()
+
+ // Create connected peer pair.
+ c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
+ p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
+ if err != nil {
+ t.Fatalf("Failed to create peer pair %v", err)
+ }
+ c.handler.fetcher.noAnnounce = false
+
+ importCh := make(chan interface{})
+ c.handler.fetcher.newHeadHook = func(header *types.Header) {
+ importCh <- header
+ }
+ // Announce every block from 1 up to the server's current head, one by one,
+ // verifying a single import event per announcement.
+ for i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ {
+ header := s.backend.Blockchain().GetHeaderByNumber(i)
+ hash, number := header.Hash(), header.Number.Uint64()
+ td := rawdb.ReadTd(s.db, hash, number)
+
+ announce := announceData{hash, number, td, 0, nil}
+ if p1.cpeer.announceType == announceTypeSigned {
+ announce.sign(s.handler.server.privateKey)
+ }
+ p1.cpeer.sendAnnounce(announce)
+ verifyImportEvent(t, importCh, true)
+ }
+ verifyImportDone(t, importCh)
+ verifyChainHeight(t, c.handler.fetcher, 4)
+}
+
+func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) }
+func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) }
+
+// testGappedAnnouncements announces only the server head (skipping intermediate
+// blocks) and then a reorged head, checking the fetcher syncs across the gaps.
+func testGappedAnnouncements(t *testing.T, protocol int) {
+ s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, false)
+ defer teardown()
+
+ // Create connected peer pair.
+ c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
+ peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
+ if err != nil {
+ t.Fatalf("Failed to create peer pair %v", err)
+ }
+ c.handler.fetcher.noAnnounce = false
+
+ done := make(chan *types.Header, 1)
+ c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }
+
+ // Prepare announcement by latest header.
+ latest := s.backend.Blockchain().CurrentHeader()
+ hash, number := latest.Hash(), latest.Number.Uint64()
+ td := rawdb.ReadTd(s.db, hash, number)
+
+ // Sign the announcement if necessary.
+ announce := announceData{hash, number, td, 0, nil}
+ if peer.cpeer.announceType == announceTypeSigned {
+ announce.sign(s.handler.server.privateKey)
+ }
+ peer.cpeer.sendAnnounce(announce)
+
+ <-done // Wait syncing
+ verifyChainHeight(t, c.handler.fetcher, 4)
+
+ // Send a reorged announcement
+ var newAnno = make(chan struct{}, 1)
+ c.handler.fetcher.noAnnounce = true
+ c.handler.fetcher.newAnnounce = func(*serverPeer, *announceData) {
+ newAnno <- struct{}{}
+ }
+ // Build a 2-block side chain on top of block 3 with higher difficulty so it
+ // becomes the canonical head on the server.
+ blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3),
+ cryptore.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) {
+ gen.OffsetTime(-9) // higher block difficulty
+ })
+ s.backend.Blockchain().InsertChain(blocks)
+ <-newAnno
+ c.handler.fetcher.noAnnounce = false
+ c.handler.fetcher.newAnnounce = nil
+
+ latest = blocks[len(blocks)-1].Header()
+ hash, number = latest.Hash(), latest.Number.Uint64()
+ td = rawdb.ReadTd(s.db, hash, number)
+
+ // reorg depth 1: the client must roll back one header before importing.
+ announce = announceData{hash, number, td, 1, nil}
+ if peer.cpeer.announceType == announceTypeSigned {
+ announce.sign(s.handler.server.privateKey)
+ }
+ peer.cpeer.sendAnnounce(announce)
+
+ <-done // Wait syncing
+ verifyChainHeight(t, c.handler.fetcher, 5)
+}
+
+func TestTrustedAnnouncementsLes2(t *testing.T) { testTrustedAnnouncement(t, 2) }
+func TestTrustedAnnouncementsLes3(t *testing.T) { testTrustedAnnouncement(t, 3) }
+
+// testTrustedAnnouncement connects a client to ten servers, half of which are
+// configured as trusted (ULC), and verifies that announcements are only acted
+// on according to the trusted-fraction rules.
+func testTrustedAnnouncement(t *testing.T, protocol int) {
+ t.Skip("long-running test")
+ var (
+ servers []*testServer
+ teardowns []func()
+ nodes []*enode.Node
+ ids []string
+ cpeers []*clientPeer
+ speers []*serverPeer
+ )
+ for i := 0; i < 10; i++ {
+ s, n, teardown := newTestServerPeer(t, 10, protocol)
+
+ servers = append(servers, s)
+ nodes = append(nodes, n)
+ teardowns = append(teardowns, teardown)
+
+ // A half of them are trusted servers.
+ if i < 5 {
+ ids = append(ids, n.String())
+ }
+ }
+ // 60% trusted-fraction threshold for the ULC client.
+ _, c, teardown := newClientServerEnv(t, 0, protocol, nil, ids, 60, false, false, false)
+ defer teardown()
+ defer func() {
+ for i := 0; i < len(teardowns); i++ {
+ teardowns[i]()
+ }
+ }()
+
+ c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
+
+ // Connect all server instances.
+ for i := 0; i < len(servers); i++ {
+ sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol)
+ if err != nil {
+ t.Fatalf("connect server and client failed, err %s", err)
+ }
+ cpeers = append(cpeers, cp)
+ speers = append(speers, sp)
+ }
+ c.handler.fetcher.noAnnounce = false
+
+ newHead := make(chan *types.Header, 1)
+ c.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header }
+
+ // check announces the given heights from every server and then verifies the
+ // client chain reached the expected height. The callback (if any) runs after
+ // the announcements, typically to wait for a head event.
+ check := func(height []uint64, expected uint64, callback func()) {
+ for i := 0; i < len(height); i++ {
+ for j := 0; j < len(servers); j++ {
+ h := servers[j].backend.Blockchain().GetHeaderByNumber(height[i])
+ hash, number := h.Hash(), h.Number.Uint64()
+ td := rawdb.ReadTd(servers[j].db, hash, number)
+
+ // Sign the announcement if necessary.
+ announce := announceData{hash, number, td, 0, nil}
+ p := cpeers[j]
+ if p.announceType == announceTypeSigned {
+ announce.sign(servers[j].handler.server.privateKey)
+ }
+ p.sendAnnounce(announce)
+ }
+ }
+ if callback != nil {
+ callback()
+ }
+ verifyChainHeight(t, c.handler.fetcher, expected)
+ }
+ check([]uint64{1}, 1, func() { <-newHead }) // Sequential announcements
+ check([]uint64{4}, 4, func() { <-newHead }) // ULC-style light syncing, rollback untrusted headers
+ check([]uint64{10}, 10, func() { <-newHead }) // Sync the whole chain.
+}
+
+// TestInvalidAnnounces checks that a peer announcing a head with a bogus total
+// difficulty is dropped by the client after the sync attempt.
+func TestInvalidAnnounces(t *testing.T) {
+ s, c, teardown := newClientServerEnv(t, 4, lpv3, nil, nil, 0, false, false, false)
+ defer teardown()
+
+ // Create connected peer pair.
+ c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
+ peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler)
+ if err != nil {
+ t.Fatalf("Failed to create peer pair %v", err)
+ }
+ c.handler.fetcher.noAnnounce = false
+
+ done := make(chan *types.Header, 1)
+ c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }
+
+ // Prepare announcement by latest header.
+ headerOne := s.backend.Blockchain().GetHeaderByNumber(1)
+ hash, number := headerOne.Hash(), headerOne.Number.Uint64()
+ td := big.NewInt(200) // bad td
+
+ // Sign the announcement if necessary.
+ announce := announceData{hash, number, td, 0, nil}
+ if peer.cpeer.announceType == announceTypeSigned {
+ announce.sign(s.handler.server.privateKey)
+ }
+ peer.cpeer.sendAnnounce(announce)
+ <-done // Wait syncing
+
+ // Ensure the bad peer is evicted
+ if c.handler.backend.peers.len() != 0 {
+ t.Fatalf("Failed to evict invalid peer")
+ }
+}
diff --git a/les/lespay/client/api.go b/les/lespay/client/api.go
new file mode 100644
index 000000000..60df09398
--- /dev/null
+++ b/les/lespay/client/api.go
@@ -0,0 +1,106 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/p2p/enode"
+ "time"
+)
+
+// PrivateClientAPI implements the lespay client side API
+type PrivateClientAPI struct {
+ vt *ValueTracker // tracks per-server request statistics and service value
+}
+
+// NewPrivateClientAPI creates a PrivateClientAPI
+func NewPrivateClientAPI(vt *ValueTracker) *PrivateClientAPI {
+ return &PrivateClientAPI{vt}
+}
+
+// parseNodeStr converts either an enode address or a plain hex node id to enode.ID
+func parseNodeStr(nodeStr string) (enode.ID, error) {
+ // A plain hex node id takes precedence over the enode URL form.
+ id, err := enode.ParseID(nodeStr)
+ if err == nil {
+ return id, nil
+ }
+ node, err := enode.Parse(enode.ValidSchemes, nodeStr)
+ if err != nil {
+ return enode.ID{}, err
+ }
+ return node.ID(), nil
+}
+
+// RequestStats returns the current contents of the reference request basket, with
+// request values meaning average per request rather than total.
+func (api *PrivateClientAPI) RequestStats() []RequestStatsItem {
+ // Thin delegation to the value tracker's aggregated basket.
+ return api.vt.RequestStats()
+}
+
+// Distribution returns a distribution as a series of (X, Y) chart coordinates,
+// where the X axis is the response time in seconds while the Y axis is the amount of
+// service value received with a response time close to the X coordinate.
+// The distribution is optionally normalized to a sum of 1.
+// If nodeStr == "" then the global distribution is returned, otherwise the individual
+// distribution of the specified server node.
+func (api *PrivateClientAPI) Distribution(nodeStr string, normalized bool) (RtDistribution, error) {
+ // Expiration scaling is only applied to absolute (non-normalized) output.
+ var expFactor utils.ExpirationFactor
+ if !normalized {
+ expFactor = utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))
+ }
+ if nodeStr == "" {
+ return api.vt.RtStats().Distribution(normalized, expFactor), nil
+ }
+ if id, err := parseNodeStr(nodeStr); err == nil {
+ return api.vt.GetNode(id).RtStats().Distribution(normalized, expFactor), nil
+ } else {
+ return RtDistribution{}, err
+ }
+}
+
+// Timeout suggests a timeout value based on either the global distribution or the
+// distribution of the specified node. The parameter is the desired rate of timeouts
+// assuming a similar distribution in the future.
+// Note that the actual timeout should have a sensible minimum bound so that operating
+// under ideal working conditions for a long time (for example, using a local server
+// with very low response times) will not make it very hard for the system to accommodate
+// longer response times in the future.
+func (api *PrivateClientAPI) Timeout(nodeStr string, failRate float64) (float64, error) {
+ // Empty node string selects the global response time statistics.
+ if nodeStr == "" {
+ return float64(api.vt.RtStats().Timeout(failRate)) / float64(time.Second), nil
+ }
+ id, err := parseNodeStr(nodeStr)
+ if err != nil {
+ return 0, err
+ }
+ return float64(api.vt.GetNode(id).RtStats().Timeout(failRate)) / float64(time.Second), nil
+}
+
+// Value calculates the total service value provided either globally or by the specified
+// server node, using a weight function based on the given timeout.
+func (api *PrivateClientAPI) Value(nodeStr string, timeout float64) (float64, error) {
+ // Build the response-time weight table from the given timeout (in seconds).
+ wt := TimeoutWeights(time.Duration(timeout * float64(time.Second)))
+ expFactor := utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))
+ if nodeStr == "" {
+ return api.vt.RtStats().Value(wt, expFactor), nil
+ }
+ if id, err := parseNodeStr(nodeStr); err == nil {
+ return api.vt.GetNode(id).RtStats().Value(wt, expFactor), nil
+ } else {
+ return 0, err
+ }
+}
diff --git a/les/lespay/client/fillset.go b/les/lespay/client/fillset.go
new file mode 100644
index 000000000..5670feebd
--- /dev/null
+++ b/les/lespay/client/fillset.go
@@ -0,0 +1,106 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/nodestate"
+ "sync"
+)
+
+// FillSet tries to read nodes from an input iterator and add them to a node set by
+// setting the specified node state flag(s) until the size of the set reaches the target.
+// Note that other mechanisms (like other FillSet instances reading from different inputs)
+// can also set the same flag(s) and FillSet will always care about the total number of
+// nodes having those flags.
+type FillSet struct {
+ lock sync.Mutex
+ cond *sync.Cond // signaled when the set drops below target or FillSet is closed
+ ns *nodestate.NodeStateMachine
+ input enode.Iterator
+ closed bool
+ flags nodestate.Flags // flag(s) set on nodes added to the set
+ count, target int // current and desired number of nodes carrying flags
+}
+
+// NewFillSet creates a new FillSet
+func NewFillSet(ns *nodestate.NodeStateMachine, input enode.Iterator, flags nodestate.Flags) *FillSet {
+ fs := &FillSet{
+ ns: ns,
+ input: input,
+ flags: flags,
+ }
+ fs.cond = sync.NewCond(&fs.lock)
+
+ // Track the total number of nodes carrying the flags, no matter who set them.
+ ns.SubscribeState(flags, func(n *enode.Node, oldState, newState nodestate.Flags) {
+ fs.lock.Lock()
+ if oldState.Equals(flags) {
+ fs.count--
+ }
+ if newState.Equals(flags) {
+ fs.count++
+ }
+ // Wake the read loop when the set has fallen below the target.
+ if fs.target > fs.count {
+ fs.cond.Signal()
+ }
+ fs.lock.Unlock()
+ })
+
+ go fs.readLoop()
+ return fs
+}
+
+// readLoop keeps reading nodes from the input and setting the specified flags for them
+// whenever the node set size is under the current target
+func (fs *FillSet) readLoop() {
+ for {
+ fs.lock.Lock()
+ // Block until more nodes are wanted or the FillSet is shut down.
+ for fs.target <= fs.count && !fs.closed {
+ fs.cond.Wait()
+ }
+
+ // NOTE(review): Next() is deliberately called without holding the lock
+ // since it may block on the input; a false return ends the loop.
+ fs.lock.Unlock()
+ if !fs.input.Next() {
+ return
+ }
+ fs.ns.SetState(fs.input.Node(), fs.flags, nodestate.Flags{}, 0)
+ }
+}
+
+// SetTarget sets the current target for node set size. If the previous target was not
+// reached and FillSet was still waiting for the next node from the input then the next
+// incoming node will be added to the set regardless of the target. This ensures that
+// all nodes coming from the input are eventually added to the set.
+func (fs *FillSet) SetTarget(target int) {
+ fs.lock.Lock()
+ defer fs.lock.Unlock()
+
+ fs.target = target
+ // Wake the read loop if the new target is not yet satisfied.
+ if fs.target > fs.count {
+ fs.cond.Signal()
+ }
+}
+
+// Close shuts FillSet down and closes the input iterator
+func (fs *FillSet) Close() {
+ fs.lock.Lock()
+ defer fs.lock.Unlock()
+
+ fs.closed = true
+ // Closing the input makes a blocked Next() return false, ending readLoop.
+ fs.input.Close()
+ fs.cond.Signal()
+}
diff --git a/les/lespay/client/fillset_test.go b/les/lespay/client/fillset_test.go
new file mode 100644
index 000000000..a5bd05e0b
--- /dev/null
+++ b/les/lespay/client/fillset_test.go
@@ -0,0 +1,112 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/p2p/nodestate"
+ "math/rand"
+ "testing"
+ "time"
+)
+
+// testIter is a hand-driven enode.Iterator: Next blocks until the test pushes
+// a node (or nil to end iteration) through nodeCh, and signals waitCh so the
+// test can observe that the consumer is waiting for input.
+type testIter struct {
+ waitCh chan struct{}
+ nodeCh chan *enode.Node
+ node *enode.Node
+}
+
+func (i *testIter) Next() bool {
+ i.waitCh <- struct{}{}
+ i.node = <-i.nodeCh
+ // A nil node signals the end of the iteration.
+ return i.node != nil
+}
+
+func (i *testIter) Node() *enode.Node {
+ return i.node
+}
+
+func (i *testIter) Close() {}
+
+// push feeds one freshly generated random node into the iterator.
+func (i *testIter) push() {
+ var id enode.ID
+ rand.Read(id[:])
+ i.nodeCh <- enode.SignNull(new(enr.Record), id)
+}
+
+// waiting reports whether the consumer called Next within the given timeout.
+func (i *testIter) waiting(timeout time.Duration) bool {
+ select {
+ case <-i.waitCh:
+ return true
+ case <-time.After(timeout):
+ return false
+ }
+}
+
+// TestFillSet checks that FillSet only pulls from its input while the flagged
+// node count is below the target, and refills after flags are removed.
+func TestFillSet(t *testing.T) {
+ ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
+ iter := &testIter{
+ waitCh: make(chan struct{}),
+ nodeCh: make(chan *enode.Node),
+ }
+ fs := NewFillSet(ns, iter, sfTest1)
+ ns.Start()
+
+ // expWaiting asserts that FillSet asks the iterator for i more nodes,
+ // optionally supplying one after each request.
+ expWaiting := func(i int, push bool) {
+ for ; i > 0; i-- {
+ if !iter.waiting(time.Second * 10) {
+ t.Fatalf("FillSet not waiting for new nodes")
+ }
+ if push {
+ iter.push()
+ }
+ }
+ }
+
+ expNotWaiting := func() {
+ if iter.waiting(time.Millisecond * 100) {
+ t.Fatalf("FillSet unexpectedly waiting for new nodes")
+ }
+ }
+
+ expNotWaiting()
+ fs.SetTarget(3)
+ expWaiting(3, true)
+ expNotWaiting()
+ fs.SetTarget(100)
+ expWaiting(2, true)
+ expWaiting(1, false)
+ // lower the target before the previous one has been filled up
+ fs.SetTarget(0)
+ iter.push()
+ expNotWaiting()
+ fs.SetTarget(10)
+ expWaiting(4, true)
+ expNotWaiting()
+ // remove all previously set flags
+ ns.ForEach(sfTest1, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ ns.SetState(node, nodestate.Flags{}, sfTest1, 0)
+ })
+ // now expect FillSet to fill the set up again with 10 new nodes
+ expWaiting(10, true)
+ expNotWaiting()
+
+ fs.Close()
+ ns.Stop()
+}
diff --git a/les/lespay/client/queueiterator.go b/les/lespay/client/queueiterator.go
new file mode 100644
index 000000000..15b15bfef
--- /dev/null
+++ b/les/lespay/client/queueiterator.go
@@ -0,0 +1,122 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/nodestate"
+ "sync"
+)
+
+// QueueIterator returns nodes from the specified selectable set in the same order as
+// they entered the set.
+type QueueIterator struct {
+ lock sync.Mutex
+ cond *sync.Cond // signaled whenever the queue changes or the iterator closes
+
+ ns *nodestate.NodeStateMachine
+ queue []*enode.Node // currently selectable nodes, in insertion order
+ nextNode *enode.Node // node most recently returned by Next
+ waitCallback func(bool) // invoked with true/false around a blocking wait in Next
+ fifo, closed bool
+}
+
+// NewQueueIterator creates a new QueueIterator. Nodes are selectable if they have all the required
+// and none of the disabled flags set. When a node is selected the selectedFlag is set which also
+// disables further selectability until it is removed or times out.
+func NewQueueIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, fifo bool, waitCallback func(bool)) *QueueIterator {
+ qi := &QueueIterator{
+ ns: ns,
+ fifo: fifo,
+ waitCallback: waitCallback,
+ }
+ qi.cond = sync.NewCond(&qi.lock)
+
+ // Keep the queue in sync with flag transitions: enqueue on becoming
+ // selectable, dequeue on losing selectability.
+ ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {
+ oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)
+ newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)
+ if newMatch == oldMatch {
+ return
+ }
+
+ qi.lock.Lock()
+ defer qi.lock.Unlock()
+
+ if newMatch {
+ qi.queue = append(qi.queue, n)
+ } else {
+ // Remove the node from the queue, preserving the order of the rest.
+ id := n.ID()
+ for i, qn := range qi.queue {
+ if qn.ID() == id {
+ copy(qi.queue[i:len(qi.queue)-1], qi.queue[i+1:])
+ qi.queue = qi.queue[:len(qi.queue)-1]
+ break
+ }
+ }
+ }
+ qi.cond.Signal()
+ })
+ return qi
+}
+
+// Next moves to the next selectable node.
+func (qi *QueueIterator) Next() bool {
+ qi.lock.Lock()
+ if !qi.closed && len(qi.queue) == 0 {
+ // Notify the callback that we are about to block waiting for a node.
+ if qi.waitCallback != nil {
+ qi.waitCallback(true)
+ }
+ for !qi.closed && len(qi.queue) == 0 {
+ qi.cond.Wait()
+ }
+ if qi.waitCallback != nil {
+ qi.waitCallback(false)
+ }
+ }
+ if qi.closed {
+ qi.nextNode = nil
+ qi.lock.Unlock()
+ return false
+ }
+ // Move to the next node in queue.
+ if qi.fifo {
+ // FIFO: take the head and shift the remainder forward.
+ qi.nextNode = qi.queue[0]
+ copy(qi.queue[:len(qi.queue)-1], qi.queue[1:])
+ qi.queue = qi.queue[:len(qi.queue)-1]
+ } else {
+ // LIFO: take the most recently added node.
+ qi.nextNode = qi.queue[len(qi.queue)-1]
+ qi.queue = qi.queue[:len(qi.queue)-1]
+ }
+ qi.lock.Unlock()
+ return true
+}
+
+// Close ends the iterator.
+func (qi *QueueIterator) Close() {
+ qi.lock.Lock()
+ qi.closed = true
+ qi.lock.Unlock()
+ // Wake up a Next call blocked in cond.Wait so it can observe closed.
+ qi.cond.Signal()
+}
+
+// Node returns the current node.
+func (qi *QueueIterator) Node() *enode.Node {
+ qi.lock.Lock()
+ defer qi.lock.Unlock()
+
+ return qi.nextNode
+}
diff --git a/les/lespay/client/queueiterator_test.go b/les/lespay/client/queueiterator_test.go
new file mode 100644
index 000000000..60132ec99
--- /dev/null
+++ b/les/lespay/client/queueiterator_test.go
@@ -0,0 +1,106 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/p2p/nodestate"
+
+ "testing"
+ "time"
+)
+
+// testNodeID produces a deterministic node id from a small integer; byte 0 is
+// a fixed marker (42) so testNodeIndex can recognize and invert the mapping.
+func testNodeID(i int) enode.ID {
+ return enode.ID{42, byte(i % 256), byte(i / 256)}
+}
+
+// testNodeIndex recovers the integer encoded by testNodeID, or -1 if the id
+// was not produced by it.
+func testNodeIndex(id enode.ID) int {
+ if id[0] != 42 {
+ return -1
+ }
+ return int(id[1]) + int(id[2])*256
+}
+
+// testNode wraps testNodeID into a null-signed enode.Node usable in tests.
+func testNode(i int) *enode.Node {
+ return enode.SignNull(new(enr.Record), testNodeID(i))
+}
+
+func TestQueueIteratorFIFO(t *testing.T) {
+ testQueueIterator(t, true)
+}
+
+func TestQueueIteratorLIFO(t *testing.T) {
+ testQueueIterator(t, false)
+}
+
+// testQueueIterator drives a QueueIterator through flag transitions and checks
+// the returned node order for both FIFO and LIFO modes.
+func testQueueIterator(t *testing.T, fifo bool) {
+ ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
+ qi := NewQueueIterator(ns, sfTest2, sfTest3.Or(sfTest4), fifo, nil)
+ ns.Start()
+ for i := 1; i <= iterTestNodeCount; i++ {
+ ns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)
+ }
+ // next calls qi.Next with a timeout guard, marks the returned node as
+ // selected (sfTest4) and returns its test index.
+ next := func() int {
+ ch := make(chan struct{})
+ go func() {
+ qi.Next()
+ close(ch)
+ }()
+ select {
+ case <-ch:
+ case <-time.After(time.Second * 5):
+ t.Fatalf("Iterator.Next() timeout")
+ }
+ node := qi.Node()
+ ns.SetState(node, sfTest4, nodestate.Flags{}, 0)
+ return testNodeIndex(node.ID())
+ }
+ exp := func(i int) {
+ n := next()
+ if n != i {
+ t.Errorf("Wrong item returned by iterator (expected %d, got %d)", i, n)
+ }
+ }
+ // explist expects the listed indices in insertion order for FIFO and in
+ // reverse for LIFO.
+ explist := func(list []int) {
+ for i := range list {
+ if fifo {
+ exp(list[i])
+ } else {
+ exp(list[len(list)-1-i])
+ }
+ }
+ }
+
+ ns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)
+ explist([]int{1, 2, 3})
+ ns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(5), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(5), sfTest3, nodestate.Flags{}, 0)
+ explist([]int{4, 6})
+ ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(2), sfTest3, nodestate.Flags{}, 0)
+ ns.SetState(testNode(2), nodestate.Flags{}, sfTest3, 0)
+ explist([]int{1, 3, 2})
+ ns.Stop()
+}
diff --git a/les/lespay/client/requestbasket.go b/les/lespay/client/requestbasket.go
new file mode 100644
index 000000000..fbe3b6b52
--- /dev/null
+++ b/les/lespay/client/requestbasket.go
@@ -0,0 +1,284 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/rlp"
+ "io"
+)
+
+const basketFactor = 1000000 // reference basket amount and value scale factor
+
+// referenceBasket keeps track of global request usage statistics and the usual prices
+// of each used request type relative to each other. The amounts in the basket are scaled
+// up by basketFactor because of the exponential expiration of long-term statistical data.
+// Values are scaled so that the sum of all amounts and the sum of all values are equal.
+//
+// reqValues represent the internal relative value estimates for each request type and are
+// calculated as value / amount. The average reqValue of all used requests is 1.
+// In other words: SUM(refBasket[type].amount * reqValue[type]) = SUM(refBasket[type].amount)
+type referenceBasket struct {
+	basket    requestBasket // aggregated amounts and values transferred from server baskets
+	reqValues []float64     // contents are read only, new slice is created for each update
+}
+
+// serverBasket collects served request amount and value statistics for a single server.
+//
+// Values are gradually transferred to the global reference basket with a long time
+// constant so that each server basket represents long term usage and price statistics.
+// When the transferred part is added to the reference basket the values are scaled so
+// that their sum equals the total value calculated according to the previous reqValues.
+// The ratio of request values coming from the server basket represent the pricing of
+// the specific server and modify the global estimates with a weight proportional to
+// the amount of service provided by the server.
+type serverBasket struct {
+	basket   requestBasket // per-server collected amounts and values
+	rvFactor float64       // scales this server's costs into local value dimensions (see add)
+}
+
+type (
+	// requestBasket holds amounts and values for each request type.
+	// These values are exponentially expired (see utils.ExpiredValue). The power of 2
+	// exponent is applicable to all values within.
+	requestBasket struct {
+		items []basketItem
+		exp   uint64 // power of 2 expiration exponent shared by all items
+	}
+	// basketItem holds amount and value for a single request type. Value is the total
+	// relative request value accumulated for served requests while amount is the counter
+	// for each request type.
+	// Note that these values are both scaled up by basketFactor because of the exponential
+	// expiration.
+	basketItem struct {
+		amount, value uint64
+	}
+)
+
+// setExp sets the power of 2 exponent of the structure, scaling base values (the amounts
+// and request values) up or down if necessary.
+func (b *requestBasket) setExp(exp uint64) {
+	switch {
+	case exp > b.exp:
+		// Raising the exponent shrinks the stored base values.
+		shift := exp - b.exp
+		for i := range b.items {
+			b.items[i].amount >>= shift
+			b.items[i].value >>= shift
+		}
+		b.exp = exp
+	case exp < b.exp:
+		// Lowering the exponent grows the stored base values.
+		shift := b.exp - exp
+		for i := range b.items {
+			b.items[i].amount <<= shift
+			b.items[i].value <<= shift
+		}
+		b.exp = exp
+	}
+}
+
+// init initializes a new server basket with the given service vector size (number of
+// different request types)
+func (s *serverBasket) init(size int) {
+	// Allocate only once; a repeated call keeps already collected statistics.
+	if s.basket.items == nil {
+		s.basket.items = make([]basketItem, size)
+	}
+}
+
+// add adds the given type and amount of requests to the basket. Cost is calculated
+// according to the server's own cost table.
+func (s *serverBasket) add(reqType, reqAmount uint32, reqCost uint64, expFactor utils.ExpirationFactor) {
+	// Bring the basket to the current expiration exponent before accumulating.
+	s.basket.setExp(expFactor.Exp)
+	i := &s.basket.items[reqType]
+	// Amounts are scaled up by basketFactor; values are scaled by the current
+	// request value factor so they are comparable across servers.
+	i.amount += uint64(float64(uint64(reqAmount)*basketFactor) * expFactor.Factor)
+	i.value += uint64(float64(reqCost) * s.rvFactor * expFactor.Factor)
+}
+
+// updateRvFactor updates the request value factor that scales server costs into the
+// local value dimensions.
+// Note: only requests added after this call are priced with the new factor.
+func (s *serverBasket) updateRvFactor(rvFactor float64) {
+	s.rvFactor = rvFactor
+}
+
+// transfer decreases amounts and values in the basket with the given ratio and
+// moves the removed amounts into a new basket which is returned and can be added
+// to the global reference basket.
+func (s *serverBasket) transfer(ratio float64) requestBasket {
+	moved := requestBasket{
+		items: make([]basketItem, len(s.basket.items)),
+		exp:   s.basket.exp,
+	}
+	for i, item := range s.basket.items {
+		takeAmount := uint64(float64(item.amount) * ratio)
+		takeValue := uint64(float64(item.value) * ratio)
+		// Clamp against rounding artifacts so the basket never underflows.
+		if takeAmount > item.amount {
+			takeAmount = item.amount
+		}
+		if takeValue > item.value {
+			takeValue = item.value
+		}
+		moved.items[i] = basketItem{takeAmount, takeValue}
+		s.basket.items[i] = basketItem{item.amount - takeAmount, item.value - takeValue}
+	}
+	return moved
+}
+
+// init initializes the reference basket with the given service vector size (number of
+// different request types)
+func (r *referenceBasket) init(size int) {
+	// Normalize the preloaded basket first, then derive per-type request values.
+	r.reqValues = make([]float64, size)
+	r.normalize()
+	r.updateReqValues()
+}
+
+// add adds the transferred part of a server basket to the reference basket while scaling
+// value amounts so that their sum equals the total value calculated according to the
+// previous reqValues.
+func (r *referenceBasket) add(newBasket requestBasket) {
+	// Expiration exponents must match before accumulating.
+	r.basket.setExp(newBasket.exp)
+	// scale newBasket to match service unit value
+	var (
+		totalCost uint64
+		totalValue float64
+	)
+	for i, v := range newBasket.items {
+		totalCost += v.value
+		totalValue += float64(v.amount) * r.reqValues[i]
+	}
+	if totalCost > 0 {
+		// add to reference with scaled values
+		scaleValues := totalValue / float64(totalCost)
+		for i, v := range newBasket.items {
+			r.basket.items[i].amount += v.amount
+			r.basket.items[i].value += uint64(float64(v.value) * scaleValues)
+		}
+	}
+	r.updateReqValues()
+}
+
+// updateReqValues recalculates reqValues after adding transferred baskets. Note that
+// values should be normalized first.
+func (r *referenceBasket) updateReqValues() {
+	// A fresh slice is created because the old one may still be referenced by readers.
+	values := make([]float64, len(r.reqValues))
+	for i, item := range r.basket.items {
+		if item.amount == 0 {
+			values[i] = 0
+			continue
+		}
+		values[i] = float64(item.value) / float64(item.amount)
+	}
+	r.reqValues = values
+}
+
+// normalize ensures that the sum of values equals the sum of amounts in the basket
+// by applying the same relative correction to every value.
+func (r *referenceBasket) normalize() {
+	var sumAmount, sumValue uint64
+	for _, b := range r.basket.items {
+		sumAmount += b.amount
+		sumValue += b.value
+	}
+	// Nothing to correct if the sums already match. This also guards against the
+	// division by zero below when the basket holds no value yet (sumValue == 0),
+	// which would otherwise propagate NaN/Inf through the float->int conversion
+	// and corrupt the stored values.
+	if sumValue == 0 || sumAmount == sumValue {
+		return
+	}
+	add := float64(int64(sumAmount-sumValue)) / float64(sumValue)
+	for i, b := range r.basket.items {
+		b.value += uint64(int64(float64(b.value) * add))
+		r.basket.items[i] = b
+	}
+}
+
+// reqValueFactor calculates the request value factor applicable to the server with
+// the given announced request cost list
+func (r *referenceBasket) reqValueFactor(costList []uint64) float64 {
+	var (
+		costSum  float64
+		valueSum uint64
+	)
+	for i, item := range r.basket.items {
+		// Floats avoid overflowing the cost*amount product.
+		costSum += float64(costList[i]) * float64(item.amount)
+		valueSum += item.value
+	}
+	if costSum < 1 {
+		return 0
+	}
+	return float64(valueSum) * basketFactor / costSum
+}
+
+// EncodeRLP implements rlp.Encoder
+// basketItem is encoded as the RLP list [amount, value].
+func (b *basketItem) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, []interface{}{b.amount, b.value})
+}
+
+// DecodeRLP implements rlp.Decoder
+func (b *basketItem) DecodeRLP(s *rlp.Stream) error {
+	// Decode into an exported shadow struct since rlp only handles exported fields.
+	var item struct {
+		Amount, Value uint64
+	}
+	if err := s.Decode(&item); err != nil {
+		return err
+	}
+	b.amount, b.value = item.Amount, item.Value
+	return nil
+}
+
+// EncodeRLP implements rlp.Encoder
+// requestBasket is encoded as the RLP list [items, exp].
+func (r *requestBasket) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, []interface{}{r.items, r.exp})
+}
+
+// DecodeRLP implements rlp.Decoder
+func (r *requestBasket) DecodeRLP(s *rlp.Stream) error {
+	// Decode into an exported shadow struct since rlp only handles exported fields.
+	var enc struct {
+		Items []basketItem
+		Exp uint64
+	}
+	if err := s.Decode(&enc); err != nil {
+		return err
+	}
+	r.items, r.exp = enc.Items, enc.Exp
+	return nil
+}
+
+// convertMapping converts a basket loaded from the database into the current format.
+// If the available request types and their mapping into the service vector differ from
+// the one used when saving the basket then this function reorders old fields and fills
+// in previously unknown fields by scaling up amounts and values taken from the
+// initialization basket.
+// NOTE(review): the exp of the returned basket is left at zero — confirm callers
+// expect the converted basket to carry no expiration exponent.
+func (r requestBasket) convertMapping(oldMapping, newMapping []string, initBasket requestBasket) requestBasket {
+	nameMap := make(map[string]int)
+	for i, name := range oldMapping {
+		nameMap[name] = i
+	}
+	rc := requestBasket{items: make([]basketItem, len(newMapping))}
+	var scale, oldScale, newScale float64
+	for i, name := range newMapping {
+		if ii, ok := nameMap[name]; ok {
+			// Request type known in the old mapping: copy it over and accumulate
+			// weighted sums used to estimate the magnitude of the loaded basket
+			// relative to the initialization basket.
+			rc.items[i] = r.items[ii]
+			oldScale += float64(initBasket.items[i].amount) * float64(initBasket.items[i].amount)
+			newScale += float64(rc.items[i].amount) * float64(initBasket.items[i].amount)
+		}
+	}
+	if oldScale > 1e-10 {
+		scale = newScale / oldScale
+	} else {
+		// No overlap between old and new mappings; fall back to unscaled init values.
+		scale = 1
+	}
+	for i, name := range newMapping {
+		if _, ok := nameMap[name]; !ok {
+			// Previously unknown request type: seed it from the initialization
+			// basket, scaled to match the magnitude of the loaded data.
+			rc.items[i].amount = uint64(float64(initBasket.items[i].amount) * scale)
+			rc.items[i].value = uint64(float64(initBasket.items[i].value) * scale)
+		}
+	}
+	return rc
+}
diff --git a/les/lespay/client/requestbasket_test.go b/les/lespay/client/requestbasket_test.go
new file mode 100644
index 000000000..0eb309c85
--- /dev/null
+++ b/les/lespay/client/requestbasket_test.go
@@ -0,0 +1,160 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/les/utils"
+ "math/rand"
+ "testing"
+)
+
+// checkU64 fails the test if value does not exactly match exp.
+func checkU64(t *testing.T, name string, value, exp uint64) {
+	if value == exp {
+		return
+	}
+	t.Errorf("Incorrect value for %s: got %d, expected %d", name, value, exp)
+}
+
+// checkF64 fails the test if value differs from exp by more than tol.
+func checkF64(t *testing.T, name string, value, exp, tol float64) {
+	if value < exp-tol || value > exp+tol {
+		t.Errorf("Incorrect value for %s: got %f, expected %f", name, value, exp)
+	}
+}
+
+// TestServerBasket checks amount/value accumulation and partial transfer of a
+// single server basket under two different request value factors.
+func TestServerBasket(t *testing.T) {
+	var s serverBasket
+	s.init(2)
+	// add some requests with different request value factors
+	s.updateRvFactor(1)
+	noexp := utils.ExpirationFactor{Factor: 1}
+	s.add(0, 1000, 10000, noexp)
+	s.add(1, 3000, 60000, noexp)
+	s.updateRvFactor(10)
+	s.add(0, 4000, 4000, noexp)
+	s.add(1, 2000, 4000, noexp)
+	s.updateRvFactor(10)
+	// check basket contents directly
+	// amounts: 1000+4000 and 3000+2000; values: 10000*1+4000*10 and 60000*1+4000*10
+	checkU64(t, "s.basket[0].amount", s.basket.items[0].amount, 5000*basketFactor)
+	checkU64(t, "s.basket[0].value", s.basket.items[0].value, 50000)
+	checkU64(t, "s.basket[1].amount", s.basket.items[1].amount, 5000*basketFactor)
+	checkU64(t, "s.basket[1].value", s.basket.items[1].value, 100000)
+	// transfer 50% of the contents of the basket
+	transfer1 := s.transfer(0.5)
+	checkU64(t, "transfer1[0].amount", transfer1.items[0].amount, 2500*basketFactor)
+	checkU64(t, "transfer1[0].value", transfer1.items[0].value, 25000)
+	checkU64(t, "transfer1[1].amount", transfer1.items[1].amount, 2500*basketFactor)
+	checkU64(t, "transfer1[1].value", transfer1.items[1].value, 50000)
+	// add more requests
+	s.updateRvFactor(100)
+	s.add(0, 1000, 100, noexp)
+	// transfer 25% of the contents of the basket
+	transfer2 := s.transfer(0.25)
+	checkU64(t, "transfer2[0].amount", transfer2.items[0].amount, (2500+1000)/4*basketFactor)
+	checkU64(t, "transfer2[0].value", transfer2.items[0].value, (25000+10000)/4)
+	checkU64(t, "transfer2[1].amount", transfer2.items[1].amount, 2500/4*basketFactor)
+	checkU64(t, "transfer2[1].value", transfer2.items[1].value, 50000/4)
+}
+
+// TestConvertMapping checks reordering of known request types and scaled
+// initialization of previously unknown ones.
+func TestConvertMapping(t *testing.T) {
+	b := requestBasket{items: []basketItem{{3, 3}, {1, 1}, {2, 2}}}
+	oldMap := []string{"req3", "req1", "req2"}
+	newMap := []string{"req1", "req2", "req3", "req4"}
+	init := requestBasket{items: []basketItem{{2, 2}, {4, 4}, {6, 6}, {8, 8}}}
+	bc := b.convertMapping(oldMap, newMap, init)
+	checkU64(t, "bc[0].amount", bc.items[0].amount, 1)
+	checkU64(t, "bc[1].amount", bc.items[1].amount, 2)
+	checkU64(t, "bc[2].amount", bc.items[2].amount, 3)
+	checkU64(t, "bc[3].amount", bc.items[3].amount, 4) // 8 should be scaled down to 4
+}
+
+// TestReqValueFactor checks the request value factor calculation against a
+// hand-computed expected value.
+func TestReqValueFactor(t *testing.T) {
+	var ref referenceBasket
+	ref.basket = requestBasket{items: make([]basketItem, 4)}
+	for i := range ref.basket.items {
+		ref.basket.items[i].amount = uint64(i+1) * basketFactor
+		ref.basket.items[i].value = uint64(i+1) * basketFactor
+	}
+	ref.init(4)
+	rvf := ref.reqValueFactor([]uint64{1000, 2000, 3000, 4000})
+	// expected value is (1000000+2000000+3000000+4000000) / (1*1000+2*2000+3*3000+4*4000) = 10000000/30000 = 333.333
+	checkF64(t, "reqValueFactor", rvf, 333.333, 1)
+}
+
+// TestNormalize checks that normalize() brings the value sum close to the amount
+// sum for randomly filled baskets.
+func TestNormalize(t *testing.T) {
+	for cycle := 0; cycle < 100; cycle++ {
+		// Fill a 10-item basket with random amounts and values.
+		valueRange, lower := 1000000, 1000000
+		ref := referenceBasket{basket: requestBasket{items: make([]basketItem, 10)}}
+		for i := range ref.basket.items {
+			ref.basket.items[i].amount = uint64(rand.Intn(valueRange) + lower)
+			ref.basket.items[i].value = uint64(rand.Intn(valueRange) + lower)
+		}
+		ref.normalize()
+
+		// After normalization SUM(amount) should approximately equal SUM(value).
+		var totalAmount, totalValue uint64
+		for _, item := range ref.basket.items {
+			totalAmount += item.amount
+			totalValue += item.value
+		}
+		const epsilon = 0.01
+		if float64(totalAmount)*(1+epsilon) < float64(totalValue) || float64(totalAmount)*(1-epsilon) > float64(totalValue) {
+			t.Fatalf("Failed to normalize sumAmount: %d sumValue: %d", totalAmount, totalValue)
+		}
+	}
+}
+
+// TestReqValueAdjustment feeds two servers that both price request types in a
+// 1:2:3 ratio; the global reqValues are expected to converge to {0.5, 1, 1.5}
+// (relative prices with an average of 1).
+func TestReqValueAdjustment(t *testing.T) {
+	var s1, s2 serverBasket
+	s1.init(3)
+	s2.init(3)
+	cost1 := []uint64{30000, 60000, 90000}
+	cost2 := []uint64{100000, 200000, 300000}
+	var ref referenceBasket
+	ref.basket = requestBasket{items: make([]basketItem, 3)}
+	for i := range ref.basket.items {
+		ref.basket.items[i].amount = 123 * basketFactor
+		ref.basket.items[i].value = 123 * basketFactor
+	}
+	ref.init(3)
+	// initial reqValues are expected to be {1, 1, 1}
+	checkF64(t, "reqValues[0]", ref.reqValues[0], 1, 0.01)
+	checkF64(t, "reqValues[1]", ref.reqValues[1], 1, 0.01)
+	checkF64(t, "reqValues[2]", ref.reqValues[2], 1, 0.01)
+	var logOffset utils.Fixed64
+	for period := 0; period < 1000; period++ {
+		exp := utils.ExpFactor(logOffset)
+		s1.updateRvFactor(ref.reqValueFactor(cost1))
+		s2.updateRvFactor(ref.reqValueFactor(cost2))
+		// throw in random requests into each basket using their internal pricing
+		for i := 0; i < 1000; i++ {
+			reqType, reqAmount := uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)
+			reqCost := uint64(reqAmount) * cost1[reqType]
+			s1.add(reqType, reqAmount, reqCost, exp)
+			reqType, reqAmount = uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)
+			reqCost = uint64(reqAmount) * cost2[reqType]
+			s2.add(reqType, reqAmount, reqCost, exp)
+		}
+		// transfer 10% of each server basket into the reference each period
+		ref.add(s1.transfer(0.1))
+		ref.add(s2.transfer(0.1))
+		ref.normalize()
+		ref.updateReqValues()
+		logOffset += utils.Float64ToFixed64(0.1)
+	}
+	checkF64(t, "reqValues[0]", ref.reqValues[0], 0.5, 0.01)
+	checkF64(t, "reqValues[1]", ref.reqValues[1], 1, 0.01)
+	checkF64(t, "reqValues[2]", ref.reqValues[2], 1.5, 0.01)
+}
diff --git a/les/lespay/client/timestats.go b/les/lespay/client/timestats.go
new file mode 100644
index 000000000..cbb0eb61f
--- /dev/null
+++ b/les/lespay/client/timestats.go
@@ -0,0 +1,236 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/rlp"
+ "io"
+ "math"
+ "time"
+)
+
+const (
+ minResponseTime = time.Millisecond * 50
+ maxResponseTime = time.Second * 10
+ timeStatLength = 32
+ weightScaleFactor = 1000000
+)
+
+// ResponseTimeStats is the response time distribution of a set of answered requests,
+// weighted with request value, either served by a single server or aggregated for
+// multiple servers.
+// It it a fixed length (timeStatLength) distribution vector with linear interpolation.
+// The X axis (the time values) are not linear, they should be transformed with
+// TimeToStatScale and StatScaleToTime.
+type (
+	ResponseTimeStats struct {
+		stats [timeStatLength]uint64 // weighted histogram of response times
+		exp   uint64                 // power of 2 expiration exponent applicable to all entries
+	}
+	// ResponseTimeWeights is a per-bucket weight vector matching the stats layout.
+	ResponseTimeWeights [timeStatLength]float64
+)
+
+var timeStatsLogFactor = (timeStatLength - 1) / (math.Log(float64(maxResponseTime)/float64(minResponseTime)) + 1)
+
+// TimeToStatScale converts a response time to a distribution vector index. The index
+// is represented by a float64 so that linear interpolation can be applied.
+func TimeToStatScale(d time.Duration) float64 {
+	if d < 0 {
+		return 0
+	}
+	// Linear below minResponseTime, logarithmic above it.
+	ratio := float64(d) / float64(minResponseTime)
+	if ratio > 1 {
+		ratio = math.Log(ratio) + 1
+	}
+	scaled := ratio * timeStatsLogFactor
+	// Clamp to the last bucket of the vector.
+	if scaled > timeStatLength-1 {
+		scaled = timeStatLength - 1
+	}
+	return scaled
+}
+
+// StatScaleToTime converts a distribution vector index to a response time. The index
+// is represented by a float64 so that linear interpolation can be applied.
+func StatScaleToTime(r float64) time.Duration {
+	// Inverse of TimeToStatScale: undo the scale factor, then the log mapping.
+	scaled := r / timeStatsLogFactor
+	if scaled > 1 {
+		scaled = math.Exp(scaled - 1)
+	}
+	return time.Duration(scaled * float64(minResponseTime))
+}
+
+// TimeoutWeights calculates the weight function used for calculating service value
+// based on the response time distribution of the received service.
+// It is based on the request timeout value of the system. It consists of a half cosine
+// function starting with 1, crossing zero at timeout and reaching -1 at 2*timeout.
+// After 2*timeout the weight is constant -1.
+func TimeoutWeights(timeout time.Duration) (res ResponseTimeWeights) {
+	for i := range res {
+		t := StatScaleToTime(float64(i))
+		if t < 2*timeout {
+			// Half cosine: 1 at t=0, 0 at t=timeout, -1 at t=2*timeout.
+			res[i] = math.Cos(math.Pi / 2 * float64(t) / float64(timeout))
+		} else {
+			res[i] = -1
+		}
+	}
+	return
+}
+
+// EncodeRLP implements rlp.Encoder
+func (rt *ResponseTimeStats) EncodeRLP(w io.Writer) error {
+	// Encode through an exported shadow struct since rlp only handles exported fields.
+	enc := struct {
+		Stats [timeStatLength]uint64
+		Exp uint64
+	}{rt.stats, rt.exp}
+	return rlp.Encode(w, &enc)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (rt *ResponseTimeStats) DecodeRLP(s *rlp.Stream) error {
+	// Decode through an exported shadow struct since rlp only handles exported fields.
+	var enc struct {
+		Stats [timeStatLength]uint64
+		Exp uint64
+	}
+	if err := s.Decode(&enc); err != nil {
+		return err
+	}
+	rt.stats, rt.exp = enc.Stats, enc.Exp
+	return nil
+}
+
+// Add adds a new response time with the given weight to the distribution.
+func (rt *ResponseTimeStats) Add(respTime time.Duration, weight float64, expFactor utils.ExpirationFactor) {
+	rt.setExp(expFactor.Exp)
+	weight *= expFactor.Factor * weightScaleFactor
+	r := TimeToStatScale(respTime)
+	i := int(r)
+	r -= float64(i)
+	// Linear interpolation: split the weight between the two neighboring buckets.
+	rt.stats[i] += uint64(weight * (1 - r))
+	if i < timeStatLength-1 {
+		rt.stats[i+1] += uint64(weight * r)
+	}
+}
+
+// setExp sets the power of 2 exponent of the structure, scaling base values (the vector
+// itself) up or down if necessary.
+func (rt *ResponseTimeStats) setExp(exp uint64) {
+	switch {
+	case exp > rt.exp:
+		// Raising the exponent shrinks the stored base values.
+		shift := exp - rt.exp
+		for i := range rt.stats {
+			rt.stats[i] >>= shift
+		}
+		rt.exp = exp
+	case exp < rt.exp:
+		// Lowering the exponent grows the stored base values.
+		shift := rt.exp - exp
+		for i := range rt.stats {
+			rt.stats[i] <<= shift
+		}
+		rt.exp = exp
+	}
+}
+
+// Value calculates the total service value based on the given distribution, using the
+// specified weight function.
+func (rt ResponseTimeStats) Value(weights ResponseTimeWeights, expFactor utils.ExpirationFactor) float64 {
+	var weighted float64
+	for i, s := range rt.stats {
+		weighted += float64(s) * weights[i]
+	}
+	// Negative totals are clipped to zero.
+	if weighted < 0 {
+		return 0
+	}
+	return expFactor.Value(weighted, rt.exp) / weightScaleFactor
+}
+
+// AddStats adds the given ResponseTimeStats to the current one.
+func (rt *ResponseTimeStats) AddStats(s *ResponseTimeStats) {
+	// Match expiration exponents first so buckets are directly comparable.
+	rt.setExp(s.exp)
+	for i := range s.stats {
+		rt.stats[i] += s.stats[i]
+	}
+}
+
+// SubStats subtracts the given ResponseTimeStats from the current one.
+func (rt *ResponseTimeStats) SubStats(s *ResponseTimeStats) {
+	rt.setExp(s.exp)
+	for i, v := range s.stats {
+		// Saturating subtraction: a bucket never underflows below zero.
+		if v < rt.stats[i] {
+			rt.stats[i] -= v
+		} else {
+			rt.stats[i] = 0
+		}
+	}
+}
+
+// Timeout suggests a timeout value based on the previous distribution. The parameter
+// is the desired rate of timeouts assuming a similar distribution in the future.
+// Note that the actual timeout should have a sensible minimum bound so that operating
+// under ideal working conditions for a long time (for example, using a local server
+// with very low response times) will not make it very hard for the system to accommodate
+// longer response times in the future.
+func (rt ResponseTimeStats) Timeout(failRatio float64) time.Duration {
+	var sum uint64
+	for _, v := range rt.stats {
+		sum += v
+	}
+	// Walk down from the slowest bucket until the requested fraction of the
+	// total weight has been consumed.
+	s := uint64(float64(sum) * failRatio)
+	i := timeStatLength - 1
+	for i > 0 && s >= rt.stats[i] {
+		s -= rt.stats[i]
+		i--
+	}
+	// Interpolate within the bucket where the threshold was crossed.
+	r := float64(i) + 0.5
+	if rt.stats[i] > 0 {
+		r -= float64(s) / float64(rt.stats[i])
+	}
+	if r < 0 {
+		r = 0
+	}
+	th := StatScaleToTime(r)
+	if th > maxResponseTime {
+		th = maxResponseTime
+	}
+	return th
+}
+
+// RtDistribution represents a distribution as a series of (X, Y) chart coordinates,
+// where the X axis is the response time in seconds while the Y axis is the amount of
+// service value received with a response time close to the X coordinate.
+type RtDistribution [timeStatLength][2]float64
+
+// Distribution returns a RtDistribution, optionally normalized to a sum of 1.
+func (rt ResponseTimeStats) Distribution(normalized bool, expFactor utils.ExpirationFactor) (res RtDistribution) {
+	var mul float64
+	if normalized {
+		// Normalize so that the Y values sum to 1.
+		var sum uint64
+		for _, v := range rt.stats {
+			sum += v
+		}
+		if sum > 0 {
+			mul = 1 / float64(sum)
+		}
+	} else {
+		// Undo the internal weight scaling and apply the expiration factor.
+		mul = expFactor.Value(float64(1)/weightScaleFactor, rt.exp)
+	}
+	for i, v := range rt.stats {
+		res[i][0] = float64(StatScaleToTime(float64(i))) / float64(time.Second)
+		res[i][1] = float64(v) * mul
+	}
+	return
+}
diff --git a/les/lespay/client/timestats_test.go b/les/lespay/client/timestats_test.go
new file mode 100644
index 000000000..900c6f99c
--- /dev/null
+++ b/les/lespay/client/timestats_test.go
@@ -0,0 +1,136 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package client
+
+import (
+ "github.com/core-coin/go-core/les/utils"
+ "math"
+ "math/rand"
+ "testing"
+ "time"
+)
+
+// TestTransition checks that StatScaleToTime is (approximately) the inverse of
+// TimeToStatScale within the representable range.
+func TestTransition(t *testing.T) {
+	var epsilon = 0.01
+	var cases = []time.Duration{
+		time.Millisecond, minResponseTime,
+		time.Second, time.Second * 5, maxResponseTime,
+	}
+	for _, c := range cases {
+		got := StatScaleToTime(TimeToStatScale(c))
+		if float64(got)*(1+epsilon) < float64(c) || float64(got)*(1-epsilon) > float64(c) {
+			t.Fatalf("Failed to transition back")
+		}
+	}
+	// If the time is too large (exceeds maxResponseTime), it is clamped to the scale maximum.
+	got := StatScaleToTime(TimeToStatScale(2 * maxResponseTime))
+	if float64(got)*(1+epsilon) < float64(maxResponseTime) || float64(got)*(1-epsilon) > float64(maxResponseTime) {
+		t.Fatalf("Failed to transition back")
+	}
+}
+
+var maxResponseWeights = TimeoutWeights(maxResponseTime)
+
+// TestValue checks Value against the analytically computed average of the half
+// cosine weight function over random response time ranges.
+func TestValue(t *testing.T) {
+	noexp := utils.ExpirationFactor{Factor: 1}
+	for i := 0; i < 1000; i++ {
+		max := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))
+		min := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))
+		timeout := max/2 + time.Duration(rand.Int63n(int64(maxResponseTime-max/2)))
+		s := makeRangeStats(min, max, 1000, noexp)
+		value := s.Value(TimeoutWeights(timeout), noexp)
+		// calculate the average weight (the average of the given range of the half cosine
+		// weight function).
+		minx := math.Pi / 2 * float64(min) / float64(timeout)
+		maxx := math.Pi / 2 * float64(max) / float64(timeout)
+		avgWeight := (math.Sin(maxx) - math.Sin(minx)) / (maxx - minx)
+		expv := 1000 * avgWeight
+		if expv < 0 {
+			expv = 0
+		}
+		if value < expv-10 || value > expv+10 {
+			t.Errorf("Value failed (expected %v, got %v)", expv, value)
+		}
+	}
+}
+
+// TestAddSubExpire checks that AddStats/SubStats and exponential expiration keep
+// the aggregated values consistent with independently tracked expectations.
+func TestAddSubExpire(t *testing.T) {
+	var (
+		sum1, sum2 ResponseTimeStats
+		sum1ValueExp, sum2ValueExp float64
+		logOffset utils.Fixed64
+	)
+	for i := 0; i < 1000; i++ {
+		exp := utils.ExpFactor(logOffset)
+		max := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))
+		min := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))
+		s := makeRangeStats(min, max, 1000, exp)
+		value := s.Value(maxResponseWeights, exp)
+		// Everything is added to sum1; a random half is also added to sum2.
+		sum1.AddStats(&s)
+		sum1ValueExp += value
+		if rand.Intn(2) == 1 {
+			sum2.AddStats(&s)
+			sum2ValueExp += value
+		}
+		// Advance expiration and decay the tracked expectations accordingly.
+		logOffset += utils.Float64ToFixed64(0.001 / math.Log(2))
+		sum1ValueExp -= sum1ValueExp * 0.001
+		sum2ValueExp -= sum2ValueExp * 0.001
+	}
+	exp := utils.ExpFactor(logOffset)
+	sum1Value := sum1.Value(maxResponseWeights, exp)
+	if sum1Value < sum1ValueExp*0.99 || sum1Value > sum1ValueExp*1.01 {
+		t.Errorf("sum1Value failed (expected %v, got %v)", sum1ValueExp, sum1Value)
+	}
+	sum2Value := sum2.Value(maxResponseWeights, exp)
+	if sum2Value < sum2ValueExp*0.99 || sum2Value > sum2ValueExp*1.01 {
+		t.Errorf("sum2Value failed (expected %v, got %v)", sum2ValueExp, sum2Value)
+	}
+	// The difference of the aggregates should match the difference of expectations.
+	diff := sum1
+	diff.SubStats(&sum2)
+	diffValue := diff.Value(maxResponseWeights, exp)
+	diffValueExp := sum1ValueExp - sum2ValueExp
+	if diffValue < diffValueExp*0.99 || diffValue > diffValueExp*1.01 {
+		t.Errorf("diffValue failed (expected %v, got %v)", diffValueExp, diffValue)
+	}
+}
+
+// TestTimeout checks the suggested timeout for several response time ranges.
+func TestTimeout(t *testing.T) {
+	testTimeoutRange(t, 0, time.Second)
+	testTimeoutRange(t, time.Second, time.Second*2)
+	testTimeoutRange(t, time.Second, maxResponseTime)
+}
+
+// testTimeoutRange builds a uniform distribution over [min, max] and checks that
+// Timeout returns the expected quantile within tolerance for several fail ratios.
+func testTimeoutRange(t *testing.T, min, max time.Duration) {
+	s := makeRangeStats(min, max, 1000, utils.ExpirationFactor{Factor: 1})
+	for i := 2; i < 9; i++ {
+		to := s.Timeout(float64(i) / 10)
+		// A fail ratio of i/10 corresponds to the (1 - i/10) quantile of the range.
+		exp := max - (max-min)*time.Duration(i)/10
+		tol := (max - min) / 50
+		if to < exp-tol || to > exp+tol {
+			t.Errorf("Timeout failed (expected %v, got %v)", exp, to)
+		}
+	}
+}
+
+// makeRangeStats builds a stat vector holding the given total amount of service
+// value, spread evenly across the [min, max] response time range in 1000 samples.
+func makeRangeStats(min, max time.Duration, amount float64, exp utils.ExpirationFactor) ResponseTimeStats {
+	var stats ResponseTimeStats
+	perSample := amount / 1000
+	for i := 0; i < 1000; i++ {
+		respTime := min + (max-min)*time.Duration(i)/999
+		stats.Add(respTime, perSample, exp)
+	}
+	return stats
+}
diff --git a/les/lespay/client/valuetracker.go b/les/lespay/client/valuetracker.go
new file mode 100644
index 000000000..ab895a779
--- /dev/null
+++ b/les/lespay/client/valuetracker.go
@@ -0,0 +1,510 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/rlp"
+ "github.com/core-coin/go-core/xcbdb"
+ "math"
+ "sync"
+ "time"
+)
+
+const (
+ vtVersion = 1 // database encoding format for ValueTracker
+ nvtVersion = 1 // database encoding format for NodeValueTracker
+)
+
+var (
+ vtKey = []byte("vt:") // database key of the global ValueTracker state
+ vtNodeKey = []byte("vtNode:") // database key prefix of per-node state (suffixed with the node ID)
+)
+
+// NodeValueTracker collects service value statistics for a specific server node
+type NodeValueTracker struct {
+ lock sync.Mutex // protects all fields below
+
+ // rtStats is the node's own response time distribution; lastRtStats is a copy
+ // of it taken at the last transfer to the global statistics.
+ rtStats, lastRtStats ResponseTimeStats
+ lastTransfer mclock.AbsTime // time of the last basket/stats transfer
+ basket serverBasket // per-node request basket, gradually drained into the reference basket
+ reqCosts []uint64 // server-announced cost table, indexed by request type
+ reqValues *[]float64 // shared request value table owned by ValueTracker
+}
+
+// init initializes a NodeValueTracker.
+// Note that the contents of the referenced reqValues slice will not change; a new
+// reference is passed if the values are updated by ValueTracker.
+func (nv *NodeValueTracker) init(now mclock.AbsTime, reqValues *[]float64) {
+ reqTypeCount := len(*reqValues)
+ nv.reqCosts = make([]uint64, reqTypeCount)
+ nv.lastTransfer = now
+ nv.reqValues = reqValues
+ nv.basket.init(reqTypeCount)
+}
+
+// updateCosts updates the request cost table of the server. The request value factor
+// is also updated based on the given cost table and the current reference basket.
+// Note that the contents of the referenced reqValues slice will not change; a new
+// reference is passed if the values are updated by ValueTracker.
+func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues *[]float64, rvFactor float64) {
+ nv.lock.Lock()
+ defer nv.lock.Unlock()
+
+ nv.reqCosts = reqCosts
+ nv.reqValues = reqValues
+ nv.basket.updateRvFactor(rvFactor)
+}
+
+// transferStats returns request basket and response time statistics that should be
+// added to the global statistics. The contents of the server's own request basket are
+// gradually transferred to the main reference basket and removed from the server basket
+// with the specified transfer rate.
+// The response time statistics are retained at both places and therefore the global
+// distribution is always the sum of the individual server distributions.
+func (nv *NodeValueTracker) transferStats(now mclock.AbsTime, transferRate float64) (requestBasket, ResponseTimeStats) {
+ nv.lock.Lock()
+ defer nv.lock.Unlock()
+
+ dt := now - nv.lastTransfer
+ nv.lastTransfer = now
+ if dt < 0 {
+ // clock went backwards; treat as zero elapsed time
+ dt = 0
+ }
+ // only the delta accumulated since the previous transfer is reported
+ recentRtStats := nv.rtStats
+ recentRtStats.SubStats(&nv.lastRtStats)
+ nv.lastRtStats = nv.rtStats
+ // -Expm1(-rate*dt) == 1-exp(-rate*dt): the fraction of the basket to move
+ return nv.basket.transfer(-math.Expm1(-transferRate * float64(dt))), recentRtStats
+}
+
+// RtStats returns the node's own response time distribution statistics
+// (returned by value, so the caller gets an independent snapshot).
+func (nv *NodeValueTracker) RtStats() ResponseTimeStats {
+ nv.lock.Lock()
+ defer nv.lock.Unlock()
+
+ return nv.rtStats
+}
+
+// ValueTracker coordinates service value calculation for individual servers and updates
+// global statistics
+type ValueTracker struct {
+ clock mclock.Clock
+ lock sync.Mutex // protects connected, refBasket, mappings and rtStats
+ quit chan chan struct{} // signals the periodic update goroutine to stop
+ db xcbdb.KeyValueStore
+ connected map[enode.ID]*NodeValueTracker // nil after Stop has been called
+ reqTypeCount int
+
+ refBasket referenceBasket
+ mappings [][]string // all request-name index mappings ever persisted
+ currentMapping int // index into mappings of the active mapping
+ initRefBasket requestBasket
+ rtStats ResponseTimeStats
+
+ transferRate float64
+ statsExpLock sync.RWMutex // protects statsExpFactor
+ statsExpRate, offlineExpRate float64
+ statsExpirer utils.Expirer
+ statsExpFactor utils.ExpirationFactor
+}
+
+// valueTrackerEncV1 is the RLP encoding (version 1) of the persisted global state.
+type valueTrackerEncV1 struct {
+ Mappings [][]string
+ RefBasketMapping uint
+ RefBasket requestBasket
+ RtStats ResponseTimeStats
+ ExpOffset, SavedAt uint64
+}
+
+// nodeValueTrackerEncV1 is the RLP encoding (version 1) of a per-node state.
+type nodeValueTrackerEncV1 struct {
+ RtStats ResponseTimeStats
+ ServerBasketMapping uint
+ ServerBasket requestBasket
+}
+
+// RequestInfo is an initializer structure for the service vector.
+type RequestInfo struct {
+ // Name identifies the request type and is used for re-mapping the service vector if necessary
+ Name string
+ // InitAmount and InitValue are used to initialize the reference basket
+ InitAmount, InitValue float64
+}
+
+// NewValueTracker creates a new ValueTracker and loads its previously saved state from
+// the database if possible.
+func NewValueTracker(db xcbdb.KeyValueStore, clock mclock.Clock, reqInfo []RequestInfo, updatePeriod time.Duration, transferRate, statsExpRate, offlineExpRate float64) *ValueTracker {
+ now := clock.Now()
+
+ initRefBasket := requestBasket{items: make([]basketItem, len(reqInfo))}
+ mapping := make([]string, len(reqInfo))
+
+ var sumAmount, sumValue float64
+ for _, req := range reqInfo {
+ sumAmount += req.InitAmount
+ sumValue += req.InitAmount * req.InitValue
+ }
+ // scale so that the total value of the initial basket equals its total amount
+ // at basketFactor fixed-point resolution
+ scaleValues := sumAmount * basketFactor / sumValue
+ for i, req := range reqInfo {
+ mapping[i] = req.Name
+ initRefBasket.items[i].amount = uint64(req.InitAmount * basketFactor)
+ initRefBasket.items[i].value = uint64(req.InitAmount * req.InitValue * scaleValues)
+ }
+
+ vt := &ValueTracker{
+ clock: clock,
+ connected: make(map[enode.ID]*NodeValueTracker),
+ quit: make(chan chan struct{}),
+ db: db,
+ reqTypeCount: len(initRefBasket.items),
+ initRefBasket: initRefBasket,
+ transferRate: transferRate,
+ statsExpRate: statsExpRate,
+ offlineExpRate: offlineExpRate,
+ }
+ if vt.loadFromDb(mapping) != nil {
+ // previous state not saved or invalid, init with default values
+ vt.refBasket.basket = initRefBasket
+ vt.mappings = [][]string{mapping}
+ vt.currentMapping = 0
+ }
+ vt.statsExpirer.SetRate(now, statsExpRate)
+ vt.refBasket.init(vt.reqTypeCount)
+ vt.periodicUpdate()
+
+ // background updater; runs until Stop sends on vt.quit
+ go func() {
+ for {
+ select {
+ case <-clock.After(updatePeriod):
+ vt.lock.Lock()
+ vt.periodicUpdate()
+ vt.lock.Unlock()
+ case quit := <-vt.quit:
+ close(quit)
+ return
+ }
+ }
+ }()
+ return vt
+}
+
+// StatsExpirer returns the statistics expirer so that other values can be expired
+// with the same rate as the service value statistics.
+func (vt *ValueTracker) StatsExpirer() *utils.Expirer {
+ return &vt.statsExpirer
+}
+
+// StatsExpFactor returns the current expiration factor so that other values can be
+// expired with the same rate as the service value statistics.
+func (vt *ValueTracker) StatsExpFactor() utils.ExpirationFactor {
+ vt.statsExpLock.RLock()
+ defer vt.statsExpLock.RUnlock()
+
+ return vt.statsExpFactor
+}
+
+// loadFromDb loads the value tracker's state from the database and converts saved
+// request basket index mapping if it does not match the specified index to name mapping.
+// Returns a non-nil error if no valid previous state could be loaded, in which case
+// the caller falls back to default initialization.
+func (vt *ValueTracker) loadFromDb(mapping []string) error {
+ enc, err := vt.db.Get(vtKey)
+ if err != nil {
+ return err
+ }
+ r := bytes.NewReader(enc)
+ // the state is stored as a version number followed by the versioned payload
+ var version uint
+ if err := rlp.Decode(r, &version); err != nil {
+ log.Error("Decoding value tracker state failed", "err", err)
+ return err
+ }
+ if version != vtVersion {
+ // report the ValueTracker version constant, not the NodeValueTracker one
+ log.Error("Unknown ValueTracker version", "stored", version, "current", vtVersion)
+ return fmt.Errorf("Unknown ValueTracker version %d (current version is %d)", version, vtVersion)
+ }
+ var vte valueTrackerEncV1
+ if err := rlp.Decode(r, &vte); err != nil {
+ log.Error("Decoding value tracker state failed", "err", err)
+ return err
+ }
+ // apply offline expiration for the wall-clock time elapsed since the state was saved
+ logOffset := utils.Fixed64(vte.ExpOffset)
+ dt := time.Now().UnixNano() - int64(vte.SavedAt)
+ if dt > 0 {
+ logOffset += utils.Float64ToFixed64(float64(dt) * vt.offlineExpRate / math.Log(2))
+ }
+ vt.statsExpirer.SetLogOffset(vt.clock.Now(), logOffset)
+ vt.rtStats = vte.RtStats
+ vt.mappings = vte.Mappings
+ vt.currentMapping = -1
+ // look for a stored mapping identical to the currently requested one
+loop:
+ for i, m := range vt.mappings {
+ if len(m) != len(mapping) {
+ continue loop
+ }
+ for j, s := range mapping {
+ if m[j] != s {
+ continue loop
+ }
+ }
+ vt.currentMapping = i
+ break
+ }
+ if vt.currentMapping == -1 {
+ // current mapping was never used before; register it
+ vt.currentMapping = len(vt.mappings)
+ vt.mappings = append(vt.mappings, mapping)
+ }
+ if int(vte.RefBasketMapping) == vt.currentMapping {
+ vt.refBasket.basket = vte.RefBasket
+ } else {
+ if vte.RefBasketMapping >= uint(len(vt.mappings)) {
+ log.Error("Unknown request basket mapping", "stored", vte.RefBasketMapping, "current", vt.currentMapping)
+ return fmt.Errorf("Unknown request basket mapping %d (current version is %d)", vte.RefBasketMapping, vt.currentMapping)
+ }
+ // stored basket uses an older index mapping; convert it to the current one
+ vt.refBasket.basket = vte.RefBasket.convertMapping(vt.mappings[vte.RefBasketMapping], mapping, vt.initRefBasket)
+ }
+ return nil
+}
+
+// saveToDb saves the value tracker's state to the database.
+// Errors are logged but not returned; persistence is best-effort.
+func (vt *ValueTracker) saveToDb() {
+ vte := valueTrackerEncV1{
+ Mappings: vt.mappings,
+ RefBasketMapping: uint(vt.currentMapping),
+ RefBasket: vt.refBasket.basket,
+ RtStats: vt.rtStats,
+ ExpOffset: uint64(vt.statsExpirer.LogOffset(vt.clock.Now())),
+ SavedAt: uint64(time.Now().UnixNano()),
+ }
+ // the payload is prefixed with the encoding version number
+ enc1, err := rlp.EncodeToBytes(uint(vtVersion))
+ if err != nil {
+ log.Error("Encoding value tracker state failed", "err", err)
+ return
+ }
+ enc2, err := rlp.EncodeToBytes(&vte)
+ if err != nil {
+ log.Error("Encoding value tracker state failed", "err", err)
+ return
+ }
+ if err := vt.db.Put(vtKey, append(enc1, enc2...)); err != nil {
+ log.Error("Saving value tracker state failed", "err", err)
+ }
+}
+
+// Stop saves the value tracker's state and each loaded node's individual state and
+// returns after shutting the internal goroutines down.
+func (vt *ValueTracker) Stop() {
+ quit := make(chan struct{})
+ vt.quit <- quit
+ <-quit
+ vt.lock.Lock()
+ vt.periodicUpdate()
+ for id, nv := range vt.connected {
+ vt.saveNode(id, nv)
+ }
+ // nil map marks the tracker as stopped; Register checks for this
+ vt.connected = nil
+ vt.saveToDb()
+ vt.lock.Unlock()
+}
+
+// Register adds a server node to the value tracker.
+// Returns nil if the tracker has already been stopped.
+func (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker {
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ if vt.connected == nil {
+ // ValueTracker has already been stopped
+ return nil
+ }
+ nv := vt.loadOrNewNode(id)
+ nv.init(vt.clock.Now(), &vt.refBasket.reqValues)
+ vt.connected[id] = nv
+ return nv
+}
+
+// Unregister removes a server node from the value tracker,
+// persisting its state before dropping it from the connected set.
+func (vt *ValueTracker) Unregister(id enode.ID) {
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ if nv := vt.connected[id]; nv != nil {
+ vt.saveNode(id, nv)
+ delete(vt.connected, id)
+ }
+}
+
+// GetNode returns an individual server node's value tracker. If it did not exist before
+// then a new node is created.
+func (vt *ValueTracker) GetNode(id enode.ID) *NodeValueTracker {
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ return vt.loadOrNewNode(id)
+}
+
+// loadOrNewNode returns an individual server node's value tracker. If it did not exist before
+// then a new node is created. Any database or decoding failure falls back to a fresh,
+// empty tracker (errors are logged, never propagated).
+func (vt *ValueTracker) loadOrNewNode(id enode.ID) *NodeValueTracker {
+ if nv, ok := vt.connected[id]; ok {
+ return nv
+ }
+ nv := &NodeValueTracker{lastTransfer: vt.clock.Now()}
+ enc, err := vt.db.Get(append(vtNodeKey, id[:]...))
+ if err != nil {
+ // no saved state for this node
+ return nv
+ }
+ r := bytes.NewReader(enc)
+ var version uint
+ if err := rlp.Decode(r, &version); err != nil {
+ log.Error("Failed to decode node value tracker", "id", id, "err", err)
+ return nv
+ }
+ if version != nvtVersion {
+ log.Error("Unknown NodeValueTracker version", "stored", version, "current", nvtVersion)
+ return nv
+ }
+ var nve nodeValueTrackerEncV1
+ if err := rlp.Decode(r, &nve); err != nil {
+ log.Error("Failed to decode node value tracker", "id", id, "err", err)
+ return nv
+ }
+ nv.rtStats = nve.RtStats
+ nv.lastRtStats = nve.RtStats
+ if int(nve.ServerBasketMapping) == vt.currentMapping {
+ nv.basket.basket = nve.ServerBasket
+ } else {
+ if nve.ServerBasketMapping >= uint(len(vt.mappings)) {
+ log.Error("Unknown request basket mapping", "stored", nve.ServerBasketMapping, "current", vt.currentMapping)
+ return nv
+ }
+ // stored basket uses an older index mapping; convert it to the current one
+ nv.basket.basket = nve.ServerBasket.convertMapping(vt.mappings[nve.ServerBasketMapping], vt.mappings[vt.currentMapping], vt.initRefBasket)
+ }
+ return nv
+}
+
+// saveNode saves a server node's value tracker to the database.
+// Response time statistics accumulated since the last transfer are first folded
+// into the global statistics so that nothing is lost across restarts.
+func (vt *ValueTracker) saveNode(id enode.ID, nv *NodeValueTracker) {
+ recentRtStats := nv.rtStats
+ recentRtStats.SubStats(&nv.lastRtStats)
+ vt.rtStats.AddStats(&recentRtStats)
+ nv.lastRtStats = nv.rtStats
+
+ nve := nodeValueTrackerEncV1{
+ RtStats: nv.rtStats,
+ ServerBasketMapping: uint(vt.currentMapping),
+ ServerBasket: nv.basket.basket,
+ }
+ // the payload is prefixed with the encoding version number
+ enc1, err := rlp.EncodeToBytes(uint(nvtVersion))
+ if err != nil {
+ log.Error("Failed to encode service value information", "id", id, "err", err)
+ return
+ }
+ enc2, err := rlp.EncodeToBytes(&nve)
+ if err != nil {
+ log.Error("Failed to encode service value information", "id", id, "err", err)
+ return
+ }
+ if err := vt.db.Put(append(vtNodeKey, id[:]...), append(enc1, enc2...)); err != nil {
+ log.Error("Failed to save service value information", "id", id, "err", err)
+ }
+}
+
+// UpdateCosts updates the node value tracker's request cost table
+func (vt *ValueTracker) UpdateCosts(nv *NodeValueTracker, reqCosts []uint64) {
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ nv.updateCosts(reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(reqCosts))
+}
+
+// RtStats returns the global response time distribution statistics
+func (vt *ValueTracker) RtStats() ResponseTimeStats {
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ // pull in the latest per-node deltas before reporting
+ vt.periodicUpdate()
+ return vt.rtStats
+}
+
+// periodicUpdate transfers individual node data to the global statistics, normalizes
+// the reference basket and updates request values. The global state is also saved to
+// the database with each update.
+// Callers must hold vt.lock.
+func (vt *ValueTracker) periodicUpdate() {
+ now := vt.clock.Now()
+ // refresh the cached expiration factor under its own write lock
+ vt.statsExpLock.Lock()
+ vt.statsExpFactor = utils.ExpFactor(vt.statsExpirer.LogOffset(now))
+ vt.statsExpLock.Unlock()
+
+ for _, nv := range vt.connected {
+ basket, rtStats := nv.transferStats(now, vt.transferRate)
+ vt.refBasket.add(basket)
+ vt.rtStats.AddStats(&rtStats)
+ }
+ vt.refBasket.normalize()
+ vt.refBasket.updateReqValues()
+ // propagate the recalculated request values to every connected node
+ for _, nv := range vt.connected {
+ nv.updateCosts(nv.reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts))
+ }
+ vt.saveToDb()
+}
+
+// ServedRequest describes one served request type and the amount served of it.
+type ServedRequest struct {
+ ReqType, Amount uint32
+}
+
+// Served adds a served request to the node's statistics. An actual request may be composed
+// of one or more request types (service vector indices).
+func (vt *ValueTracker) Served(nv *NodeValueTracker, reqs []ServedRequest, respTime time.Duration) {
+ // Snapshot the expiration factor under statsExpLock; periodicUpdate mutates
+ // vt.statsExpFactor while holding the lock for writing.
+ vt.statsExpLock.RLock()
+ expFactor := vt.statsExpFactor
+ vt.statsExpLock.RUnlock()
+
+ nv.lock.Lock()
+ defer nv.lock.Unlock()
+
+ var value float64
+ for _, r := range reqs {
+ nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor)
+ value += (*nv.reqValues)[r.ReqType] * float64(r.Amount)
+ }
+ // Use the snapshot taken above; reading vt.statsExpFactor here without
+ // holding statsExpLock would race with periodicUpdate.
+ nv.rtStats.Add(respTime, value, expFactor)
+}
+
+// RequestStatsItem reports one request type of the reference basket: its name,
+// total served amount and average per-request value.
+type RequestStatsItem struct {
+ Name string
+ ReqAmount, ReqValue float64
+}
+
+// RequestStats returns the current contents of the reference request basket, with
+// request values meaning average per request rather than total.
+func (vt *ValueTracker) RequestStats() []RequestStatsItem {
+ vt.statsExpLock.RLock()
+ expFactor := vt.statsExpFactor
+ vt.statsExpLock.RUnlock()
+ vt.lock.Lock()
+ defer vt.lock.Unlock()
+
+ vt.periodicUpdate()
+ res := make([]RequestStatsItem, len(vt.refBasket.basket.items))
+ for i, item := range vt.refBasket.basket.items {
+ res[i].Name = vt.mappings[vt.currentMapping][i]
+ // amounts are stored at basketFactor fixed-point scale; expire and rescale
+ res[i].ReqAmount = expFactor.Value(float64(item.amount)/basketFactor, vt.refBasket.basket.exp)
+ res[i].ReqValue = vt.refBasket.reqValues[i]
+ }
+ return res
+}
diff --git a/les/lespay/client/valuetracker_test.go b/les/lespay/client/valuetracker_test.go
new file mode 100644
index 000000000..712dbe50a
--- /dev/null
+++ b/les/lespay/client/valuetracker_test.go
@@ -0,0 +1,133 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package client
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/xcbdb/memorydb"
+ "math"
+ "math/rand"
+ "strconv"
+ "testing"
+ "time"
+)
+
+const (
+ testReqTypes = 3
+ testNodeCount = 5
+ testReqCount = 10000
+ testRounds = 10
+)
+
+// TestValueTracker exercises persistence, basket transfer and expiration across
+// several restart rounds: each round creates a ValueTracker over the same database,
+// serves requests (except in the last two rounds, which test expiration), then
+// checks request values against relative prices and total amounts against the
+// combined contents of the reference and node baskets.
+func TestValueTracker(t *testing.T) {
+ db := memorydb.New()
+ clock := &mclock.Simulated{}
+ requestList := make([]RequestInfo, testReqTypes)
+ relPrices := make([]float64, testReqTypes)
+ totalAmount := make([]uint64, testReqTypes)
+ for i := range requestList {
+ requestList[i] = RequestInfo{Name: "testreq" + strconv.Itoa(i), InitAmount: 1, InitValue: 1}
+ totalAmount[i] = 1
+ relPrices[i] = rand.Float64() + 0.1
+ }
+ nodes := make([]*NodeValueTracker, testNodeCount)
+ for round := 0; round < testRounds; round++ {
+ // the last two rounds make no requests; the very last also enables expiration
+ makeRequests := round < testRounds-2
+ useExpiration := round == testRounds-1
+ var expRate float64
+ if useExpiration {
+ // half-life of 100 hours, matching the clock.Run below
+ expRate = math.Log(2) / float64(time.Hour*100)
+ }
+
+ vt := NewValueTracker(db, clock, requestList, time.Minute, 1/float64(time.Hour), expRate, expRate)
+ updateCosts := func(i int) {
+ costList := make([]uint64, testReqTypes)
+ baseCost := rand.Float64()*10000000 + 100000
+ for j := range costList {
+ costList[j] = uint64(baseCost * relPrices[j])
+ }
+ vt.UpdateCosts(nodes[i], costList)
+ }
+ for i := range nodes {
+ nodes[i] = vt.Register(enode.ID{byte(i)})
+ updateCosts(i)
+ }
+ if makeRequests {
+ for i := 0; i < testReqCount; i++ {
+ reqType := rand.Intn(testReqTypes)
+ reqAmount := rand.Intn(10) + 1
+ node := rand.Intn(testNodeCount)
+ respTime := time.Duration((rand.Float64() + 1) * float64(time.Second) * float64(node+1) / testNodeCount)
+ totalAmount[reqType] += uint64(reqAmount)
+ vt.Served(nodes[node], []ServedRequest{{uint32(reqType), uint32(reqAmount)}}, respTime)
+ clock.Run(time.Second)
+ }
+ } else {
+ clock.Run(time.Hour * 100)
+ if useExpiration {
+ // one half-life has passed, so expect half the amounts to remain
+ for i, a := range totalAmount {
+ totalAmount[i] = a / 2
+ }
+ }
+ }
+ vt.Stop()
+ var sumrp, sumrv float64
+ for i, rp := range relPrices {
+ sumrp += rp
+ sumrv += vt.refBasket.reqValues[i]
+ }
+ for i, rp := range relPrices {
+ ratio := vt.refBasket.reqValues[i] * sumrp / (rp * sumrv)
+ if ratio < 0.99 || ratio > 1.01 {
+ t.Errorf("reqValues (%v) does not match relPrices (%v)", vt.refBasket.reqValues, relPrices)
+ break
+ }
+ }
+ exp := utils.ExpFactor(vt.StatsExpirer().LogOffset(clock.Now()))
+ basketAmount := make([]uint64, testReqTypes)
+ for i, bi := range vt.refBasket.basket.items {
+ basketAmount[i] += uint64(exp.Value(float64(bi.amount), vt.refBasket.basket.exp))
+ }
+ if makeRequests {
+ // when requests were made this round, part of the amounts still resides in
+ // the node baskets, so count those too; otherwise everything has already
+ // been transferred to the reference basket
+ for _, node := range nodes {
+ for i, bi := range node.basket.basket.items {
+ basketAmount[i] += uint64(exp.Value(float64(bi.amount), node.basket.basket.exp))
+ }
+ }
+ }
+ for i, a := range basketAmount {
+ amount := a / basketFactor
+ if amount+10 < totalAmount[i] || amount > totalAmount[i]+10 {
+ t.Errorf("totalAmount[%d] mismatch in round %d (expected %d, got %d)", i, round, totalAmount[i], amount)
+ }
+ }
+ var sumValue float64
+ for _, node := range nodes {
+ s := node.RtStats()
+ sumValue += s.Value(maxResponseWeights, exp)
+ }
+ s := vt.RtStats()
+ mainValue := s.Value(maxResponseWeights, exp)
+ if sumValue < mainValue-10 || sumValue > mainValue+10 {
+ t.Errorf("Main rtStats value does not match sum of node rtStats values in round %d (main %v, sum %v)", round, mainValue, sumValue)
+ }
+ }
+}
diff --git a/les/lespay/client/wrsiterator.go b/les/lespay/client/wrsiterator.go
new file mode 100644
index 000000000..f768be7e5
--- /dev/null
+++ b/les/lespay/client/wrsiterator.go
@@ -0,0 +1,127 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package client
+
+import (
+ "github.com/core-coin/go-core/les/utils"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/nodestate"
+ "sync"
+)
+
+// WrsIterator returns nodes from the specified selectable set with a weighted random
+// selection. Selection weights are provided by a callback function.
+type WrsIterator struct {
+ lock sync.Mutex // protects all fields below
+ cond *sync.Cond // signaled when the selectable set becomes non-empty or the iterator is closed
+
+ ns *nodestate.NodeStateMachine
+ wrs *utils.WeightedRandomSelect // the current selectable set with weights
+ nextNode *enode.Node // last node returned by Next, reported by Node
+ closed bool
+}
+
+// NewWrsIterator creates a new WrsIterator. Nodes are selectable if they have all the required
+// and none of the disabled flags set. When a node is selected the selectedFlag is set which also
+// disables further selectability until it is removed or times out.
+func NewWrsIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, weightField nodestate.Field) *WrsIterator {
+ // weight callback; returns 0 (never selected) for unknown nodes or a missing field
+ wfn := func(i interface{}) uint64 {
+ n := ns.GetNode(i.(enode.ID))
+ if n == nil {
+ return 0
+ }
+ wt, _ := ns.GetField(n, weightField).(uint64)
+ return wt
+ }
+
+ w := &WrsIterator{
+ ns: ns,
+ wrs: utils.NewWeightedRandomSelect(wfn),
+ }
+ w.cond = sync.NewCond(&w.lock)
+
+ // re-evaluate a node's weight whenever its weight field changes while selectable
+ ns.SubscribeField(weightField, func(n *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+ if state.HasAll(requireFlags) && state.HasNone(disableFlags) {
+ w.lock.Lock()
+ w.wrs.Update(n.ID())
+ w.lock.Unlock()
+ w.cond.Signal()
+ }
+ })
+
+ // add or remove nodes as their selectability changes
+ ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {
+ oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)
+ newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)
+ if newMatch == oldMatch {
+ return
+ }
+
+ w.lock.Lock()
+ if newMatch {
+ w.wrs.Update(n.ID())
+ } else {
+ w.wrs.Remove(n.ID())
+ }
+ w.lock.Unlock()
+ w.cond.Signal()
+ })
+ return w
+}
+
+// Next selects the next node; it blocks until one is available or the iterator
+// is closed, returning false in the latter case.
+// NOTE(review): w.nextNode is written here without holding w.lock while Node reads
+// it under the lock — confirm Next and Node are never called concurrently.
+func (w *WrsIterator) Next() bool {
+ w.nextNode = w.chooseNode()
+ return w.nextNode != nil
+}
+
+// chooseNode blocks until the selectable set is non-empty or the iterator is
+// closed, then picks and removes one node by weighted random selection.
+func (w *WrsIterator) chooseNode() *enode.Node {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for {
+ // cond.Wait releases the lock while blocked; re-check both conditions after wakeup
+ for !w.closed && w.wrs.IsEmpty() {
+ w.cond.Wait()
+ }
+ if w.closed {
+ return nil
+ }
+ // Choose the next node at random. Even though w.wrs is guaranteed
+ // non-empty here, Choose might return nil if all items have weight
+ // zero.
+ if c := w.wrs.Choose(); c != nil {
+ id := c.(enode.ID)
+ w.wrs.Remove(id)
+ return w.ns.GetNode(id)
+ }
+ }
+
+}
+
+// Close ends the iterator and wakes up any goroutine blocked in Next.
+func (w *WrsIterator) Close() {
+ w.lock.Lock()
+ w.closed = true
+ w.lock.Unlock()
+ w.cond.Signal()
+}
+
+// Node returns the current node.
+func (w *WrsIterator) Node() *enode.Node {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return w.nextNode
+}
diff --git a/les/lespay/client/wrsiterator_test.go b/les/lespay/client/wrsiterator_test.go
new file mode 100644
index 000000000..0b49aa58c
--- /dev/null
+++ b/les/lespay/client/wrsiterator_test.go
@@ -0,0 +1,102 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see .
+
+package client
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/p2p/nodestate"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var (
+ testSetup = &nodestate.Setup{}
+ sfTest1 = testSetup.NewFlag("test1")
+ sfTest2 = testSetup.NewFlag("test2") // the "require" flag of the iterator under test
+ sfTest3 = testSetup.NewFlag("test3") // a "disable" flag of the iterator under test
+ sfTest4 = testSetup.NewFlag("test4") // a "disable" flag, also used as the selected marker
+ sfiTestWeight = testSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
+)
+
+const iterTestNodeCount = 6
+
+// TestWrsIterator verifies that WrsIterator yields exactly the nodes that have
+// sfTest2 set and neither sfTest3 nor sfTest4, and that weight and flag changes
+// add and remove nodes from the selectable set as expected.
+func TestWrsIterator(t *testing.T) {
+ ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
+ w := NewWrsIterator(ns, sfTest2, sfTest3.Or(sfTest4), sfiTestWeight)
+ ns.Start()
+ for i := 1; i <= iterTestNodeCount; i++ {
+ ns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)
+ ns.SetField(testNode(i), sfiTestWeight, uint64(1))
+ }
+ // next runs Next with a timeout, marks the returned node as selected (sfTest4)
+ // and returns its test index
+ next := func() int {
+ ch := make(chan struct{})
+ go func() {
+ w.Next()
+ close(ch)
+ }()
+ select {
+ case <-ch:
+ case <-time.After(time.Second * 5):
+ t.Fatalf("Iterator.Next() timeout")
+ }
+ node := w.Node()
+ ns.SetState(node, sfTest4, nodestate.Flags{}, 0)
+ return testNodeIndex(node.ID())
+ }
+ set := make(map[int]bool)
+ // expset drains the iterator and checks it returns exactly the nodes in set
+ expset := func() {
+ for len(set) > 0 {
+ n := next()
+ if !set[n] {
+ t.Errorf("Item returned by iterator not in the expected set (got %d)", n)
+ }
+ delete(set, n)
+ }
+ }
+
+ ns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)
+ set[1] = true
+ set[2] = true
+ set[3] = true
+ expset()
+ // node 5 carries the disabling sfTest3 flag, so only 4 and 6 are expected
+ ns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)
+ ns.SetState(testNode(5), sfTest2.Or(sfTest3), nodestate.Flags{}, 0)
+ ns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)
+ set[4] = true
+ set[6] = true
+ expset()
+ // zero weight excludes node 2 even though its flags would allow selection
+ ns.SetField(testNode(2), sfiTestWeight, uint64(0))
+ ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
+ set[1] = true
+ set[3] = true
+ expset()
+ // restoring the weight and toggling flags makes node 2 selectable again
+ ns.SetField(testNode(2), sfiTestWeight, uint64(1))
+ ns.SetState(testNode(2), nodestate.Flags{}, sfTest2, 0)
+ ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
+ ns.SetState(testNode(2), sfTest2, sfTest4, 0)
+ ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
+ set[1] = true
+ set[2] = true
+ set[3] = true
+ expset()
+ ns.Stop()
+}
diff --git a/les/metrics.go b/les/metrics.go
index ab48585ca..db54874d4 100644
--- a/les/metrics.go
+++ b/les/metrics.go
@@ -107,6 +107,13 @@ var (
requestRTT = metrics.NewRegisteredTimer("les/client/req/rtt", nil)
requestSendDelay = metrics.NewRegisteredTimer("les/client/req/sendDelay", nil)
+
+ serverSelectableGauge = metrics.NewRegisteredGauge("les/client/serverPool/selectable", nil)
+ serverDialedMeter = metrics.NewRegisteredMeter("les/client/serverPool/dialed", nil)
+ serverConnectedGauge = metrics.NewRegisteredGauge("les/client/serverPool/connected", nil)
+ sessionValueMeter = metrics.NewRegisteredMeter("les/client/serverPool/sessionValue", nil)
+ totalValueGauge = metrics.NewRegisteredGauge("les/client/serverPool/totalValue", nil)
+ suggestedTimeoutGauge = metrics.NewRegisteredGauge("les/client/serverPool/timeout", nil)
)
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
diff --git a/les/odr_test.go b/les/odr_test.go
index cf1e0f26a..8f3da92a8 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -138,8 +138,8 @@ func odrContractCall(ctx context.Context, db xcbdb.Database, config *params.Chai
//vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{})
gp := new(core.EnergyPool).AddEnergy(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
- res = append(res, ret...)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
+ res = append(res, result.Return()...)
}
} else {
header := lc.GetHeaderByHash(bhash)
@@ -149,9 +149,9 @@ func odrContractCall(ctx context.Context, db xcbdb.Database, config *params.Chai
context := core.NewCVMContext(msg, header, lc, nil)
vmenv := vm.NewCVM(context, state, config, vm.Config{})
gp := new(core.EnergyPool).AddEnergy(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
if state.Error() == nil {
- res = append(res, ret...)
+ res = append(res, result.Return()...)
}
}
}
@@ -208,7 +208,7 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
// Set the timeout as 1 second here, ensure there is enough time
// for travis to make the action.
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
cancel()
@@ -225,20 +225,20 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
// expect retrievals to fail (except genesis block) without a les peer
client.handler.backend.peers.lock.Lock()
- client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return false }
+ client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return false }
client.handler.backend.peers.lock.Unlock()
test(expFail)
// expect all retrievals to pass
client.handler.backend.peers.lock.Lock()
- client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return true }
+ client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return true }
client.handler.backend.peers.lock.Unlock()
test(5)
// still expect all retrievals to pass, now data should be cached locally
if checkCached {
client.handler.backend.peers.unregister(client.peer.speer.id)
- time.Sleep(time.Second * 1) // ensure that all peerSetNotify callbacks are executed
+ time.Sleep(time.Second * 3) // ensure that all peerSetNotify callbacks are executed
test(5)
}
}
diff --git a/les/peer.go b/les/peer.go
index 02b36e7dd..30c3d58d5 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -19,6 +19,8 @@ package les
import (
"errors"
"fmt"
+ lpc "github.com/core-coin/go-core/les/lespay/client"
+ "github.com/core-coin/go-core/xcb"
"math/big"
"math/rand"
"net"
@@ -34,10 +36,8 @@ import (
"github.com/core-coin/go-core/les/utils"
"github.com/core-coin/go-core/light"
"github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/p2p/enode"
"github.com/core-coin/go-core/params"
"github.com/core-coin/go-core/rlp"
- "github.com/core-coin/go-core/xcb"
)
var (
@@ -117,11 +117,6 @@ func (m keyValueMap) get(key string, val interface{}) error {
return rlp.DecodeBytes(enc, val)
}
-// peerIdToString converts enode.ID to a string form
-func peerIdToString(id enode.ID) string {
- return fmt.Sprintf("%x", id.Bytes())
-}
-
// peerCommons contains fields needed by both server peer and client peer.
type peerCommons struct {
*p2p.Peer
@@ -356,16 +351,19 @@ type serverPeer struct {
checkpointNumber uint64 // The block height which the checkpoint is registered.
checkpoint params.TrustedCheckpoint // The advertised checkpoint sent by server.
- poolEntry *poolEntry // Statistic for server peer.
- fcServer *flowcontrol.ServerNode // Client side mirror token bucket.
+ fcServer *flowcontrol.ServerNode // Client side mirror token bucket.
+ vtLock sync.Mutex
+ valueTracker *lpc.ValueTracker
+ nodeValueTracker *lpc.NodeValueTracker
+ sentReqs map[uint64]sentReqEntry
// Statistics
- errCount int // Counter the invalid responses server has replied
+ errCount utils.LinearExpiredValue // Counter the invalid responses server has replied
updateCount uint64
updateTime mclock.AbsTime
- // Callbacks
- hasBlock func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
+ // Test callback hooks
+ hasBlockHook func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
}
func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *serverPeer {
@@ -373,13 +371,14 @@ func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2
peerCommons: peerCommons{
Peer: p,
rw: rw,
- id: peerIdToString(p.ID()),
+ id: p.ID().String(),
version: version,
network: network,
sendQueue: utils.NewExecQueue(100),
closeCh: make(chan struct{}),
},
- trusted: trusted,
+ trusted: trusted,
+ errCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},
}
}
@@ -428,62 +427,71 @@ func sendRequest(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error
return p2p.Send(w, msgcode, req{reqID, data})
}
+func (p *serverPeer) sendRequest(msgcode, reqID uint64, data interface{}, amount int) error {
+ p.sentRequest(reqID, uint32(msgcode), uint32(amount))
+ return sendRequest(p.rw, msgcode, reqID, data)
+}
+
// requestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *serverPeer) requestHeadersByHash(reqID uint64, origin common.Hash, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
- return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
+ return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)
}
// requestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *serverPeer) requestHeadersByNumber(reqID, origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
- return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
+ return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)
}
// requestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *serverPeer) requestBodies(reqID uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
- return sendRequest(p.rw, GetBlockBodiesMsg, reqID, hashes)
+ return p.sendRequest(GetBlockBodiesMsg, reqID, hashes, len(hashes))
}
// requestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *serverPeer) requestCode(reqID uint64, reqs []CodeReq) error {
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
- return sendRequest(p.rw, GetCodeMsg, reqID, reqs)
+ return p.sendRequest(GetCodeMsg, reqID, reqs, len(reqs))
}
// requestReceipts fetches a batch of transaction receipts from a remote node.
func (p *serverPeer) requestReceipts(reqID uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
- return sendRequest(p.rw, GetReceiptsMsg, reqID, hashes)
+ return p.sendRequest(GetReceiptsMsg, reqID, hashes, len(hashes))
}
// requestProofs fetches a batch of merkle proofs from a remote node.
func (p *serverPeer) requestProofs(reqID uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
- return sendRequest(p.rw, GetProofsV2Msg, reqID, reqs)
+ return p.sendRequest(GetProofsV2Msg, reqID, reqs, len(reqs))
}
// requestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *serverPeer) requestHelperTrieProofs(reqID uint64, reqs []HelperTrieReq) error {
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
- return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, reqs)
+ return p.sendRequest(GetHelperTrieProofsMsg, reqID, reqs, len(reqs))
}
// requestTxStatus fetches a batch of transaction status records from a remote node.
func (p *serverPeer) requestTxStatus(reqID uint64, txHashes []common.Hash) error {
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
- return sendRequest(p.rw, GetTxStatusMsg, reqID, txHashes)
+ return p.sendRequest(GetTxStatusMsg, reqID, txHashes, len(txHashes))
}
// SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
-func (p *serverPeer) sendTxs(reqID uint64, txs rlp.RawValue) error {
- p.Log().Debug("Sending batch of transactions", "size", len(txs))
- return sendRequest(p.rw, SendTxV2Msg, reqID, txs)
+func (p *serverPeer) sendTxs(reqID uint64, amount int, txs rlp.RawValue) error {
+ p.Log().Debug("Sending batch of transactions", "amount", amount, "size", len(txs))
+ sizeFactor := (len(txs) + txSizeCostLimit/2) / txSizeCostLimit
+ if sizeFactor > amount {
+ amount = sizeFactor
+ }
+ return p.sendRequest(SendTxV2Msg, reqID, txs, amount)
}
// waitBefore implements distPeer interface
@@ -529,10 +537,13 @@ func (p *serverPeer) getTxRelayCost(amount, size int) uint64 {
return cost
}
-// HasBlock checks if the peer has a given block
func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
p.lock.RLock()
- p.lock.RUnlock()
+ defer p.lock.RUnlock()
+
+ if p.hasBlockHook != nil {
+ return p.hasBlockHook(hash, number, hasState)
+ }
head := p.headInfo.Number
var since, recent uint64
if hasState {
@@ -542,9 +553,7 @@ func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bo
since = p.chainSince
recent = p.chainRecent
}
- hasBlock := p.hasBlock
-
- return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState)
+ return head >= number && number >= since && (recent == 0 || number+recent+4 > head)
}
// updateFlowControl updates the flow control parameters belonging to the server
@@ -569,6 +578,15 @@ func (p *serverPeer) updateFlowControl(update keyValueMap) {
}
}
+// updateHead updates the head information based on the announcement from
+// the peer.
+func (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.headInfo = blockInfo{Hash: hash, Number: number, Td: td}
+}
+
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
@@ -630,6 +648,87 @@ func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
})
}
+// setValueTracker sets the value tracker references for connected servers. Note that the
+// references should be removed upon disconnection by setValueTracker(nil, nil).
+func (p *serverPeer) setValueTracker(vt *lpc.ValueTracker, nvt *lpc.NodeValueTracker) {
+ p.vtLock.Lock()
+ p.valueTracker = vt
+ p.nodeValueTracker = nvt
+ if nvt != nil {
+ p.sentReqs = make(map[uint64]sentReqEntry)
+ } else {
+ p.sentReqs = nil
+ }
+ p.vtLock.Unlock()
+}
+
+// updateVtParams updates the server's price table in the value tracker.
+func (p *serverPeer) updateVtParams() {
+ p.vtLock.Lock()
+ defer p.vtLock.Unlock()
+
+ if p.nodeValueTracker == nil {
+ return
+ }
+ reqCosts := make([]uint64, len(requestList))
+ for code, costs := range p.fcCosts {
+ if m, ok := requestMapping[uint32(code)]; ok {
+ reqCosts[m.first] = costs.baseCost + costs.reqCost
+ if m.rest != -1 {
+ reqCosts[m.rest] = costs.reqCost
+ }
+ }
+ }
+ p.valueTracker.UpdateCosts(p.nodeValueTracker, reqCosts)
+}
+
+// sentReqEntry remembers sent requests and their sending times
+type sentReqEntry struct {
+ reqType, amount uint32
+ at mclock.AbsTime
+}
+
+// sentRequest marks a request sent at the current moment to this server.
+func (p *serverPeer) sentRequest(id uint64, reqType, amount uint32) {
+ p.vtLock.Lock()
+ if p.sentReqs != nil {
+ p.sentReqs[id] = sentReqEntry{reqType, amount, mclock.Now()}
+ }
+ p.vtLock.Unlock()
+}
+
+// answeredRequest marks a request answered at the current moment by this server.
+func (p *serverPeer) answeredRequest(id uint64) {
+ p.vtLock.Lock()
+ if p.sentReqs == nil {
+ p.vtLock.Unlock()
+ return
+ }
+ e, ok := p.sentReqs[id]
+ delete(p.sentReqs, id)
+ vt := p.valueTracker
+ nvt := p.nodeValueTracker
+ p.vtLock.Unlock()
+ if !ok {
+ return
+ }
+ var (
+ vtReqs [2]lpc.ServedRequest
+ reqCount int
+ )
+ m := requestMapping[e.reqType]
+ if m.rest == -1 || e.amount <= 1 {
+ reqCount = 1
+ vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount}
+ } else {
+ reqCount = 2
+ vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: 1}
+ vtReqs[1] = lpc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1}
+ }
+ dt := time.Duration(mclock.Now() - e.at)
+ vt.Served(nvt, vtReqs[:reqCount], dt)
+}
+
// clientPeer represents each node to which the les server is connected.
// The node here refers to the light client.
type clientPeer struct {
@@ -638,11 +737,15 @@ type clientPeer struct {
// responseLock ensures that responses are queued in the same order as
// RequestProcessed is called
responseLock sync.Mutex
- server bool
- invalidCount uint32 // Counter the invalid request the client peer has made.
responseCount uint64 // Counter to generate an unique id for request processing.
- errCh chan error
- fcClient *flowcontrol.ClientNode // Server side mirror token bucket.
+
+ // invalidLock is used for protecting invalidCount.
+ invalidLock sync.RWMutex
+ invalidCount utils.LinearExpiredValue // Counter the invalid request the client peer has made.
+
+ server bool
+ errCh chan error
+ fcClient *flowcontrol.ClientNode // Server side mirror token bucket.
}
func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer {
@@ -650,13 +753,14 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite
peerCommons: peerCommons{
Peer: p,
rw: rw,
- id: peerIdToString(p.ID()),
+ id: p.ID().String(),
version: version,
network: network,
sendQueue: utils.NewExecQueue(100),
closeCh: make(chan struct{}),
},
- errCh: make(chan error, 1),
+ invalidCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},
+ errCh: make(chan error, 1),
}
}
@@ -896,6 +1000,18 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
})
}
+func (p *clientPeer) bumpInvalid() {
+ p.invalidLock.Lock()
+ p.invalidCount.Add(1, mclock.Now())
+ p.invalidLock.Unlock()
+}
+
+func (p *clientPeer) getInvalid() uint64 {
+ p.invalidLock.RLock()
+ defer p.invalidLock.RUnlock()
+ return p.invalidCount.Value(mclock.Now())
+}
+
// serverPeerSubscriber is an interface to notify services about added or
// removed server peers
type serverPeerSubscriber interface {
diff --git a/les/protocol.go b/les/protocol.go
index f9e540525..6b5d21dac 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -19,6 +19,8 @@ package les
import (
"errors"
"fmt"
+ lpc "github.com/core-coin/go-core/les/lespay/client"
+ "github.com/core-coin/go-core/rlp"
eddsa "github.com/core-coin/go-goldilocks"
"io"
"math/big"
@@ -26,7 +28,6 @@ import (
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/crypto"
"github.com/core-coin/go-core/p2p/enode"
- "github.com/core-coin/go-core/rlp"
)
// Constants to match up protocol versions and messages
@@ -77,19 +78,59 @@ const (
)
type requestInfo struct {
- name string
- maxCount uint64
-}
-
-var requests = map[uint64]requestInfo{
- GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch},
- GetBlockBodiesMsg: {"GetBlockBodies", MaxBodyFetch},
- GetReceiptsMsg: {"GetReceipts", MaxReceiptFetch},
- GetCodeMsg: {"GetCode", MaxCodeFetch},
- GetProofsV2Msg: {"GetProofsV2", MaxProofsFetch},
- GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch},
- SendTxV2Msg: {"SendTxV2", MaxTxSend},
- GetTxStatusMsg: {"GetTxStatus", MaxTxStatus},
+ name string
+ maxCount uint64
+ refBasketFirst, refBasketRest float64
+}
+
+// reqMapping maps an LES request to one or two lespay service vector entries.
+// If rest != -1 and the request type is used with amounts larger than one then the
+// first one of the multi-request is mapped to first while the rest is mapped to rest.
+type reqMapping struct {
+ first, rest int
+}
+
+var (
+ // requests describes the available LES request types and their initializing amounts
+ // in the lespay/client.ValueTracker reference basket. Initial values are estimates
+ // based on the same values as the server's default cost estimates (reqAvgTimeCost).
+ requests = map[uint64]requestInfo{
+ GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch, 10, 1000},
+ GetBlockBodiesMsg: {"GetBlockBodies", MaxBodyFetch, 1, 0},
+ GetReceiptsMsg: {"GetReceipts", MaxReceiptFetch, 1, 0},
+ GetCodeMsg: {"GetCode", MaxCodeFetch, 1, 0},
+ GetProofsV2Msg: {"GetProofsV2", MaxProofsFetch, 10, 0},
+ GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch, 10, 100},
+ SendTxV2Msg: {"SendTxV2", MaxTxSend, 1, 0},
+ GetTxStatusMsg: {"GetTxStatus", MaxTxStatus, 10, 0},
+ }
+ requestList []lpc.RequestInfo
+ requestMapping map[uint32]reqMapping
+)
+
+// init creates a request list and mapping between protocol message codes and lespay
+// service vector indices.
+func init() {
+ requestMapping = make(map[uint32]reqMapping)
+ for code, req := range requests {
+ cost := reqAvgTimeCost[code]
+ rm := reqMapping{len(requestList), -1}
+ requestList = append(requestList, lpc.RequestInfo{
+ Name: req.name + ".first",
+ InitAmount: req.refBasketFirst,
+ InitValue: float64(cost.baseCost + cost.reqCost),
+ })
+ if req.refBasketRest != 0 {
+ rm.rest = len(requestList)
+ requestList = append(requestList, lpc.RequestInfo{
+ Name: req.name + ".rest",
+ InitAmount: req.refBasketRest,
+ InitValue: float64(cost.reqCost),
+ })
+ }
+ requestMapping[uint32(code)] = rm
+ }
+
}
type errCode int
diff --git a/les/pruner.go b/les/pruner.go
index 951561a45..9517b0f0d 100644
--- a/les/pruner.go
+++ b/les/pruner.go
@@ -53,7 +53,7 @@ func (p *pruner) close() {
}
// loop periodically queries the status of chain indexers and prunes useless
-// historical chain data. Notably, whenever Geth restarts, it will iterate
+// historical chain data. Notably, whenever Gocore restarts, it will iterate
// all historical sections even they don't exist at all(below checkpoint) so
// that light client can prune cached chain data that was ODRed after pruning
// that section.
@@ -65,7 +65,7 @@ func (p *pruner) loop() {
// pruning finds the sections that have been processed by all indexers
// and deletes all historical chain data.
- // Note, if some indexers don't support pruning(e.g. eth.BloomIndexer),
+ // Note, if some indexers don't support pruning(e.g. xcb.BloomIndexer),
// pruning operations can be silently ignored.
pruning := func() {
min := uint64(math.MaxUint64)
diff --git a/les/pruner_test.go b/les/pruner_test.go
index 2ce03ec52..ec8f2bd66 100644
--- a/les/pruner_test.go
+++ b/les/pruner_test.go
@@ -37,7 +37,7 @@ func TestLightPruner(t *testing.T) {
if cs >= 3 && bts >= 3 {
break
}
- time.Sleep(10 * time.Millisecond)
+ time.Sleep(15 * time.Millisecond)
}
}
server, client, tearDown := newClientServerEnv(t, int(3*config.ChtSize+config.ChtConfirms), 2, waitIndexers, nil, 0, false, true, false)
@@ -46,7 +46,7 @@ func TestLightPruner(t *testing.T) {
// checkDB iterates the chain with given prefix, resolves the block number
// with given callback and ensures this entry should exist or not.
checkDB := func(from, to uint64, prefix []byte, resolve func(key, value []byte) *uint64, exist bool) bool {
- it := client.db.NewIteratorWithPrefix(prefix)
+ it := client.db.NewIterator(prefix, nil)
defer it.Release()
var next = from
@@ -122,10 +122,10 @@ func TestLightPruner(t *testing.T) {
}
}
// Start light pruner.
- time.Sleep(1500 * time.Millisecond) // Ensure light client has finished the syncing and indexing
+ time.Sleep(2500 * time.Millisecond) // Ensure light client has finished the syncing and indexing
newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer)
- time.Sleep(1500 * time.Millisecond) // Ensure pruner have enough time to prune data.
+ time.Sleep(2500 * time.Millisecond) // Ensure pruner have enough time to prune data.
checkPruned(1, config.ChtSize-1)
// Ensure all APIs still work after pruning.
@@ -192,6 +192,6 @@ func TestLightPruner(t *testing.T) {
// Ensure the ODR cached data can be cleaned by pruner.
newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer)
- time.Sleep(50 * time.Millisecond) // Ensure pruner have enough time to prune data.
- checkPruned(1, config.ChtSize-1) // Ensure all cached data(by odr) is cleaned.
+ time.Sleep(150 * time.Millisecond) // Ensure pruner have enough time to prune data.
+ checkPruned(1, config.ChtSize-1) // Ensure all cached data(by odr) is cleaned.
}
diff --git a/les/request_test.go b/les/request_test.go
index 46425eb22..d9bd99972 100644
--- a/les/request_test.go
+++ b/les/request_test.go
@@ -78,6 +78,7 @@ func tfCodeAccess(db xcbdb.Database, bhash common.Hash, num uint64) light.OdrReq
}
func testAccess(t *testing.T, protocol int, fn accessTestFn) {
+ t.Skip("skip long-running tests")
// Assemble the test environment
server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true, true)
defer tearDown()
@@ -92,7 +93,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
for i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ {
bhash := rawdb.ReadCanonicalHash(server.db, i)
if req := fn(client.db, bhash, i); req != nil {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
err := client.handler.backend.odr.Retrieve(ctx, req)
cancel()
diff --git a/les/retrieve.go b/les/retrieve.go
index 3971e93ae..9f52425f1 100644
--- a/les/retrieve.go
+++ b/les/retrieve.go
@@ -24,22 +24,20 @@ import (
"sync"
"time"
- "github.com/core-coin/go-core/common/mclock"
"github.com/core-coin/go-core/light"
)
var (
retryQueue = time.Millisecond * 100
- softRequestTimeout = time.Second
hardRequestTimeout = time.Second * 30
)
// retrieveManager is a layer on top of requestDistributor which takes care of
// matching replies by request ID and handles timeouts and resends if necessary.
type retrieveManager struct {
- dist *requestDistributor
- peers *serverPeerSet
- serverPool peerSelector
+ dist *requestDistributor
+ peers *serverPeerSet
+ softRequestTimeout func() time.Duration
lock sync.RWMutex
sentReqs map[uint64]*sentReq
@@ -48,11 +46,6 @@ type retrieveManager struct {
// validatorFunc is a function that processes a reply message
type validatorFunc func(distPeer, *Msg) error
-// peerSelector receives feedback info about response times and timeouts
-type peerSelector interface {
- adjustResponseTime(*poolEntry, time.Duration, bool)
-}
-
// sentReq represents a request sent and tracked by retrieveManager
type sentReq struct {
rm *retrieveManager
@@ -99,12 +92,12 @@ const (
)
// newRetrieveManager creates the retrieve manager
-func newRetrieveManager(peers *serverPeerSet, dist *requestDistributor, serverPool peerSelector) *retrieveManager {
+func newRetrieveManager(peers *serverPeerSet, dist *requestDistributor, srto func() time.Duration) *retrieveManager {
return &retrieveManager{
- peers: peers,
- dist: dist,
- serverPool: serverPool,
- sentReqs: make(map[uint64]*sentReq),
+ peers: peers,
+ dist: dist,
+ sentReqs: make(map[uint64]*sentReq),
+ softRequestTimeout: srto,
}
}
@@ -325,8 +318,7 @@ func (r *sentReq) tryRequest() {
return
}
- reqSent := mclock.Now()
- srto, hrto := false, false
+ hrto := false
r.lock.RLock()
s, ok := r.sentTo[p]
@@ -338,11 +330,7 @@ func (r *sentReq) tryRequest() {
defer func() {
// send feedback to server pool and remove peer if hard timeout happened
pp, ok := p.(*serverPeer)
- if ok && r.rm.serverPool != nil {
- respTime := time.Duration(mclock.Now() - reqSent)
- r.rm.serverPool.adjustResponseTime(pp.poolEntry, respTime, srto)
- }
- if hrto {
+ if hrto && ok {
pp.Log().Debug("Request timed out hard")
if r.rm.peers != nil {
r.rm.peers.unregister(pp.id)
@@ -363,8 +351,7 @@ func (r *sentReq) tryRequest() {
}
r.eventsCh <- reqPeerEvent{event, p}
return
- case <-time.After(softRequestTimeout):
- srto = true
+ case <-time.After(r.rm.softRequestTimeout()):
r.eventsCh <- reqPeerEvent{rpSoftTimeout, p}
}
diff --git a/les/server.go b/les/server.go
index f2784747a..0743d7e1c 100644
--- a/les/server.go
+++ b/les/server.go
@@ -17,13 +17,11 @@
package les
import (
+ "github.com/core-coin/go-core/node"
eddsa "github.com/core-coin/go-goldilocks"
"time"
- "github.com/core-coin/go-core/accounts/abi/bind"
"github.com/core-coin/go-core/common/mclock"
- "github.com/core-coin/go-core/core"
- "github.com/core-coin/go-core/les/checkpointoracle"
"github.com/core-coin/go-core/les/flowcontrol"
"github.com/core-coin/go-core/light"
"github.com/core-coin/go-core/log"
@@ -56,9 +54,11 @@ type LesServer struct {
minCapacity, maxCapacity, freeCapacity uint64
threadsIdle int // Request serving threads count when system is idle.
threadsBusy int // Request serving threads count when system is busy(block insertion).
+
+ p2pSrv *p2p.Server
}
-func NewLesServer(e *xcb.Core, config *xcb.Config) (*LesServer, error) {
+func NewLesServer(node *node.Node, e *xcb.Core, config *xcb.Config) (*LesServer, error) {
// Collect les protocol version information supported by local node.
lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions))
for i, pv := range AdvertiseProtocolVersions {
@@ -90,17 +90,16 @@ func NewLesServer(e *xcb.Core, config *xcb.Config) (*LesServer, error) {
servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
threadsBusy: config.LightServ/100 + 1,
threadsIdle: threads,
+ p2pSrv: node.Server(),
}
srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced)
srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
srv.freeCapacity = srv.minCapacity
- // Set up checkpoint oracle.
- oracle := config.CheckpointOracle
- if oracle == nil {
- oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
- }
- srv.oracle = checkpointoracle.New(oracle, srv.localCheckpoint)
+ srv.oracle = srv.setupOracle(node, e.BlockChain().Genesis().Hash(), config)
+
+ // Initialize the bloom trie indexer.
+ e.BloomIndexer().AddChildIndexer(srv.bloomTrieIndexer)
// Initialize server capacity management fields.
srv.defParams = flowcontrol.ServerParams{
@@ -118,7 +117,7 @@ func NewLesServer(e *xcb.Core, config *xcb.Config) (*LesServer, error) {
srv.maxCapacity = totalRecharge
}
srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2)
- srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(peerIdToString(id)) })
+ srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) })
srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})
checkpoint := srv.latestLocalCheckpoint()
@@ -127,6 +126,11 @@ func NewLesServer(e *xcb.Core, config *xcb.Config) (*LesServer, error) {
"chtroot", checkpoint.CHTRoot, "bloomroot", checkpoint.BloomRoot)
}
srv.chtIndexer.Start(e.BlockChain())
+
+ node.RegisterProtocols(srv.Protocols())
+ node.RegisterAPIs(srv.APIs())
+ node.RegisterLifecycle(srv)
+
return srv, nil
}
@@ -155,11 +159,11 @@ func (s *LesServer) APIs() []rpc.API {
func (s *LesServer) Protocols() []p2p.Protocol {
ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
- if p := s.peers.peer(peerIdToString(id)); p != nil {
+ if p := s.peers.peer(id.String()); p != nil {
return p.Info()
}
return nil
- })
+ }, nil)
// Add "les" ENR entries.
for i := range ps {
ps[i].Attributes = []enr.Entry{&lesEntry{}}
@@ -168,14 +172,14 @@ func (s *LesServer) Protocols() []p2p.Protocol {
}
// Start starts the LES server
-func (s *LesServer) Start(srvr *p2p.Server) {
- s.privateKey = srvr.PrivateKey
+func (s *LesServer) Start() error {
+ s.privateKey = s.p2pSrv.PrivateKey
s.handler.start()
s.wg.Add(1)
go s.capacityManagement()
- if srvr.DiscV5 != nil {
+ if s.p2pSrv.DiscV5 != nil {
for _, topic := range s.lesTopics {
topic := topic
go func() {
@@ -183,14 +187,16 @@ func (s *LesServer) Start(srvr *p2p.Server) {
logger.Info("Starting topic registration")
defer logger.Info("Terminated topic registration")
- srvr.DiscV5.RegisterTopic(topic, s.closeCh)
+ s.p2pSrv.DiscV5.RegisterTopic(topic, s.closeCh)
}()
}
}
+
+ return nil
}
// Stop stops the LES service
-func (s *LesServer) Stop() {
+func (s *LesServer) Stop() error {
close(s.closeCh)
// Disconnect existing connections with other LES servers.
@@ -212,18 +218,8 @@ func (s *LesServer) Stop() {
s.chtIndexer.Close()
s.wg.Wait()
log.Info("Les server stopped")
-}
-func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
- bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
-}
-
-// SetClient sets the rpc client and starts running checkpoint contract if it is not yet watched.
-func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
- if s.oracle == nil {
- return
- }
- s.oracle.Start(backend)
+ return nil
}
// capacityManagement starts an event handler loop that updates the recharge curve of
diff --git a/les/server_handler.go b/les/server_handler.go
index 4e92df012..a215c14c0 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -325,7 +325,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
break
}
headers = append(headers, origin)
@@ -422,7 +422,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
}
body := h.blockchain.GetBodyRLP(hash)
if body == nil {
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
bodies = append(bodies, body)
@@ -470,7 +470,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
header := h.blockchain.GetHeaderByHash(request.BHash)
if header == nil {
p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
// Refuse to search stale state data in the database since looking for
@@ -478,7 +478,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
local := h.blockchain.CurrentHeader().Number.Uint64()
if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
triedb := h.blockchain.StateCache().TrieDB()
@@ -486,7 +486,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))
@@ -545,7 +545,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
results := h.blockchain.GetReceiptsByHash(hash)
if results == nil {
if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
}
@@ -608,7 +608,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
// Refuse to search stale state data in the database since looking for
@@ -616,14 +616,14 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
local := h.blockchain.CurrentHeader().Number.Uint64()
if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
root = header.Root
}
// If a header lookup failed (non existent), ignore subsequent requests for the same header
if root == (common.Hash{}) {
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
// Open the account or storage trie for the request
@@ -642,7 +642,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
- atomic.AddUint32(&p.invalidCount, 1)
+ p.bumpInvalid()
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
@@ -836,9 +836,9 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
clientErrorMeter.Mark(1)
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
- // If the client has made too much invalid request(e.g. request a non-exist data),
+ // If the client has made too much invalid request(e.g. request a non-existent data),
// reject them to prevent SPAM attack.
- if atomic.LoadUint32(&p.invalidCount) > maxRequestErrors {
+ if p.getInvalid() > maxRequestErrors {
clientErrorMeter.Mark(1)
return errTooManyInvalidRequest
}
diff --git a/les/serverpool.go b/les/serverpool.go
index 6a1d6c95e..20d282a5a 100644
--- a/les/serverpool.go
+++ b/les/serverpool.go
@@ -17,905 +17,457 @@
package les
import (
- "fmt"
- "io"
- "math"
+ "errors"
+ lpc "github.com/core-coin/go-core/les/lespay/client"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/xcbdb"
"math/rand"
- "net"
- "strconv"
+ "reflect"
"sync"
+ "sync/atomic"
"time"
- eddsa "github.com/core-coin/go-goldilocks"
-
"github.com/core-coin/go-core/common/mclock"
- "github.com/core-coin/go-core/crypto"
"github.com/core-coin/go-core/les/utils"
"github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/p2p/discv5"
"github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/nodestate"
"github.com/core-coin/go-core/rlp"
- "github.com/core-coin/go-core/xcbdb"
)
const (
- // After a connection has been ended or timed out, there is a waiting period
- // before it can be selected for connection again.
- // waiting period = base delay * (1 + random(1))
- // base delay = shortRetryDelay for the first shortRetryCnt times after a
- // successful connection, after that longRetryDelay is applied
- shortRetryCnt = 5
- shortRetryDelay = time.Second * 5
- longRetryDelay = time.Minute * 10
- // maxNewEntries is the maximum number of newly discovered (never connected) nodes.
- // If the limit is reached, the least recently discovered one is thrown out.
- maxNewEntries = 1000
- // maxKnownEntries is the maximum number of known (already connected) nodes.
- // If the limit is reached, the least recently connected one is thrown out.
- // (not that unlike new entries, known entries are persistent)
- maxKnownEntries = 1000
- // target for simultaneously connected servers
- targetServerCount = 5
- // target for servers selected from the known table
- // (we leave room for trying new ones if there is any)
- targetKnownSelect = 3
- // after dialTimeout, consider the server unavailable and adjust statistics
- dialTimeout = time.Second * 500
- // targetConnTime is the minimum expected connection duration before a server
- // drops a client without any specific reason
- targetConnTime = time.Minute * 10
- // new entry selection weight calculation based on most recent discovery time:
- // unity until discoverExpireStart, then exponential decay with discoverExpireConst
- discoverExpireStart = time.Minute * 20
- discoverExpireConst = time.Minute * 20
- // known entry selection weight is dropped by a factor of exp(-failDropLn) after
- // each unsuccessful connection (restored after a successful one)
- failDropLn = 0.1
- // known node connection success and quality statistics have a long term average
- // and a short term value which is adjusted exponentially with a factor of
- // pstatRecentAdjust with each dial/connection and also returned exponentially
- // to the average with the time constant pstatReturnToMeanTC
- pstatReturnToMeanTC = time.Hour
- // node address selection weight is dropped by a factor of exp(-addrFailDropLn) after
- // each unsuccessful connection (restored after a successful one)
- addrFailDropLn = math.Ln2
- // responseScoreTC and delayScoreTC are exponential decay time constants for
- // calculating selection chances from response times and block delay times
- responseScoreTC = time.Millisecond * 100
- delayScoreTC = time.Second * 5
- timeoutPow = 10
- // initStatsWeight is used to initialize previously unknown peers with good
- // statistics to give a chance to prove themselves
- initStatsWeight = 1
+ minTimeout = time.Millisecond * 500 // minimum request timeout suggested by the server pool
+ timeoutRefresh = time.Second * 5 // recalculate timeout if older than this
+ dialCost = 10000 // cost of a TCP dial (used for known node selection weight calculation)
+ dialWaitStep = 1.5 // exponential multiplier of redial wait time when no value was provided by the server
+ queryCost = 500 // cost of a UDP pre-negotiation query
+ queryWaitStep = 1.02 // exponential multiplier of redial wait time when no value was provided by the server
+ waitThreshold = time.Hour * 2000 // drop node if waiting time is over the threshold
+ nodeWeightMul = 1000000 // multiplier constant for node weight calculation
+ nodeWeightThreshold = 100 // minimum weight for keeping a node in the known (valuable) set
+ minRedialWait = 10 // minimum redial wait time in seconds
+ preNegLimit = 5 // maximum number of simultaneous pre-negotiation queries
+ maxQueryFails = 100 // number of consecutive UDP query failures before we print a warning
)
-// connReq represents a request for peer connection.
-type connReq struct {
- p *serverPeer
- node *enode.Node
- result chan *poolEntry
-}
-
-// disconnReq represents a request for peer disconnection.
-type disconnReq struct {
- entry *poolEntry
- stopped bool
- done chan struct{}
-}
-
-// registerReq represents a request for peer registration.
-type registerReq struct {
- entry *poolEntry
- done chan struct{}
-}
-
-// serverPool implements a pool for storing and selecting newly discovered and already
-// known light server nodes. It received discovered nodes, stores statistics about
-// known nodes and takes care of always having enough good quality servers connected.
+// serverPool provides a node iterator for dial candidates. The output is a mix of newly discovered
+// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.
type serverPool struct {
- db xcbdb.Database
- dbKey []byte
- server *p2p.Server
- connWg sync.WaitGroup
-
- topic discv5.Topic
-
- discSetPeriod chan time.Duration
- discNodes chan *enode.Node
- discLookups chan bool
-
- trustedNodes map[enode.ID]*enode.Node
- entries map[enode.ID]*poolEntry
- timeout, enableRetry chan *poolEntry
- adjustStats chan poolStatAdjust
-
- knownQueue, newQueue poolEntryQueue
- knownSelect, newSelect *utils.WeightedRandomSelect
- knownSelected, newSelected int
- fastDiscover bool
- connCh chan *connReq
- disconnCh chan *disconnReq
- registerCh chan *registerReq
-
- closeCh chan struct{}
- wg sync.WaitGroup
-}
-
-// newServerPool creates a new serverPool instance
-func newServerPool(db xcbdb.Database, ulcServers []string) *serverPool {
- pool := &serverPool{
- db: db,
- entries: make(map[enode.ID]*poolEntry),
- timeout: make(chan *poolEntry, 1),
- adjustStats: make(chan poolStatAdjust, 100),
- enableRetry: make(chan *poolEntry, 1),
- connCh: make(chan *connReq),
- disconnCh: make(chan *disconnReq),
- registerCh: make(chan *registerReq),
- closeCh: make(chan struct{}),
- knownSelect: utils.NewWeightedRandomSelect(),
- newSelect: utils.NewWeightedRandomSelect(),
- fastDiscover: true,
- trustedNodes: parseTrustedNodes(ulcServers),
- }
-
- pool.knownQueue = newPoolEntryQueue(maxKnownEntries, pool.removeEntry)
- pool.newQueue = newPoolEntryQueue(maxNewEntries, pool.removeEntry)
- return pool
-}
-
-func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
- pool.server = server
- pool.topic = topic
- pool.dbKey = append([]byte("serverPool/"), []byte(topic)...)
- pool.loadNodes()
- pool.connectToTrustedNodes()
-
- if pool.server.DiscV5 != nil {
- pool.discSetPeriod = make(chan time.Duration, 1)
- pool.discNodes = make(chan *enode.Node, 100)
- pool.discLookups = make(chan bool, 100)
- go pool.discoverNodes()
- }
- pool.checkDial()
- pool.wg.Add(1)
- go pool.eventLoop()
-
- // Inject the bootstrap nodes as initial dial candiates.
- pool.wg.Add(1)
- go func() {
- defer pool.wg.Done()
- for _, n := range server.BootstrapNodes {
- select {
- case pool.discNodes <- n:
- case <-pool.closeCh:
- return
+ clock mclock.Clock
+ unixTime func() int64
+ db xcbdb.KeyValueStore
+
+ ns *nodestate.NodeStateMachine
+ vt *lpc.ValueTracker
+ mixer *enode.FairMix
+ mixSources []enode.Iterator
+ dialIterator enode.Iterator
+ validSchemes enr.IdentityScheme
+ trustedURLs []string
+ fillSet *lpc.FillSet
+ queryFails uint32
+
+ timeoutLock sync.RWMutex
+ timeout time.Duration
+ timeWeights lpc.ResponseTimeWeights
+ timeoutRefreshed mclock.AbsTime
+}
+
+// nodeHistory keeps track of dial costs which determine node weight together with the
+// service value calculated by lpc.ValueTracker.
+type nodeHistory struct {
+ dialCost utils.ExpiredValue
+ redialWaitStart, redialWaitEnd int64 // unix time (seconds)
+}
+
+type nodeHistoryEnc struct {
+ DialCost utils.ExpiredValue
+ RedialWaitStart, RedialWaitEnd uint64
+}
+
+// queryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.
+// It returns 1 if the remote node has confirmed that connection is possible, 0 if not
+// possible and -1 if no response arrived (timeout).
+type queryFunc func(*enode.Node) int
+
+var (
+ serverPoolSetup = &nodestate.Setup{Version: 1}
+ sfHasValue = serverPoolSetup.NewPersistentFlag("hasValue")
+ sfQueried = serverPoolSetup.NewFlag("queried")
+ sfCanDial = serverPoolSetup.NewFlag("canDial")
+ sfDialing = serverPoolSetup.NewFlag("dialed")
+ sfWaitDialTimeout = serverPoolSetup.NewFlag("dialTimeout")
+ sfConnected = serverPoolSetup.NewFlag("connected")
+ sfRedialWait = serverPoolSetup.NewFlag("redialWait")
+ sfAlwaysConnect = serverPoolSetup.NewFlag("alwaysConnect")
+ sfDisableSelection = nodestate.MergeFlags(sfQueried, sfCanDial, sfDialing, sfConnected, sfRedialWait)
+
+ sfiNodeHistory = serverPoolSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}),
+ func(field interface{}) ([]byte, error) {
+ if n, ok := field.(nodeHistory); ok {
+ ne := nodeHistoryEnc{
+ DialCost: n.dialCost,
+ RedialWaitStart: uint64(n.redialWaitStart),
+ RedialWaitEnd: uint64(n.redialWaitEnd),
+ }
+ enc, err := rlp.EncodeToBytes(&ne)
+ return enc, err
+ } else {
+ return nil, errors.New("invalid field type")
}
- }
- }()
-}
-
-func (pool *serverPool) stop() {
- close(pool.closeCh)
- pool.wg.Wait()
-}
-
-// discoverNodes wraps SearchTopic, converting result nodes to enode.Node.
-func (pool *serverPool) discoverNodes() {
- ch := make(chan *discv5.Node)
- go func() {
- pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, ch, pool.discLookups)
- close(ch)
- }()
- for n := range ch {
- pubkey, err := decodePubkey64(n.ID[:])
- if err != nil {
- continue
- }
- pool.discNodes <- enode.NewV4(pubkey, n.IP, int(n.TCP), int(n.UDP))
- }
-}
-
-// connect should be called upon any incoming connection. If the connection has been
-// dialed by the server pool recently, the appropriate pool entry is returned.
-// Otherwise, the connection should be rejected.
-// Note that whenever a connection has been accepted and a pool entry has been returned,
-// disconnect should also always be called.
-func (pool *serverPool) connect(p *serverPeer, node *enode.Node) *poolEntry {
- log.Debug("Connect new entry", "enode", p.id)
- req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)}
- select {
- case pool.connCh <- req:
- case <-pool.closeCh:
- return nil
- }
- return <-req.result
-}
-
-// registered should be called after a successful handshake
-func (pool *serverPool) registered(entry *poolEntry) {
- log.Debug("Registered new entry", "enode", entry.node.ID())
- req := &registerReq{entry: entry, done: make(chan struct{})}
- select {
- case pool.registerCh <- req:
- case <-pool.closeCh:
- return
- }
- <-req.done
-}
-
-// disconnect should be called when ending a connection. Service quality statistics
-// can be updated optionally (not updated if no registration happened, in this case
-// only connection statistics are updated, just like in case of timeout)
-func (pool *serverPool) disconnect(entry *poolEntry) {
- stopped := false
- select {
- case <-pool.closeCh:
- stopped = true
- default:
- }
- log.Debug("Disconnected old entry", "enode", entry.node.ID())
- req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})}
-
- // Block until disconnection request is served.
- pool.disconnCh <- req
- <-req.done
-}
-
-const (
- pseBlockDelay = iota
- pseResponseTime
- pseResponseTimeout
+ },
+ func(enc []byte) (interface{}, error) {
+ var ne nodeHistoryEnc
+ err := rlp.DecodeBytes(enc, &ne)
+ n := nodeHistory{
+ dialCost: ne.DialCost,
+ redialWaitStart: int64(ne.RedialWaitStart),
+ redialWaitEnd: int64(ne.RedialWaitEnd),
+ }
+ return n, err
+ },
+ )
+ sfiNodeWeight = serverPoolSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
+ sfiConnectedStats = serverPoolSetup.NewField("connectedStats", reflect.TypeOf(lpc.ResponseTimeStats{}))
)
-// poolStatAdjust records are sent to adjust peer block delay/response time statistics
-type poolStatAdjust struct {
- adjustType int
- entry *poolEntry
- time time.Duration
-}
-
-// adjustBlockDelay adjusts the block announce delay statistics of a node
-func (pool *serverPool) adjustBlockDelay(entry *poolEntry, time time.Duration) {
- if entry == nil {
- return
- }
- pool.adjustStats <- poolStatAdjust{pseBlockDelay, entry, time}
-}
-
-// adjustResponseTime adjusts the request response time statistics of a node
-func (pool *serverPool) adjustResponseTime(entry *poolEntry, time time.Duration, timeout bool) {
- if entry == nil {
- return
- }
- if timeout {
- pool.adjustStats <- poolStatAdjust{pseResponseTimeout, entry, time}
- } else {
- pool.adjustStats <- poolStatAdjust{pseResponseTime, entry, time}
- }
-}
-
-// eventLoop handles pool events and mutex locking for all internal functions
-func (pool *serverPool) eventLoop() {
- defer pool.wg.Done()
- lookupCnt := 0
- var convTime mclock.AbsTime
- if pool.discSetPeriod != nil {
- pool.discSetPeriod <- time.Millisecond * 100
- }
+// newServerPool creates a new server pool
+func newServerPool(db xcbdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, discovery enode.Iterator, mixTimeout time.Duration, query queryFunc, clock mclock.Clock, trustedURLs []string) *serverPool {
+ s := &serverPool{
+ db: db,
+ clock: clock,
+ unixTime: func() int64 { return time.Now().Unix() },
+ validSchemes: enode.ValidSchemes,
+ trustedURLs: trustedURLs,
+ vt: vt,
+ ns: nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, serverPoolSetup),
+ }
+ s.recalTimeout()
+ s.mixer = enode.NewFairMix(mixTimeout)
+ knownSelector := lpc.NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight)
+ alwaysConnect := lpc.NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil)
+ s.mixSources = append(s.mixSources, knownSelector)
+ s.mixSources = append(s.mixSources, alwaysConnect)
+ if discovery != nil {
+ s.mixSources = append(s.mixSources, discovery)
+ }
+
+ iter := enode.Iterator(s.mixer)
+ if query != nil {
+ iter = s.addPreNegFilter(iter, query)
+ }
+ s.dialIterator = enode.Filter(iter, func(node *enode.Node) bool {
+ s.ns.SetState(node, sfDialing, sfCanDial, 0)
+ s.ns.SetState(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)
+ return true
+ })
- // disconnect updates service quality statistics depending on the connection time
- // and disconnection initiator.
- disconnect := func(req *disconnReq, stopped bool) {
- // Handle peer disconnection requests.
- entry := req.entry
- if entry.state == psRegistered {
- connAdjust := float64(mclock.Now()-entry.regTime) / float64(targetConnTime)
- if connAdjust > 1 {
- connAdjust = 1
- }
- if stopped {
- // disconnect requested by ourselves.
- entry.connectStats.add(1, connAdjust)
- } else {
- // disconnect requested by server side.
- entry.connectStats.add(connAdjust, 1)
- }
+ s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {
+ if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {
+ // dial timeout, no connection
+ s.setRedialWait(n, dialCost, dialWaitStep)
+ s.ns.SetState(n, nodestate.Flags{}, sfDialing, 0)
}
- entry.state = psNotConnected
-
- if entry.knownSelected {
- pool.knownSelected--
- } else {
- pool.newSelected--
- }
- pool.setRetryDial(entry)
- pool.connWg.Done()
- close(req.done)
- }
-
- for {
- select {
- case entry := <-pool.timeout:
- if !entry.removed {
- pool.checkDialTimeout(entry)
- }
-
- case entry := <-pool.enableRetry:
- if !entry.removed {
- entry.delayedRetry = false
- pool.updateCheckDial(entry)
- }
+ })
- case adj := <-pool.adjustStats:
- switch adj.adjustType {
- case pseBlockDelay:
- adj.entry.delayStats.add(float64(adj.time), 1)
- case pseResponseTime:
- adj.entry.responseStats.add(float64(adj.time), 1)
- adj.entry.timeoutStats.add(0, 1)
- case pseResponseTimeout:
- adj.entry.timeoutStats.add(1, 1)
+ s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge)
+ s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil)
+ s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge)
+ return s
+}
+
+// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.
+// Nodes that are filtered out and do not appear on the output iterator are put back
+// into redialWait state.
+func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enode.Iterator {
+ s.fillSet = lpc.NewFillSet(s.ns, input, sfQueried)
+ s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) {
+ if newState.Equals(sfQueried) {
+ fails := atomic.LoadUint32(&s.queryFails)
+ if fails == maxQueryFails {
+ log.Warn("UDP pre-negotiation query does not seem to work")
}
-
- case node := <-pool.discNodes:
- if pool.trustedNodes[node.ID()] == nil {
- entry := pool.findOrNewNode(node)
- pool.updateCheckDial(entry)
+ if fails > maxQueryFails {
+ fails = maxQueryFails
}
-
- case conv := <-pool.discLookups:
- if conv {
- if lookupCnt == 0 {
- convTime = mclock.Now()
- }
- lookupCnt++
- if pool.fastDiscover && (lookupCnt == 50 || time.Duration(mclock.Now()-convTime) > time.Minute) {
- pool.fastDiscover = false
- if pool.discSetPeriod != nil {
- pool.discSetPeriod <- time.Minute
- }
- }
+ if rand.Intn(maxQueryFails*2) < int(fails) {
+ // skip pre-negotiation with increasing chance, max 50%
+ // this ensures that the client can operate even if UDP is not working at all
+ s.ns.SetState(n, sfCanDial, nodestate.Flags{}, time.Second*10)
+ // set canDial before resetting queried so that FillSet will not read more
+ // candidates unnecessarily
+ s.ns.SetState(n, nodestate.Flags{}, sfQueried, 0)
+ return
}
-
- case req := <-pool.connCh:
- if pool.trustedNodes[req.p.ID()] != nil {
- // ignore trusted nodes
- req.result <- &poolEntry{trusted: true}
- } else {
- // Handle peer connection requests.
- entry := pool.entries[req.p.ID()]
- if entry == nil {
- entry = pool.findOrNewNode(req.node)
- }
- if entry.state == psConnected || entry.state == psRegistered {
- req.result <- nil
- continue
+ go func() {
+ q := query(n)
+ if q == -1 {
+ atomic.AddUint32(&s.queryFails, 1)
+ } else {
+ atomic.StoreUint32(&s.queryFails, 0)
}
- pool.connWg.Add(1)
- entry.peer = req.p
- entry.state = psConnected
- addr := &poolEntryAddress{
- ip: req.node.IP(),
- port: uint16(req.node.TCP()),
- lastSeen: mclock.Now(),
+ if q == 1 {
+ s.ns.SetState(n, sfCanDial, nodestate.Flags{}, time.Second*10)
+ } else {
+ s.setRedialWait(n, queryCost, queryWaitStep)
}
- entry.lastConnected = addr
- entry.addr = make(map[string]*poolEntryAddress)
- entry.addr[addr.strKey()] = addr
- entry.addrSelect = *utils.NewWeightedRandomSelect()
- entry.addrSelect.Update(addr)
- req.result <- entry
- }
-
- case req := <-pool.registerCh:
- if req.entry.trusted {
- continue
- }
- // Handle peer registration requests.
- entry := req.entry
- entry.state = psRegistered
- entry.regTime = mclock.Now()
- if !entry.known {
- pool.newQueue.remove(entry)
- entry.known = true
- }
- pool.knownQueue.setLatest(entry)
- entry.shortRetry = shortRetryCnt
- close(req.done)
-
- case req := <-pool.disconnCh:
- if req.entry.trusted {
- continue
- }
- // Handle peer disconnection requests.
- disconnect(req, req.stopped)
-
- case <-pool.closeCh:
- if pool.discSetPeriod != nil {
- close(pool.discSetPeriod)
- }
-
- // Spawn a goroutine to close the disconnCh after all connections are disconnected.
- go func() {
- pool.connWg.Wait()
- close(pool.disconnCh)
+ s.ns.SetState(n, nodestate.Flags{}, sfQueried, 0)
}()
-
- // Handle all remaining disconnection requests before exit.
- for req := range pool.disconnCh {
- disconnect(req, true)
- }
- pool.saveNodes()
- return
- }
- }
-}
-
-func (pool *serverPool) findOrNewNode(node *enode.Node) *poolEntry {
- now := mclock.Now()
- entry := pool.entries[node.ID()]
- if entry == nil {
- log.Debug("Discovered new entry", "id", node.ID())
- entry = &poolEntry{
- node: node,
- addr: make(map[string]*poolEntryAddress),
- addrSelect: *utils.NewWeightedRandomSelect(),
- shortRetry: shortRetryCnt,
}
- pool.entries[node.ID()] = entry
- // initialize previously unknown peers with good statistics to give a chance to prove themselves
- entry.connectStats.add(1, initStatsWeight)
- entry.delayStats.add(0, initStatsWeight)
- entry.responseStats.add(0, initStatsWeight)
- entry.timeoutStats.add(0, initStatsWeight)
- }
- entry.lastDiscovered = now
- addr := &poolEntryAddress{ip: node.IP(), port: uint16(node.TCP())}
- if a, ok := entry.addr[addr.strKey()]; ok {
- addr = a
- } else {
- entry.addr[addr.strKey()] = addr
- }
- addr.lastSeen = now
- entry.addrSelect.Update(addr)
- if !entry.known {
- pool.newQueue.setLatest(entry)
- }
- return entry
-}
-
-// loadNodes loads known nodes and their statistics from the database
-func (pool *serverPool) loadNodes() {
- enc, err := pool.db.Get(pool.dbKey)
- if err != nil {
- return
- }
- var list []*poolEntry
- err = rlp.DecodeBytes(enc, &list)
- if err != nil {
- log.Debug("Failed to decode node list", "err", err)
- return
- }
- for _, e := range list {
- log.Debug("Loaded server stats", "id", e.node.ID(), "fails", e.lastConnected.fails,
- "conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
- "delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
- "response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
- "timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
- pool.entries[e.node.ID()] = e
- if pool.trustedNodes[e.node.ID()] == nil {
- pool.knownQueue.setLatest(e)
- pool.knownSelect.Update((*knownEntry)(e))
- }
- }
-}
-
-// connectToTrustedNodes adds trusted server nodes as static trusted peers.
-//
-// Note: trusted nodes are not handled by the server pool logic, they are not
-// added to either the known or new selection pools. They are connected/reconnected
-// by p2p.Server whenever possible.
-func (pool *serverPool) connectToTrustedNodes() {
- //connect to trusted nodes
- for _, node := range pool.trustedNodes {
- pool.server.AddTrustedPeer(node)
- pool.server.AddPeer(node)
- log.Debug("Added trusted node", "id", node.ID().String())
- }
-}
-
-// parseTrustedNodes returns valid and parsed enodes
-func parseTrustedNodes(trustedNodes []string) map[enode.ID]*enode.Node {
- nodes := make(map[enode.ID]*enode.Node)
-
- for _, node := range trustedNodes {
- node, err := enode.Parse(enode.ValidSchemes, node)
- if err != nil {
- log.Warn("Trusted node URL invalid", "enode", node, "err", err)
- continue
- }
- nodes[node.ID()] = node
- }
- return nodes
-}
-
-// saveNodes saves known nodes and their statistics into the database. Nodes are
-// ordered from least to most recently connected.
-func (pool *serverPool) saveNodes() {
- list := make([]*poolEntry, len(pool.knownQueue.queue))
- for i := range list {
- list[i] = pool.knownQueue.fetchOldest()
- }
- enc, err := rlp.EncodeToBytes(list)
- if err == nil {
- pool.db.Put(pool.dbKey, enc)
- }
-}
-
-// removeEntry removes a pool entry when the entry count limit is reached.
-// Note that it is called by the new/known queues from which the entry has already
-// been removed so removing it from the queues is not necessary.
-func (pool *serverPool) removeEntry(entry *poolEntry) {
- pool.newSelect.Remove((*discoveredEntry)(entry))
- pool.knownSelect.Remove((*knownEntry)(entry))
- entry.removed = true
- delete(pool.entries, entry.node.ID())
-}
-
-// setRetryDial starts the timer which will enable dialing a certain node again
-func (pool *serverPool) setRetryDial(entry *poolEntry) {
- delay := longRetryDelay
- if entry.shortRetry > 0 {
- entry.shortRetry--
- delay = shortRetryDelay
- }
- delay += time.Duration(rand.Int63n(int64(delay) + 1))
- entry.delayedRetry = true
- go func() {
- select {
- case <-pool.closeCh:
- case <-time.After(delay):
- select {
- case <-pool.closeCh:
- case pool.enableRetry <- entry:
- }
+ })
+ return lpc.NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
+ if waiting {
+ s.fillSet.SetTarget(preNegLimit)
+ } else {
+ s.fillSet.SetTarget(0)
}
- }()
-}
-
-// updateCheckDial is called when an entry can potentially be dialed again. It updates
-// its selection weights and checks if new dials can/should be made.
-func (pool *serverPool) updateCheckDial(entry *poolEntry) {
- pool.newSelect.Update((*discoveredEntry)(entry))
- pool.knownSelect.Update((*knownEntry)(entry))
- pool.checkDial()
+ })
}
-// checkDial checks if new dials can/should be made. It tries to select servers both
-// based on good statistics and recent discovery.
-func (pool *serverPool) checkDial() {
- fillWithKnownSelects := !pool.fastDiscover
- for pool.knownSelected < targetKnownSelect {
- entry := pool.knownSelect.Choose()
- if entry == nil {
- fillWithKnownSelects = false
- break
- }
- pool.dial((*poolEntry)(entry.(*knownEntry)), true)
- }
- for pool.knownSelected+pool.newSelected < targetServerCount {
- entry := pool.newSelect.Choose()
- if entry == nil {
- break
- }
- pool.dial((*poolEntry)(entry.(*discoveredEntry)), false)
+// start starts the server pool. Note that NodeStateMachine should be started first.
+func (s *serverPool) start() {
+ s.ns.Start()
+ for _, iter := range s.mixSources {
+ // add sources to mixer at startup because the mixer instantly tries to read them
+ // which should only happen after NodeStateMachine has been started
+ s.mixer.AddSource(iter)
}
- if fillWithKnownSelects {
- // no more newly discovered nodes to select and since fast discover period
- // is over, we probably won't find more in the near future so select more
- // known entries if possible
- for pool.knownSelected < targetServerCount {
- entry := pool.knownSelect.Choose()
- if entry == nil {
- break
- }
- pool.dial((*poolEntry)(entry.(*knownEntry)), true)
+ for _, url := range s.trustedURLs {
+ if node, err := enode.Parse(s.validSchemes, url); err == nil {
+ s.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)
+ } else {
+ log.Error("Invalid trusted server URL", "url", url, "error", err)
}
}
-}
-
-// dial initiates a new connection
-func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
- if pool.server == nil || entry.state != psNotConnected {
- return
- }
- entry.state = psDialed
- entry.knownSelected = knownSelected
- if knownSelected {
- pool.knownSelected++
- } else {
- pool.newSelected++
- }
- addr := entry.addrSelect.Choose().(*poolEntryAddress)
- log.Debug("Dialing new peer", "lesaddr", entry.node.ID().String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
- entry.dialed = addr
- go func() {
- pool.server.AddPeer(entry.node)
- select {
- case <-pool.closeCh:
- case <-time.After(dialTimeout):
- select {
- case <-pool.closeCh:
- case pool.timeout <- entry:
+ unixTime := s.unixTime()
+ s.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
+ s.calculateWeight(node)
+ if n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {
+ wait := n.redialWaitEnd - unixTime
+ lastWait := n.redialWaitEnd - n.redialWaitStart
+ if wait > lastWait {
+ // if the time until expiration is larger than the last suggested
+ // waiting time then the system clock was probably adjusted
+ wait = lastWait
}
+ s.ns.SetState(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)
}
- }()
-}
-
-// checkDialTimeout checks if the node is still in dialed state and if so, resets it
-// and adjusts connection statistics accordingly.
-func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
- if entry.state != psDialed {
- return
- }
- log.Debug("Dial timeout", "lesaddr", entry.node.ID().String()+"@"+entry.dialed.strKey())
- entry.state = psNotConnected
- if entry.knownSelected {
- pool.knownSelected--
- } else {
- pool.newSelected--
- }
- entry.connectStats.add(0, 1)
- entry.dialed.fails++
- pool.setRetryDial(entry)
-}
-
-const (
- psNotConnected = iota
- psDialed
- psConnected
- psRegistered
-)
-
-// poolEntry represents a server node and stores its current state and statistics.
-type poolEntry struct {
- peer *serverPeer
- pubkey [56]byte // ed448 key of the node
- addr map[string]*poolEntryAddress
- node *enode.Node
- lastConnected, dialed *poolEntryAddress
- addrSelect utils.WeightedRandomSelect
-
- lastDiscovered mclock.AbsTime
- known, knownSelected, trusted bool
- connectStats, delayStats poolStats
- responseStats, timeoutStats poolStats
- state int
- regTime mclock.AbsTime
- queueIdx int
- removed bool
-
- delayedRetry bool
- shortRetry int
-}
-
-// poolEntryEnc is the RLP encoding of poolEntry.
-type poolEntryEnc struct {
- Pubkey []byte
- IP net.IP
- Port uint16
- Fails uint
- CStat, DStat, RStat, TStat poolStats
-}
-
-func (e *poolEntry) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, &poolEntryEnc{
- Pubkey: encodePubkey64(e.node.Pubkey()),
- IP: e.lastConnected.ip,
- Port: e.lastConnected.port,
- Fails: e.lastConnected.fails,
- CStat: e.connectStats,
- DStat: e.delayStats,
- RStat: e.responseStats,
- TStat: e.timeoutStats,
})
}
-func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
- var entry poolEntryEnc
- if err := s.Decode(&entry); err != nil {
- return err
- }
- pubkey, err := decodePubkey64(entry.Pubkey)
- if err != nil {
- return err
- }
- addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()}
- e.node = enode.NewV4(pubkey, entry.IP, int(entry.Port), int(entry.Port))
- e.addr = make(map[string]*poolEntryAddress)
- e.addr[addr.strKey()] = addr
- e.addrSelect = *utils.NewWeightedRandomSelect()
- e.addrSelect.Update(addr)
- e.lastConnected = addr
- e.connectStats = entry.CStat
- e.delayStats = entry.DStat
- e.responseStats = entry.RStat
- e.timeoutStats = entry.TStat
- e.shortRetry = shortRetryCnt
- e.known = true
- return nil
-}
-
-func encodePubkey64(pub *eddsa.PublicKey) []byte {
- return crypto.FromEDDSAPub(pub)
-}
-
-func decodePubkey64(b []byte) (*eddsa.PublicKey, error) {
- return crypto.UnmarshalPubkey(b)
-}
-
-// discoveredEntry implements wrsItem
-type discoveredEntry poolEntry
-
-// Weight calculates random selection weight for newly discovered entries
-func (e *discoveredEntry) Weight() int64 {
- if e.state != psNotConnected || e.delayedRetry {
- return 0
+// stop stops the server pool
+func (s *serverPool) stop() {
+ s.dialIterator.Close()
+ if s.fillSet != nil {
+ s.fillSet.Close()
}
- t := time.Duration(mclock.Now() - e.lastDiscovered)
- if t <= discoverExpireStart {
- return 1000000000
+ s.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {
+ // recalculate weight of connected nodes in order to update hasValue flag if necessary
+ s.calculateWeight(n)
+ })
+ s.ns.Stop()
+}
+
+// registerPeer implements serverPeerSubscriber
+func (s *serverPool) registerPeer(p *serverPeer) {
+ s.ns.SetState(p.Node(), sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)
+ nvt := s.vt.Register(p.ID())
+ s.ns.SetField(p.Node(), sfiConnectedStats, nvt.RtStats())
+ p.setValueTracker(s.vt, nvt)
+ p.updateVtParams()
+}
+
+// unregisterPeer implements serverPeerSubscriber
+func (s *serverPool) unregisterPeer(p *serverPeer) {
+ s.setRedialWait(p.Node(), dialCost, dialWaitStep)
+ s.ns.SetState(p.Node(), nodestate.Flags{}, sfConnected, 0)
+ s.ns.SetField(p.Node(), sfiConnectedStats, nil)
+ s.vt.Unregister(p.ID())
+ p.setValueTracker(nil, nil)
+}
+
+// recalTimeout calculates the current recommended timeout. This value is used by
+// the client as a "soft timeout" value. It also affects the service value calculation
+// of individual nodes.
+func (s *serverPool) recalTimeout() {
+ // Use cached result if possible, avoid recalculating too frequently.
+ s.timeoutLock.RLock()
+ refreshed := s.timeoutRefreshed
+ s.timeoutLock.RUnlock()
+ now := s.clock.Now()
+ if refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {
+ return
}
- return int64(1000000000 * math.Exp(-float64(t-discoverExpireStart)/float64(discoverExpireConst)))
-}
+ // Cached result is stale, recalculate a new one.
+ rts := s.vt.RtStats()
-// knownEntry implements wrsItem
-type knownEntry poolEntry
-
-// Weight calculates random selection weight for known entries
-func (e *knownEntry) Weight() int64 {
- if e.state != psNotConnected || !e.known || e.delayedRetry {
- return 0
+ // Add a fake statistic here. It is an easy way to initialize with some
+ // conservative values when the database is new. As soon as we have a
+ // considerable amount of real stats this small value won't matter.
+ rts.Add(time.Second*2, 10, s.vt.StatsExpFactor())
+ // Use either 10% failure rate timeout or twice the median response time
+ // as the recommended timeout.
+ timeout := minTimeout
+ if t := rts.Timeout(0.1); t > timeout {
+ timeout = t
}
- return int64(1000000000 * e.connectStats.recentAvg() * math.Exp(-float64(e.lastConnected.fails)*failDropLn-e.responseStats.recentAvg()/float64(responseScoreTC)-e.delayStats.recentAvg()/float64(delayScoreTC)) * math.Pow(1-e.timeoutStats.recentAvg(), timeoutPow))
-}
-
-// poolEntryAddress is a separate object because currently it is necessary to remember
-// multiple potential network addresses for a pool entry. This will be removed after
-// the final implementation of v5 discovery which will retrieve signed and serial
-// numbered advertisements, making it clear which IP/port is the latest one.
-type poolEntryAddress struct {
- ip net.IP
- port uint16
- lastSeen mclock.AbsTime // last time it was discovered, connected or loaded from db
- fails uint // connection failures since last successful connection (persistent)
-}
-
-func (a *poolEntryAddress) Weight() int64 {
- t := time.Duration(mclock.Now() - a.lastSeen)
- return int64(1000000*math.Exp(-float64(t)/float64(discoverExpireConst)-float64(a.fails)*addrFailDropLn)) + 1
-}
-
-func (a *poolEntryAddress) strKey() string {
- return a.ip.String() + ":" + strconv.Itoa(int(a.port))
-}
-
-// poolStats implement statistics for a certain quantity with a long term average
-// and a short term value which is adjusted exponentially with a factor of
-// pstatRecentAdjust with each update and also returned exponentially to the
-// average with the time constant pstatReturnToMeanTC
-type poolStats struct {
- sum, weight, avg, recent float64
- lastRecalc mclock.AbsTime
-}
+ if t := rts.Timeout(0.5) * 2; t > timeout {
+ timeout = t
-// init initializes stats with a long term sum/update count pair retrieved from the database
-func (s *poolStats) init(sum, weight float64) {
- s.sum = sum
- s.weight = weight
- var avg float64
- if weight > 0 {
- avg = s.sum / weight
}
- s.avg = avg
- s.recent = avg
- s.lastRecalc = mclock.Now()
-}
+ s.timeoutLock.Lock()
+ if s.timeout != timeout {
+ s.timeout = timeout
+ s.timeWeights = lpc.TimeoutWeights(s.timeout)
-// recalc recalculates recent value return-to-mean and long term average
-func (s *poolStats) recalc() {
- now := mclock.Now()
- s.recent = s.avg + (s.recent-s.avg)*math.Exp(-float64(now-s.lastRecalc)/float64(pstatReturnToMeanTC))
- if s.sum == 0 {
- s.avg = 0
- } else {
- if s.sum > s.weight*1e30 {
- s.avg = 1e30
- } else {
- s.avg = s.sum / s.weight
- }
+ suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
+ totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
}
- s.lastRecalc = now
-}
-
-// add updates the stats with a new value
-func (s *poolStats) add(value, weight float64) {
- s.weight += weight
- s.sum += value * weight
- s.recalc()
+ s.timeoutRefreshed = now
+ s.timeoutLock.Unlock()
}
-// recentAvg returns the short-term adjusted average
-func (s *poolStats) recentAvg() float64 {
- s.recalc()
- return s.recent
+// getTimeout returns the recommended request timeout.
+func (s *serverPool) getTimeout() time.Duration {
+ s.recalTimeout()
+ s.timeoutLock.RLock()
+ defer s.timeoutLock.RUnlock()
+ return s.timeout
}
-func (s *poolStats) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, []interface{}{math.Float64bits(s.sum), math.Float64bits(s.weight)})
+// getTimeoutAndWeight returns the recommended request timeout as well as the
+// response time weight which is necessary to calculate service value.
+func (s *serverPool) getTimeoutAndWeight() (time.Duration, lpc.ResponseTimeWeights) {
+ s.recalTimeout()
+ s.timeoutLock.RLock()
+ defer s.timeoutLock.RUnlock()
+ return s.timeout, s.timeWeights
}
-func (s *poolStats) DecodeRLP(st *rlp.Stream) error {
- var stats struct {
- SumUint, WeightUint uint64
+// addDialCost adds the given amount of dial cost to the node history and returns the current
+// amount of total dial cost
+func (s *serverPool) addDialCost(n *nodeHistory, amount int64) uint64 {
+ logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())
+ if amount > 0 {
+ n.dialCost.Add(amount, logOffset)
}
- if err := st.Decode(&stats); err != nil {
- return err
+ totalDialCost := n.dialCost.Value(logOffset)
+ if totalDialCost < dialCost {
+ totalDialCost = dialCost
}
- s.init(math.Float64frombits(stats.SumUint), math.Float64frombits(stats.WeightUint))
- return nil
-}
-
-// poolEntryQueue keeps track of its least recently accessed entries and removes
-// them when the number of entries reaches the limit
-type poolEntryQueue struct {
- queue map[int]*poolEntry // known nodes indexed by their latest lastConnCnt value
- newPtr, oldPtr, maxCnt int
- removeFromPool func(*poolEntry)
+ return totalDialCost
}
-// newPoolEntryQueue returns a new poolEntryQueue
-func newPoolEntryQueue(maxCnt int, removeFromPool func(*poolEntry)) poolEntryQueue {
- return poolEntryQueue{queue: make(map[int]*poolEntry), maxCnt: maxCnt, removeFromPool: removeFromPool}
-}
-
-// fetchOldest returns and removes the least recently accessed entry
-func (q *poolEntryQueue) fetchOldest() *poolEntry {
- if len(q.queue) == 0 {
- return nil
- }
- for {
- if e := q.queue[q.oldPtr]; e != nil {
- delete(q.queue, q.oldPtr)
- q.oldPtr++
- return e
- }
- q.oldPtr++
+// serviceValue returns the service value accumulated in this session and in total
+func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {
+ nvt := s.vt.GetNode(node.ID())
+ if nvt == nil {
+ return 0, 0
}
-}
+ currentStats := nvt.RtStats()
+ _, timeWeights := s.getTimeoutAndWeight()
+ expFactor := s.vt.StatsExpFactor()
-// remove removes an entry from the queue
-func (q *poolEntryQueue) remove(entry *poolEntry) {
- if q.queue[entry.queueIdx] == entry {
- delete(q.queue, entry.queueIdx)
+ totalValue = currentStats.Value(timeWeights, expFactor)
+ if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(lpc.ResponseTimeStats); ok {
+ diff := currentStats
+ diff.SubStats(&connStats)
+ sessionValue = diff.Value(timeWeights, expFactor)
+ sessionValueMeter.Mark(int64(sessionValue))
}
+ return
}
-// setLatest adds or updates a recently accessed entry. It also checks if an old entry
-// needs to be removed and removes it from the parent pool too with a callback function.
-func (q *poolEntryQueue) setLatest(entry *poolEntry) {
- if q.queue[entry.queueIdx] == entry {
- delete(q.queue, entry.queueIdx)
+// updateWeight calculates the node weight and updates the nodeWeight field and the
+// hasValue flag. It also saves the node state if necessary.
+func (s *serverPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {
+ weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))
+ if weight >= nodeWeightThreshold {
+ s.ns.SetState(node, sfHasValue, nodestate.Flags{}, 0)
+ s.ns.SetField(node, sfiNodeWeight, weight)
} else {
- if len(q.queue) == q.maxCnt {
- e := q.fetchOldest()
- q.remove(e)
- q.removeFromPool(e)
- }
- }
- entry.queueIdx = q.newPtr
- q.queue[entry.queueIdx] = entry
- q.newPtr++
+ s.ns.SetState(node, nodestate.Flags{}, sfHasValue, 0)
+ s.ns.SetField(node, sfiNodeWeight, nil)
+ }
+ s.ns.Persist(node) // saved if node history or hasValue changed
+}
+
+// setRedialWait calculates and sets the redialWait timeout based on the service value
+// and dial cost accumulated during the last session/attempt and in total.
+// The waiting time is raised exponentially if no service value has been received in order
+// to prevent dialing an unresponsive node frequently for a very long time just because it
+// was useful in the past. It can still be occasionally dialed though and once it provides
+// a significant amount of service value again its waiting time is quickly reduced or reset
+// to the minimum.
+// Note: node weight is also recalculated and updated by this function.
+func (s *serverPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {
+ n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
+ sessionValue, totalValue := s.serviceValue(node)
+ totalDialCost := s.addDialCost(&n, addDialCost)
+
+ // if the current dial session has yielded at least the average value/dial cost ratio
+ // then the waiting time should be reset to the minimum. If the session value
+ // is below average but still positive then timeout is limited to the ratio of
+ // average / current service value multiplied by the minimum timeout. If the attempt
+ // was unsuccessful then timeout is raised exponentially without limitation.
+ // Note: dialCost is used in the formula below even if dial was not attempted at all
+ // because the pre-negotiation query did not return a positive result. In this case
+ // the ratio has no meaning anyway and waitFactor is always raised, though in smaller
+ // steps because queries are cheaper and therefore we can allow more failed attempts.
+ unixTime := s.unixTime()
+ plannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout
+ var actualWait float64 // actual waiting time elapsed
+ if unixTime > n.redialWaitEnd {
+ // the planned timeout has elapsed
+ actualWait = plannedTimeout
+ } else {
+ // if the node was redialed earlier then we do not raise the planned timeout
+ // exponentially because that could lead to the timeout rising very high in
+ // a short amount of time
+ // Note that in case of an early redial actualWait also includes the dial
+ // timeout or connection time of the last attempt but it still serves its
+ // purpose of preventing the timeout rising quicker than linearly as a function
+ // of total time elapsed without a successful connection.
+ actualWait = float64(unixTime - n.redialWaitStart)
+ }
+ // raise timeout exponentially if the last planned timeout has elapsed
+ // (use at least the last planned timeout otherwise)
+ nextTimeout := actualWait * waitStep
+ if plannedTimeout > nextTimeout {
+ nextTimeout = plannedTimeout
+ }
+ // we reduce the waiting time if the server has provided service value during the
+ // connection (but never under the minimum)
+ a := totalValue * dialCost * float64(minRedialWait)
+ b := float64(totalDialCost) * sessionValue
+ if a < b*nextTimeout {
+ nextTimeout = a / b
+ }
+ if nextTimeout < minRedialWait {
+ nextTimeout = minRedialWait
+ }
+ wait := time.Duration(float64(time.Second) * nextTimeout)
+ if wait < waitThreshold {
+ n.redialWaitStart = unixTime
+ n.redialWaitEnd = unixTime + int64(nextTimeout)
+ s.ns.SetField(node, sfiNodeHistory, n)
+ s.ns.SetState(node, sfRedialWait, nodestate.Flags{}, wait)
+ s.updateWeight(node, totalValue, totalDialCost)
+ } else {
+ // discard known node statistics if waiting time is very long because the node
+ // hasn't been responsive for a very long time
+ s.ns.SetField(node, sfiNodeHistory, nil)
+ s.ns.SetField(node, sfiNodeWeight, nil)
+ s.ns.SetState(node, nodestate.Flags{}, sfHasValue, 0)
+ }
+}
+
+// calculateWeight calculates and sets the node weight without altering the node history.
+// This function should be called during startup and shutdown only, otherwise setRedialWait
+// will keep the weights updated as the underlying statistics are adjusted.
+func (s *serverPool) calculateWeight(node *enode.Node) {
+ n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
+ _, totalValue := s.serviceValue(node)
+ totalDialCost := s.addDialCost(&n, 0)
+ s.updateWeight(node, totalValue, totalDialCost)
}
diff --git a/les/serverpool_test.go b/les/serverpool_test.go
new file mode 100644
index 000000000..c0597b77f
--- /dev/null
+++ b/les/serverpool_test.go
@@ -0,0 +1,354 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package les
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ lpc "github.com/core-coin/go-core/les/lespay/client"
+ "github.com/core-coin/go-core/p2p"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/xcbdb"
+ "github.com/core-coin/go-core/xcbdb/memorydb"
+ "math/rand"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+const (
+ spTestNodes = 1000
+ spTestTarget = 5
+ spTestLength = 10000
+ spMinTotal = 40000
+ spMaxTotal = 50000
+)
+
+func testNodeID(i int) enode.ID {
+ return enode.ID{42, byte(i % 256), byte(i / 256)}
+}
+
+func testNodeIndex(id enode.ID) int {
+ if id[0] != 42 {
+ return -1
+ }
+ return int(id[1]) + int(id[2])*256
+}
+
+type serverPoolTest struct {
+ db xcbdb.KeyValueStore
+ clock *mclock.Simulated
+ quit chan struct{}
+ preNeg, preNegFail bool
+ vt *lpc.ValueTracker
+ sp *serverPool
+ input enode.Iterator
+ testNodes []spTestNode
+ trusted []string
+ waitCount, waitEnded int32
+
+ cycle, conn, servedConn int
+ serviceCycles, dialCount int
+ disconnect map[int][]int
+}
+
+type spTestNode struct {
+ connectCycles, waitCycles int
+ nextConnCycle, totalConn int
+ connected, service bool
+ peer *serverPeer
+}
+
+func newServerPoolTest(preNeg, preNegFail bool) *serverPoolTest {
+ nodes := make([]*enode.Node, spTestNodes)
+ for i := range nodes {
+ nodes[i] = enode.SignNull(&enr.Record{}, testNodeID(i))
+ }
+ return &serverPoolTest{
+ clock: &mclock.Simulated{},
+ db: memorydb.New(),
+ input: enode.CycleNodes(nodes),
+ testNodes: make([]spTestNode, spTestNodes),
+ preNeg: preNeg,
+ preNegFail: preNegFail,
+ }
+}
+
+func (s *serverPoolTest) beginWait() {
+ // ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state
+ for atomic.AddInt32(&s.waitCount, 1) > preNegLimit {
+ atomic.AddInt32(&s.waitCount, -1)
+ s.clock.Run(time.Second)
+ }
+}
+
+func (s *serverPoolTest) endWait() {
+ atomic.AddInt32(&s.waitCount, -1)
+ atomic.AddInt32(&s.waitEnded, 1)
+}
+
+func (s *serverPoolTest) addTrusted(i int) {
+ s.trusted = append(s.trusted, enode.SignNull(&enr.Record{}, testNodeID(i)).String())
+}
+
+func (s *serverPoolTest) start() {
+ var testQuery queryFunc
+ if s.preNeg {
+ testQuery = func(node *enode.Node) int {
+ idx := testNodeIndex(node.ID())
+ n := &s.testNodes[idx]
+ canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle
+ if s.preNegFail {
+ // simulate a scenario where UDP queries never work
+ s.beginWait()
+ s.clock.Sleep(time.Second * 5)
+ s.endWait()
+ return -1
+ } else {
+ switch idx % 3 {
+ case 0:
+ // pre-neg returns true only if connection is possible
+ if canConnect {
+ return 1
+ } else {
+ return 0
+ }
+ case 1:
+ // pre-neg returns true but connection might still fail
+ return 1
+ case 2:
+ // pre-neg returns true if connection is possible, otherwise timeout (node unresponsive)
+ if canConnect {
+ return 1
+ } else {
+ s.beginWait()
+ s.clock.Sleep(time.Second * 5)
+ s.endWait()
+ return -1
+ }
+ }
+ return -1
+ }
+ }
+ }
+
+ s.vt = lpc.NewValueTracker(s.db, s.clock, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000))
+ s.sp = newServerPool(s.db, []byte("serverpool:"), s.vt, s.input, 0, testQuery, s.clock, s.trusted)
+ s.sp.validSchemes = enode.ValidSchemesForTesting
+ s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) }
+ s.disconnect = make(map[int][]int)
+ s.sp.start()
+ s.quit = make(chan struct{})
+ go func() {
+ last := int32(-1)
+ for {
+ select {
+ case <-time.After(time.Millisecond * 100):
+ c := atomic.LoadInt32(&s.waitEnded)
+ if c == last {
+ // advance clock if test is stuck (might happen in rare cases)
+ s.clock.Run(time.Second)
+ }
+ last = c
+ case <-s.quit:
+ return
+ }
+ }
+ }()
+}
+
+func (s *serverPoolTest) stop() {
+ close(s.quit)
+ s.sp.stop()
+ s.vt.Stop()
+ for i := range s.testNodes {
+ n := &s.testNodes[i]
+ if n.connected {
+ n.totalConn += s.cycle
+ }
+ n.connected = false
+ n.peer = nil
+ n.nextConnCycle = 0
+ }
+ s.conn, s.servedConn = 0, 0
+}
+
+func (s *serverPoolTest) run() {
+ for count := spTestLength; count > 0; count-- {
+ if dcList := s.disconnect[s.cycle]; dcList != nil {
+ for _, idx := range dcList {
+ n := &s.testNodes[idx]
+ s.sp.unregisterPeer(n.peer)
+ n.totalConn += s.cycle
+ n.connected = false
+ n.peer = nil
+ s.conn--
+ if n.service {
+ s.servedConn--
+ }
+ n.nextConnCycle = s.cycle + n.waitCycles
+ }
+ delete(s.disconnect, s.cycle)
+ }
+ if s.conn < spTestTarget {
+ s.dialCount++
+ s.beginWait()
+ s.sp.dialIterator.Next()
+ s.endWait()
+ dial := s.sp.dialIterator.Node()
+ id := dial.ID()
+ idx := testNodeIndex(id)
+ n := &s.testNodes[idx]
+ if !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle {
+ s.conn++
+ if n.service {
+ s.servedConn++
+ }
+ n.totalConn -= s.cycle
+ n.connected = true
+ dc := s.cycle + n.connectCycles
+ s.disconnect[dc] = append(s.disconnect[dc], idx)
+ n.peer = &serverPeer{peerCommons: peerCommons{Peer: p2p.NewPeer(id, "", nil)}}
+ s.sp.registerPeer(n.peer)
+ if n.service {
+ s.vt.Served(s.vt.GetNode(id), []lpc.ServedRequest{{ReqType: 0, Amount: 100}}, 0)
+ }
+ }
+ }
+ s.serviceCycles += s.servedConn
+ s.clock.Run(time.Second)
+ s.cycle++
+ }
+}
+
+func (s *serverPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) {
+ for ; count > 0; count-- {
+ idx := rand.Intn(spTestNodes)
+ for s.testNodes[idx].connectCycles != 0 || s.testNodes[idx].connected {
+ idx = rand.Intn(spTestNodes)
+ }
+ res = append(res, idx)
+ s.testNodes[idx] = spTestNode{
+ connectCycles: conn,
+ waitCycles: wait,
+ service: service,
+ }
+ if trusted {
+ s.addTrusted(idx)
+ }
+ }
+ return
+}
+
+func (s *serverPoolTest) resetNodes() {
+ for i, n := range s.testNodes {
+ if n.connected {
+ n.totalConn += s.cycle
+ s.sp.unregisterPeer(n.peer)
+ }
+ s.testNodes[i] = spTestNode{totalConn: n.totalConn}
+ }
+ s.conn, s.servedConn = 0, 0
+ s.disconnect = make(map[int][]int)
+ s.trusted = nil
+}
+
+func (s *serverPoolTest) checkNodes(t *testing.T, nodes []int) {
+ var sum int
+ for _, idx := range nodes {
+ n := &s.testNodes[idx]
+ if n.connected {
+ n.totalConn += s.cycle
+ }
+ sum += n.totalConn
+ n.totalConn = 0
+ if n.connected {
+ n.totalConn -= s.cycle
+ }
+ }
+ if sum < spMinTotal || sum > spMaxTotal {
+ t.Errorf("Total connection amount %d outside expected range %d to %d", sum, spMinTotal, spMaxTotal)
+ }
+}
+
+func TestServerPool(t *testing.T) { testServerPool(t, false, false) }
+func TestServerPoolWithPreNeg(t *testing.T) { testServerPool(t, true, false) }
+func TestServerPoolWithPreNegFail(t *testing.T) { testServerPool(t, true, true) }
+func testServerPool(t *testing.T, preNeg, fail bool) {
+ t.Skip("skip long-running tests")
+ s := newServerPoolTest(preNeg, fail)
+ nodes := s.setNodes(100, 200, 200, true, false)
+ s.setNodes(100, 20, 20, false, false)
+ s.start()
+ s.run()
+ s.stop()
+ s.checkNodes(t, nodes)
+}
+
+func TestServerPoolChangedNodes(t *testing.T) { testServerPoolChangedNodes(t, false) }
+func TestServerPoolChangedNodesWithPreNeg(t *testing.T) { testServerPoolChangedNodes(t, true) }
+func testServerPoolChangedNodes(t *testing.T, preNeg bool) {
+ t.Skip("skip long-running tests")
+ s := newServerPoolTest(preNeg, false)
+ nodes := s.setNodes(100, 200, 200, true, false)
+ s.setNodes(100, 20, 20, false, false)
+ s.start()
+ s.run()
+ s.checkNodes(t, nodes)
+ for i := 0; i < 3; i++ {
+ s.resetNodes()
+ nodes := s.setNodes(100, 200, 200, true, false)
+ s.setNodes(100, 20, 20, false, false)
+ s.run()
+ s.checkNodes(t, nodes)
+ }
+ s.stop()
+}
+
+func TestServerPoolRestartNoDiscovery(t *testing.T) { testServerPoolRestartNoDiscovery(t, false) }
+func TestServerPoolRestartNoDiscoveryWithPreNeg(t *testing.T) {
+ t.Skip("skip long-running tests")
+ testServerPoolRestartNoDiscovery(t, true)
+}
+func testServerPoolRestartNoDiscovery(t *testing.T, preNeg bool) {
+ s := newServerPoolTest(preNeg, false)
+ nodes := s.setNodes(100, 200, 200, true, false)
+ s.setNodes(100, 20, 20, false, false)
+ s.start()
+ s.run()
+ s.stop()
+ s.checkNodes(t, nodes)
+ s.input = nil
+ s.start()
+ s.run()
+ s.stop()
+ s.checkNodes(t, nodes)
+}
+
+func TestServerPoolTrustedNoDiscovery(t *testing.T) { testServerPoolTrustedNoDiscovery(t, false) }
+func TestServerPoolTrustedNoDiscoveryWithPreNeg(t *testing.T) {
+ testServerPoolTrustedNoDiscovery(t, true)
+}
+func testServerPoolTrustedNoDiscovery(t *testing.T, preNeg bool) {
+ s := newServerPoolTest(preNeg, false)
+ trusted := s.setNodes(200, 200, 200, true, true)
+ s.input = nil
+ s.start()
+ s.run()
+ s.stop()
+ s.checkNodes(t, trusted)
+}
diff --git a/les/sync_test.go b/les/sync_test.go
index 1560eec84..dace3ef41 100644
--- a/les/sync_test.go
+++ b/les/sync_test.go
@@ -53,7 +53,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { //TODO: T
if cs >= 1 && bts >= 1 {
break
}
- time.Sleep(10 * time.Millisecond)
+ time.Sleep(20 * time.Millisecond)
}
}
// Generate 512+4 blocks (totally 1 CHT sections)
@@ -92,7 +92,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { //TODO: T
for {
_, hash, _, err := server.handler.server.oracle.Contract().Contract().GetLatestCheckpoint(nil)
if err != nil || hash == [32]byte{} {
- time.Sleep(10 * time.Millisecond)
+ time.Sleep(20 * time.Millisecond)
continue
}
break
@@ -125,7 +125,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { //TODO: T
t.Error("sync failed", err)
}
return
- case <-time.NewTimer(10 * time.Second).C:
+ case <-time.NewTimer(25 * time.Second).C:
t.Error("checkpoint syncing timeout")
}
}
diff --git a/les/test_helper.go b/les/test_helper.go
index 76138d0db..0f26c0aa4 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -228,6 +228,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
if client.oracle != nil {
client.oracle.Start(backend)
}
+ client.handler.start()
return client.handler
}
@@ -513,7 +514,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer
clock = &mclock.Simulated{}
}
dist := newRequestDistributor(speers, clock)
- rm := newRetrieveManager(speers, dist, nil)
+ rm := newRetrieveManager(speers, dist, func() time.Duration { return time.Millisecond * 500 })
odr := NewLesOdr(cdb, light.TestClientIndexerConfig, rm)
sindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig, true)
@@ -547,8 +548,8 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer
}
select {
case <-done:
- case <-time.After(10 * time.Second):
- t.Fatal("test peer did not connect and sync within 3s")
+ case <-time.After(20 * time.Second):
+ t.Fatal("test peer did not connect and sync within 20s")
}
}
s := &testServer{
diff --git a/les/txrelay.go b/les/txrelay.go
index 06dd4d6a5..20ae8cc0e 100644
--- a/les/txrelay.go
+++ b/les/txrelay.go
@@ -144,7 +144,7 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
peer := dp.(*serverPeer)
cost := peer.getTxRelayCost(len(ll), len(enc))
peer.fcServer.QueuedRequest(reqID, cost)
- return func() { peer.sendTxs(reqID, enc) }
+ return func() { peer.sendTxs(reqID, len(ll), enc) }
},
}
go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)
diff --git a/les/ulc_test.go b/les/ulc_test.go
index 2257cc388..acfed4921 100644
--- a/les/ulc_test.go
+++ b/les/ulc_test.go
@@ -116,7 +116,7 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr
}()
select {
- case <-time.After(time.Millisecond * 100):
+ case <-time.After(time.Millisecond * 500):
case err := <-errc1:
return nil, nil, fmt.Errorf("peerLight handshake error: %v", err)
case err := <-errc2:
diff --git a/les/utils/expiredvalue.go b/les/utils/expiredvalue.go
new file mode 100644
index 000000000..16e997043
--- /dev/null
+++ b/les/utils/expiredvalue.go
@@ -0,0 +1,246 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "math"
+)
+
+// ExpiredValue is a scalar value that is continuously expired (decreased
+// exponentially) based on the provided logarithmic expiration offset value.
+//
+// The formula for value calculation is: base*2^(exp-logOffset). In order to
+// simplify the calculation of ExpiredValue, its value is expressed in the form
+// of an exponent with a base of 2.
+//
+// Also here is a trick to reduce a lot of calculations. In theory, when a value X
+// decays over time and then a new value Y is added, the final result should be
+// X*2^(exp-logOffset)+Y. However it's very hard to represent in memory.
+// So the trick is using the idea of inflation instead of exponential decay. At this
+// moment the temporary value becomes: X*2^exp+Y*2^logOffset_1, apply the exponential
+// decay when we actually want to calculate the value.
+//
+// e.g.
+// t0: V = 100
+// t1: add 30, inflationary value is: 100 + 30/0.3, 0.3 is the decay coefficient
+// t2: get value, decay coefficient is 0.2 now, final result is: 200*0.2 = 40
+type ExpiredValue struct {
+ Base, Exp uint64 // rlp encoding works by default
+}
+
+// ExpirationFactor is calculated from logOffset. 1 <= Factor < 2 and Factor*2^Exp
+// describes the multiplier applicable for additions and the divider for readouts.
+// If logOffset changes slowly then it saves some expensive operations to not calculate
+// them for each addition and readout but cache this intermediate form for some time.
+// It is also useful for structures where multiple values are expired with the same
+// Expirer.
+type ExpirationFactor struct {
+ Exp uint64
+ Factor float64
+}
+
+// ExpFactor calculates ExpirationFactor based on logOffset
+func ExpFactor(logOffset Fixed64) ExpirationFactor {
+ return ExpirationFactor{Exp: logOffset.ToUint64(), Factor: logOffset.Fraction().Pow2()}
+}
+
+// Value calculates the expired value based on a floating point base and integer
+// power-of-2 exponent. This function should be used by multi-value expired structures.
+func (e ExpirationFactor) Value(base float64, exp uint64) float64 {
+ return base / e.Factor * math.Pow(2, float64(int64(exp-e.Exp)))
+}
+
+// Value calculates the expired value at the given moment.
+func (e ExpiredValue) Value(logOffset Fixed64) uint64 {
+ offset := Uint64ToFixed64(e.Exp) - logOffset
+ return uint64(float64(e.Base) * offset.Pow2())
+}
+
+// Add adds a signed value at the given moment.
+func (e *ExpiredValue) Add(amount int64, logOffset Fixed64) int64 {
+ integer, frac := logOffset.ToUint64(), logOffset.Fraction()
+ factor := frac.Pow2()
+ base := factor * float64(amount)
+ if integer < e.Exp {
+ base /= math.Pow(2, float64(e.Exp-integer))
+ }
+ if integer > e.Exp {
+ e.Base >>= (integer - e.Exp)
+ e.Exp = integer
+ }
+ if base >= 0 || uint64(-base) <= e.Base {
+ // The conversion from negative float64 to
+ // uint64 is undefined in golang, and doesn't
+ // work with ARMv8. More details at:
+ // https://github.com/golang/go/issues/43047
+ if base >= 0 {
+ e.Base += uint64(base)
+ } else {
+ e.Base -= uint64(-base)
+ }
+ return amount
+ }
+ net := int64(-float64(e.Base) / factor)
+ e.Base = 0
+ return net
+}
+
+// AddExp adds another ExpiredValue.
+func (e *ExpiredValue) AddExp(a ExpiredValue) {
+ if e.Exp > a.Exp {
+ a.Base >>= (e.Exp - a.Exp)
+ }
+ if e.Exp < a.Exp {
+ e.Base >>= (a.Exp - e.Exp)
+ e.Exp = a.Exp
+ }
+ e.Base += a.Base
+}
+
+// SubExp subtracts another ExpiredValue.
+func (e *ExpiredValue) SubExp(a ExpiredValue) {
+ if e.Exp > a.Exp {
+ a.Base >>= (e.Exp - a.Exp)
+ }
+ if e.Exp < a.Exp {
+ e.Base >>= (a.Exp - e.Exp)
+ e.Exp = a.Exp
+ }
+ if e.Base > a.Base {
+ e.Base -= a.Base
+ } else {
+ e.Base = 0
+ }
+}
+
+// LinearExpiredValue is very similar to ExpiredValue in that its value also
+// expires continuously over time. The difference is that it expires linearly.
+type LinearExpiredValue struct {
+ Offset uint64 // The latest time offset
+ Val uint64 // The remaining value, can never be negative
+ Rate mclock.AbsTime `rlp:"-"` // Expiration rate(by nanosecond), will ignored by RLP
+}
+
+// Value calculates the value at the given moment. This function always assumes
+// that the given timestamp is not less than the recorded one.
+func (e LinearExpiredValue) Value(now mclock.AbsTime) uint64 {
+ offset := uint64(now / e.Rate)
+ if e.Offset < offset {
+ diff := offset - e.Offset
+ if e.Val >= diff {
+ e.Val -= diff
+ } else {
+ e.Val = 0
+ }
+ }
+ return e.Val
+}
+
+// Add adds a signed value at the given moment. This function always assumes
+// that the given timestamp is not less than the recorded one.
+func (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 {
+ offset := uint64(now / e.Rate)
+ if e.Offset < offset {
+ diff := offset - e.Offset
+ if e.Val >= diff {
+ e.Val -= diff
+ } else {
+ e.Val = 0
+ }
+ e.Offset = offset
+ }
+ if amount < 0 && uint64(-amount) > e.Val {
+ e.Val = 0
+ } else {
+ e.Val = uint64(int64(e.Val) + amount)
+ }
+ return e.Val
+}
+
+// Expirer changes logOffset with a linear rate which can be changed during operation.
+// It is not thread safe, if access by multiple goroutines is needed then it should be
+// encapsulated into a locked structure.
+// Note that if neither SetRate nor SetLogOffset are used during operation then LogOffset
+// is thread safe.
+type Expirer struct {
+ logOffset Fixed64
+ rate float64
+ lastUpdate mclock.AbsTime
+}
+
+// SetRate changes the expiration rate which is the inverse of the time constant in
+// nanoseconds.
+func (e *Expirer) SetRate(now mclock.AbsTime, rate float64) {
+ dt := now - e.lastUpdate
+ if dt > 0 {
+ e.logOffset += Fixed64(logToFixedFactor * float64(dt) * e.rate)
+ }
+ e.lastUpdate = now
+ e.rate = rate
+}
+
+// SetLogOffset sets logOffset instantly.
+func (e *Expirer) SetLogOffset(now mclock.AbsTime, logOffset Fixed64) {
+ e.lastUpdate = now
+ e.logOffset = logOffset
+}
+
+// LogOffset returns the current logarithmic offset.
+func (e *Expirer) LogOffset(now mclock.AbsTime) Fixed64 {
+ dt := now - e.lastUpdate
+ if dt <= 0 {
+ return e.logOffset
+ }
+ return e.logOffset + Fixed64(logToFixedFactor*float64(dt)*e.rate)
+}
+
+// fixedFactor is the fixed point multiplier factor used by Fixed64.
+const fixedFactor = 0x1000000
+
+// Fixed64 implements 64-bit fixed point arithmetic functions.
+type Fixed64 int64
+
+// Uint64ToFixed64 converts uint64 integer to Fixed64 format.
+func Uint64ToFixed64(f uint64) Fixed64 {
+ return Fixed64(f * fixedFactor)
+}
+
+// Float64ToFixed64 converts float64 to Fixed64 format.
+func Float64ToFixed64(f float64) Fixed64 {
+ return Fixed64(f * fixedFactor)
+}
+
+// ToUint64 converts Fixed64 format to uint64.
+func (f64 Fixed64) ToUint64() uint64 {
+ return uint64(f64) / fixedFactor
+}
+
+// Fraction returns the fractional part of a Fixed64 value.
+func (f64 Fixed64) Fraction() Fixed64 {
+ return f64 % fixedFactor
+}
+
+var (
+ logToFixedFactor = float64(fixedFactor) / math.Log(2)
+ fixedToLogFactor = math.Log(2) / float64(fixedFactor)
+)
+
+// Pow2 returns the base 2 power of the fixed point value.
+func (f64 Fixed64) Pow2() float64 {
+ return math.Exp(float64(f64) * fixedToLogFactor)
+}
diff --git a/les/utils/expiredvalue_test.go b/les/utils/expiredvalue_test.go
new file mode 100644
index 000000000..beefbb681
--- /dev/null
+++ b/les/utils/expiredvalue_test.go
@@ -0,0 +1,194 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "github.com/core-coin/go-core/common/mclock"
+ "testing"
+)
+
+func TestValueExpiration(t *testing.T) {
+ var cases = []struct {
+ input ExpiredValue
+ timeOffset Fixed64
+ expect uint64
+ }{
+ {ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},
+ {ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},
+ {ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(2), 32},
+ {ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(2), 128},
+ {ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(3), 64},
+ }
+ for _, c := range cases {
+ if got := c.input.Value(c.timeOffset); got != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
+ }
+ }
+}
+
+func TestValueAddition(t *testing.T) {
+ var cases = []struct {
+ input ExpiredValue
+ addend int64
+ timeOffset Fixed64
+ expect uint64
+ expectNet int64
+ }{
+ // Addition
+ {ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(0), 256, 128},
+ {ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(0), 640, 128},
+
+ // Addition with offset
+ {ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(1), 192, 128},
+ {ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(1), 384, 128},
+ {ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(3), 192, 128},
+
+ // Subtraction
+ {ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(0), 64, -64},
+ {ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(0), 0, -128},
+ {ExpiredValue{Base: 128, Exp: 0}, -192, Uint64ToFixed64(0), 0, -128},
+
+ // Subtraction with offset
+ {ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(1), 0, -64},
+ {ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(1), 0, -64},
+ {ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(1), 128, -128},
+ {ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(2), 0, -128},
+ }
+ for _, c := range cases {
+ if net := c.input.Add(c.addend, c.timeOffset); net != c.expectNet {
+ t.Fatalf("Net amount mismatch, want=%d, got=%d", c.expectNet, net)
+ }
+ if got := c.input.Value(c.timeOffset); got != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
+ }
+ }
+}
+
+func TestExpiredValueAddition(t *testing.T) {
+ var cases = []struct {
+ input ExpiredValue
+ another ExpiredValue
+ timeOffset Fixed64
+ expect uint64
+ }{
+ {ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 256},
+ {ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 384},
+ {ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 1}, Uint64ToFixed64(0), 384},
+ {ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 128},
+ }
+ for _, c := range cases {
+ c.input.AddExp(c.another)
+ if got := c.input.Value(c.timeOffset); got != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
+ }
+ }
+}
+
+func TestExpiredValueSubtraction(t *testing.T) {
+ var cases = []struct {
+ input ExpiredValue
+ another ExpiredValue
+ timeOffset Fixed64
+ expect uint64
+ }{
+ {ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 0},
+ {ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 1}, Uint64ToFixed64(0), 0},
+ {ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},
+ {ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},
+ }
+ for _, c := range cases {
+ c.input.SubExp(c.another)
+ if got := c.input.Value(c.timeOffset); got != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
+ }
+ }
+}
+
+func TestLinearExpiredValue(t *testing.T) {
+ var cases = []struct {
+ value LinearExpiredValue
+ now mclock.AbsTime
+ expect uint64
+ }{
+ {LinearExpiredValue{
+ Offset: 0,
+ Val: 0,
+ Rate: mclock.AbsTime(1),
+ }, 0, 0},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 1,
+ Rate: mclock.AbsTime(1),
+ }, 0, 1},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 1,
+ Rate: mclock.AbsTime(1),
+ }, mclock.AbsTime(2), 0},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 1,
+ Rate: mclock.AbsTime(1),
+ }, mclock.AbsTime(3), 0},
+ }
+ for _, c := range cases {
+ if value := c.value.Value(c.now); value != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
+ }
+ }
+}
+
+func TestLinearExpiredAddition(t *testing.T) {
+ var cases = []struct {
+ value LinearExpiredValue
+ amount int64
+ now mclock.AbsTime
+ expect uint64
+ }{
+ {LinearExpiredValue{
+ Offset: 0,
+ Val: 0,
+ Rate: mclock.AbsTime(1),
+ }, -1, 0, 0},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 1,
+ Rate: mclock.AbsTime(1),
+ }, -1, 0, 0},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 2,
+ Rate: mclock.AbsTime(1),
+ }, -1, mclock.AbsTime(2), 0},
+
+ {LinearExpiredValue{
+ Offset: 1,
+ Val: 2,
+ Rate: mclock.AbsTime(1),
+ }, -2, mclock.AbsTime(2), 0},
+ }
+ for _, c := range cases {
+ if value := c.value.Add(c.amount, c.now); value != c.expect {
+ t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
+ }
+ }
+}
diff --git a/les/utils/weighted_select.go b/les/utils/weighted_select.go
index dc8d055d3..4d3c8b842 100644
--- a/les/utils/weighted_select.go
+++ b/les/utils/weighted_select.go
@@ -18,26 +18,40 @@ package utils
import "math/rand"
-// wrsItem interface should be implemented by any entries that are to be selected from
-// a WeightedRandomSelect set. Note that recalculating monotonously decreasing item
-// weights on-demand (without constantly calling Update) is allowed
-type wrsItem interface {
- Weight() int64
+type (
+ // WeightedRandomSelect is capable of weighted random selection from a set of items
+ WeightedRandomSelect struct {
+ root *wrsNode
+ idx map[WrsItem]int
+ wfn WeightFn
+ }
+ WrsItem interface{}
+ WeightFn func(interface{}) uint64
+)
+
+// NewWeightedRandomSelect returns a new WeightedRandomSelect structure
+func NewWeightedRandomSelect(wfn WeightFn) *WeightedRandomSelect {
+ return &WeightedRandomSelect{root: &wrsNode{maxItems: wrsBranches}, idx: make(map[WrsItem]int), wfn: wfn}
}
-// WeightedRandomSelect is capable of weighted random selection from a set of items
-type WeightedRandomSelect struct {
- root *wrsNode
- idx map[wrsItem]int
+// Update updates an item's weight, adds it if it was non-existent or removes it if
+// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.
+func (w *WeightedRandomSelect) Update(item WrsItem) {
+ w.setWeight(item, w.wfn(item))
}
-// NewWeightedRandomSelect returns a new WeightedRandomSelect structure
-func NewWeightedRandomSelect() *WeightedRandomSelect {
- return &WeightedRandomSelect{root: &wrsNode{maxItems: wrsBranches}, idx: make(map[wrsItem]int)}
+// Remove removes an item from the set
+func (w *WeightedRandomSelect) Remove(item WrsItem) {
+ w.setWeight(item, 0)
+}
+
+// IsEmpty returns true if the set is empty
+func (w *WeightedRandomSelect) IsEmpty() bool {
+ return w.root.sumWeight == 0
}
// setWeight sets an item's weight to a specific value (removes it if zero)
-func (w *WeightedRandomSelect) setWeight(item wrsItem, weight int64) {
+func (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) {
idx, ok := w.idx[item]
if ok {
w.root.setWeight(idx, weight)
@@ -58,33 +72,22 @@ func (w *WeightedRandomSelect) setWeight(item wrsItem, weight int64) {
}
}
-// Update updates an item's weight, adds it if it was non-existent or removes it if
-// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.
-func (w *WeightedRandomSelect) Update(item wrsItem) {
- w.setWeight(item, item.Weight())
-}
-
-// Remove removes an item from the set
-func (w *WeightedRandomSelect) Remove(item wrsItem) {
- w.setWeight(item, 0)
-}
-
// Choose randomly selects an item from the set, with a chance proportional to its
// current weight. If the weight of the chosen element has been decreased since the
// last stored value, returns it with a newWeight/oldWeight chance, otherwise just
// updates its weight and selects another one
-func (w *WeightedRandomSelect) Choose() wrsItem {
+func (w *WeightedRandomSelect) Choose() WrsItem {
for {
if w.root.sumWeight == 0 {
return nil
}
- val := rand.Int63n(w.root.sumWeight)
+ val := uint64(rand.Int63n(int64(w.root.sumWeight)))
choice, lastWeight := w.root.choose(val)
- weight := choice.Weight()
+ weight := w.wfn(choice)
if weight != lastWeight {
w.setWeight(choice, weight)
}
- if weight >= lastWeight || rand.Int63n(lastWeight) < weight {
+ if weight >= lastWeight || uint64(rand.Int63n(int64(lastWeight))) < weight {
return choice
}
}
@@ -92,16 +95,16 @@ func (w *WeightedRandomSelect) Choose() wrsItem {
const wrsBranches = 8 // max number of branches in the wrsNode tree
-// wrsNode is a node of a tree structure that can store wrsItems or further wrsNodes.
+// wrsNode is a node of a tree structure that can store WrsItems or further wrsNodes.
type wrsNode struct {
items [wrsBranches]interface{}
- weights [wrsBranches]int64
- sumWeight int64
+ weights [wrsBranches]uint64
+ sumWeight uint64
level, itemCnt, maxItems int
}
// insert recursively inserts a new item to the tree and returns the item index
-func (n *wrsNode) insert(item wrsItem, weight int64) int {
+func (n *wrsNode) insert(item WrsItem, weight uint64) int {
branch := 0
for n.items[branch] != nil && (n.level == 0 || n.items[branch].(*wrsNode).itemCnt == n.items[branch].(*wrsNode).maxItems) {
branch++
@@ -129,7 +132,7 @@ func (n *wrsNode) insert(item wrsItem, weight int64) int {
// setWeight updates the weight of a certain item (which should exist) and returns
// the change of the last weight value stored in the tree
-func (n *wrsNode) setWeight(idx int, weight int64) int64 {
+func (n *wrsNode) setWeight(idx int, weight uint64) uint64 {
if n.level == 0 {
oldWeight := n.weights[idx]
n.weights[idx] = weight
@@ -152,12 +155,12 @@ func (n *wrsNode) setWeight(idx int, weight int64) int64 {
return diff
}
-// Choose recursively selects an item from the tree and returns it along with its weight
-func (n *wrsNode) choose(val int64) (wrsItem, int64) {
+// choose recursively selects an item from the tree and returns it along with its weight
+func (n *wrsNode) choose(val uint64) (WrsItem, uint64) {
for i, w := range n.weights {
if val < w {
if n.level == 0 {
- return n.items[i].(wrsItem), n.weights[i]
+ return n.items[i].(WrsItem), n.weights[i]
}
return n.items[i].(*wrsNode).choose(val)
}
diff --git a/les/utils/weighted_select_test.go b/les/utils/weighted_select_test.go
index 3d7854dc0..4a23a4174 100644
--- a/les/utils/weighted_select_test.go
+++ b/les/utils/weighted_select_test.go
@@ -26,17 +26,18 @@ type testWrsItem struct {
widx *int
}
-func (t *testWrsItem) Weight() int64 {
+func testWeight(i interface{}) uint64 {
+ t := i.(*testWrsItem)
w := *t.widx
if w == -1 || w == t.idx {
- return int64(t.idx + 1)
+ return uint64(t.idx + 1)
}
return 0
}
func TestWeightedRandomSelect(t *testing.T) {
testFn := func(cnt int) {
- s := NewWeightedRandomSelect()
+ s := NewWeightedRandomSelect(testWeight)
w := -1
list := make([]testWrsItem, cnt)
for i := range list {
diff --git a/light/odr_test.go b/light/odr_test.go
index 079453698..df739c151 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -206,8 +206,8 @@ func odrContractCall(ctx context.Context, db xcbdb.Database, bc *core.BlockChain
context := core.NewCVMContext(msg, header, chain, nil)
vmenv := vm.NewCVM(context, st, config, vm.Config{})
gp := new(core.EnergyPool).AddEnergy(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
- res = append(res, ret...)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
+ res = append(res, result.Return()...)
if st.Error() != nil {
return res, st.Error()
}
@@ -265,7 +265,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, cryptore.NewFaker(), sdb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
t.Fatal(err)
diff --git a/light/postprocess.go b/light/postprocess.go
index 1becfdfeb..26ced5c66 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -227,7 +227,7 @@ func (c *ChtIndexerBackend) Commit() error {
c.trieset.Clear()
c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
- it := c.trieTable.NewIterator()
+ it := c.trieTable.NewIterator(nil, nil)
defer it.Release()
var (
@@ -464,7 +464,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
b.trieset.Clear()
b.triedb.Commit(root, false, func(hash common.Hash) { b.trieset.Add(hash) })
- it := b.trieTable.NewIterator()
+ it := b.trieTable.NewIterator(nil, nil)
defer it.Release()
var (
diff --git a/light/trie_test.go b/light/trie_test.go
index 3d4f26314..17f522099 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -22,7 +22,6 @@ import (
"fmt"
"testing"
- "github.com/davecgh/go-spew/spew"
"github.com/core-coin/go-core/consensus/cryptore"
"github.com/core-coin/go-core/core"
"github.com/core-coin/go-core/core/rawdb"
@@ -30,6 +29,7 @@ import (
"github.com/core-coin/go-core/core/vm"
"github.com/core-coin/go-core/params"
"github.com/core-coin/go-core/trie"
+ "github.com/davecgh/go-spew/spew"
)
func TestNodeIterator(t *testing.T) {
@@ -40,7 +40,7 @@ func TestNodeIterator(t *testing.T) {
genesis = gspec.MustCommit(fulldb)
)
gspec.MustCommit(lightdb)
- blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, cryptore.NewFaker(), fulldb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/light/txpool_test.go b/light/txpool_test.go
index 60a5242c0..f6aa12f4b 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -88,7 +88,7 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, cryptore.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, cryptore.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/miner/stress_clique.go b/miner/stress_clique.go
index 2387287b9..87cc0f307 100644
--- a/miner/stress_clique.go
+++ b/miner/stress_clique.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see .
+//go:build none
// +build none
// This file contains a miner stress test based on the Clique consensus engine.
@@ -61,30 +62,30 @@ func main() {
genesis := makeGenesis(faucets, sealers)
var (
- nodes []*node.Node
+ nodes []*xcb.Core
enodes []*enode.Node
)
for _, sealer := range sealers {
// Start the node and wait until it's up
- node, err := makeSealer(genesis)
+ stack, xcbBackend, err := makeSealer(genesis)
if err != nil {
panic(err)
}
- defer node.Close()
+ defer stack.Close()
- for node.Server().NodeInfo().Ports.Listener == 0 {
+ for stack.Server().NodeInfo().Ports.Listener == 0 {
time.Sleep(250 * time.Millisecond)
}
- // Connect the node to al the previous ones
+ // Connect the node to all the previous ones
for _, n := range enodes {
- node.Server().AddPeer(n)
+ stack.Server().AddPeer(n)
}
// Start tracking the node and it's enode
- nodes = append(nodes, node)
- enodes = append(enodes, node.Server().Self())
+ nodes = append(nodes, xcbBackend)
+ enodes = append(enodes, stack.Server().Self())
// Inject the signer key and start sealing with it
- store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
signer, err := store.ImportEDDSA(sealer, "")
if err != nil {
panic(err)
@@ -93,15 +94,11 @@ func main() {
panic(err)
}
}
+
// Iterate over all the nodes and start signing with them
time.Sleep(3 * time.Second)
-
for _, node := range nodes {
- var core *xcb.Core
- if err := node.Service(&core); err != nil {
- panic(err)
- }
- if err := core.StartMining(1); err != nil {
+ if err := node.StartMining(1); err != nil {
panic(err)
}
}
@@ -110,25 +107,21 @@ func main() {
// Start injecting transactions from the faucet like crazy
nonces := make([]uint64, len(faucets))
for {
+ // Pick a random signer node
index := rand.Intn(len(faucets))
-
- // Fetch the accessor for the relevant signer
- var core *xcb.Core
- if err := nodes[index%len(nodes)].Service(&core); err != nil {
- panic(err)
- }
+ backend := nodes[index%len(nodes)]
// Create a self transaction and inject into the pool
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000), nil), types.NewNucleusSigner(genesis.Config.NetworkID), faucets[index])
if err != nil {
panic(err)
}
- if err := core.TxPool().AddLocal(tx); err != nil {
+ if err := backend.TxPool().AddLocal(tx); err != nil {
panic(err)
}
nonces[index]++
// Wait if we're too saturated
- if pend, _ := core.TxPool().Stats(); pend > 2048 {
+ if pend, _ := backend.TxPool().Stats(); pend > 2048 {
time.Sleep(100 * time.Millisecond)
}
}
@@ -171,7 +164,7 @@ func makeGenesis(faucets []*eddsa.PrivateKey, sealers []*eddsa.PrivateKey) *core
return genesis
}
-func makeSealer(genesis *core.Genesis) (*node.Node, error) {
+func makeSealer(genesis *core.Genesis) (*node.Node, *xcb.Core, error) {
// Define the basic configurations for the Core node
datadir, _ := ioutil.TempDir("", "")
@@ -189,27 +182,28 @@ func makeSealer(genesis *core.Genesis) (*node.Node, error) {
// Start the node and configure a full Core node on it
stack, err := node.New(config)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- return xcb.New(ctx, &xcb.Config{
- Genesis: genesis,
- NetworkId: genesis.Config.NetworkID.Uint64(),
- SyncMode: downloader.FullSync,
- DatabaseCache: 256,
- DatabaseHandles: 256,
- TxPool: core.DefaultTxPoolConfig,
- GPO: xcb.DefaultConfig.GPO,
- Miner: miner.Config{
- EnergyFloor: genesis.EnergyLimit * 9 / 10,
- EnergyCeil: genesis.EnergyLimit * 11 / 10,
- EnergyPrice: big.NewInt(1),
- Recommit: time.Second,
- },
- })
- }); err != nil {
- return nil, err
+ // Create and register the backend
+ xcbBackend, err := xcb.New(stack, &xcb.Config{
+ Genesis: genesis,
+ NetworkId: genesis.Config.NetworkID.Uint64(),
+ SyncMode: downloader.FullSync,
+ DatabaseCache: 256,
+ DatabaseHandles: 256,
+ TxPool: core.DefaultTxPoolConfig,
+ GPO: xcb.DefaultConfig.GPO,
+ Miner: miner.Config{
+ EnergyFloor: genesis.EnergyLimit * 9 / 10,
+ EnergyCeil: genesis.EnergyLimit * 11 / 10,
+ EnergyPrice: big.NewInt(1),
+ Recommit: time.Second,
+ },
+ })
+ if err != nil {
+ return nil, nil, err
}
- // Start the node and return if successful
- return stack, stack.Start()
+
+ err = stack.Start()
+ return stack, xcbBackend, err
}
diff --git a/miner/stress_cryptore.go b/miner/stress_cryptore.go
index 86211b342..19e8e4fe4 100644
--- a/miner/stress_cryptore.go
+++ b/miner/stress_cryptore.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see .
+//go:build none
// +build none
// This file contains a miner stress test based on the Cryptore consensus engine.
@@ -56,43 +57,39 @@ func main() {
genesis := makeGenesis(faucets)
var (
- nodes []*node.Node
+ nodes []*xcb.Core
enodes []*enode.Node
)
for i := 0; i < 4; i++ {
// Start the node and wait until it's up
- node, err := makeMiner(genesis)
+ stack, xcbBackend, err := makeMiner(genesis)
if err != nil {
panic(err)
}
- defer node.Close()
+ defer stack.Close()
- for node.Server().NodeInfo().Ports.Listener == 0 {
+ for stack.Server().NodeInfo().Ports.Listener == 0 {
time.Sleep(250 * time.Millisecond)
}
- // Connect the node to al the previous ones
+ // Connect the node to all the previous ones
for _, n := range enodes {
- node.Server().AddPeer(n)
+ stack.Server().AddPeer(n)
}
// Start tracking the node and it's enode
- nodes = append(nodes, node)
- enodes = append(enodes, node.Server().Self())
+ nodes = append(nodes, xcbBackend)
+ enodes = append(enodes, stack.Server().Self())
// Inject the signer key and start sealing with it
- store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
if _, err := store.NewAccount(""); err != nil {
panic(err)
}
}
- // Iterate over all the nodes and start signing with them
- time.Sleep(3 * time.Second)
+ // Iterate over all the nodes and start mining
+ time.Sleep(3 * time.Second)
for _, node := range nodes {
- var core *xcb.Core
- if err := node.Service(&core); err != nil {
- panic(err)
- }
- if err := core.StartMining(1); err != nil {
+ if err := node.StartMining(1); err != nil {
panic(err)
}
}
@@ -101,25 +98,21 @@ func main() {
// Start injecting transactions from the faucets like crazy
nonces := make([]uint64, len(faucets))
for {
+ // Pick a random mining node
index := rand.Intn(len(faucets))
-
- // Fetch the accessor for the relevant signer
- var core *xcb.Core
- if err := nodes[index%len(nodes)].Service(&core); err != nil {
- panic(err)
- }
+ backend := nodes[index%len(nodes)]
// Create a self transaction and inject into the pool
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.NewNucleusSigner(genesis.Config.NetworkID), faucets[index])
if err != nil {
panic(err)
}
- if err := core.TxPool().AddLocal(tx); err != nil {
+ if err := backend.TxPool().AddLocal(tx); err != nil {
panic(err)
}
nonces[index]++
// Wait if we're too saturated
- if pend, _ := core.TxPool().Stats(); pend > 2048 {
+ if pend, _ := backend.TxPool().Stats(); pend > 2048 {
time.Sleep(100 * time.Millisecond)
}
}
@@ -144,7 +137,7 @@ func makeGenesis(faucets []*eddsa.PrivateKey) *core.Genesis {
return genesis
}
-func makeMiner(genesis *core.Genesis) (*node.Node, error) {
+func makeMiner(genesis *core.Genesis) (*node.Node, *xcb.Core, error) {
// Define the basic configurations for the Core node
datadir, _ := ioutil.TempDir("", "")
@@ -160,31 +153,31 @@ func makeMiner(genesis *core.Genesis) (*node.Node, error) {
NoUSB: true,
UseLightweightKDF: true,
}
- // Start the node and configure a full Core node on it
+ // Create the node and configure a full Core node on it
stack, err := node.New(config)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- return xcb.New(ctx, &xcb.Config{
- Genesis: genesis,
- NetworkId: genesis.Config.NetworkID.Uint64(),
- SyncMode: downloader.FullSync,
- DatabaseCache: 256,
- DatabaseHandles: 256,
- TxPool: core.DefaultTxPoolConfig,
- GPO: xcb.DefaultConfig.GPO,
- Cryptore: xcb.DefaultConfig.Cryptore,
- Miner: miner.Config{
- EnergyFloor: genesis.EnergyLimit * 9 / 10,
- EnergyCeil: genesis.EnergyLimit * 11 / 10,
- EnergyPrice: big.NewInt(1),
- Recommit: time.Second,
- },
- })
- }); err != nil {
- return nil, err
+ xcbBackend, err := xcb.New(stack, &xcb.Config{
+ Genesis: genesis,
+ NetworkId: genesis.Config.NetworkID.Uint64(),
+ SyncMode: downloader.FullSync,
+ DatabaseCache: 256,
+ DatabaseHandles: 256,
+ TxPool: core.DefaultTxPoolConfig,
+ GPO: xcb.DefaultConfig.GPO,
+ Cryptore: xcb.DefaultConfig.Cryptore,
+ Miner: miner.Config{
+ EnergyFloor: genesis.EnergyLimit * 9 / 10,
+ EnergyCeil: genesis.EnergyLimit * 11 / 10,
+ EnergyPrice: big.NewInt(1),
+ Recommit: time.Second,
+ },
+ })
+ if err != nil {
+ return nil, nil, err
}
- // Start the node and return if successful
- return stack, stack.Start()
+
+ err = stack.Start()
+ return stack, xcbBackend, err
}
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 48ead79b1..50451d5f3 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -121,7 +121,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil, nil)
txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
// Generate a small n-block chain and an uncle block for it
@@ -216,7 +216,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
// This test chain imports the mined blocks.
db2 := rawdb.NewMemoryDatabase()
b.genesis.MustCommit(db2)
- chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil, nil)
defer chain.Stop()
// Ignore empty commit here for less noise.
@@ -289,9 +289,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
}
w.skipSealHook = func(task *task) bool { return true }
w.fullTaskHook = func() {
- // Arch64 unit tests are running in a VM on travis, they must
- // be given more time to execute.
- time.Sleep(time.Second)
+ time.Sleep(100 * time.Millisecond)
}
w.start() // Start mining!
for i := 0; i < 2; i += 1 {
diff --git a/mobile/bind.go b/mobile/bind.go
index 273a71bde..2147a1007 100644
--- a/mobile/bind.go
+++ b/mobile/bind.go
@@ -197,6 +197,15 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, args *Interf
return &Transaction{rawTx}, nil
}
+// RawTransact invokes the (paid) contract method with raw calldata as input values.
+func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (tx *Transaction, _ error) {
+ rawTx, err := c.contract.RawTransact(&opts.opts, calldata)
+ if err != nil {
+ return nil, err
+ }
+ return &Transaction{rawTx}, nil
+}
+
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (c *BoundContract) Transfer(opts *TransactOpts) (tx *Transaction, _ error) {
diff --git a/mobile/gocore.go b/mobile/gocore.go
index 9dc7342e3..183281164 100644
--- a/mobile/gocore.go
+++ b/mobile/gocore.go
@@ -130,8 +130,8 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
// Create the empty networking stack
nodeConf := &node.Config{
- Name: clientIdentifier,
- Version: params.VersionWithMeta,
+ Name: clientIdentifier,
+ //Version: params.VersionWithMeta,
DataDir: datadir,
KeyStoreDir: filepath.Join(datadir, "keystore"), // Mobile should never use internal keystores!
P2P: p2p.Config{
@@ -173,19 +173,13 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
xcbConf.SyncMode = downloader.LightSync
xcbConf.NetworkId = uint64(config.CoreNetworkID)
xcbConf.DatabaseCache = config.CoreDatabaseCache
- if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- return les.New(ctx, &xcbConf)
- }); err != nil {
+ lesBackend, err := les.New(rawStack, &xcbConf)
+ if err != nil {
return nil, fmt.Errorf("core init: %v", err)
}
// If netstats reporting is requested, do it
if config.CoreNetStats != "" {
- if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- var lesServ *les.LightCore
- ctx.Service(&lesServ)
-
- return xcbstats.New(config.CoreNetStats, nil, lesServ)
- }); err != nil {
+ if err := xcbstats.New(rawStack, lesBackend.ApiBackend, lesBackend.Engine(), config.CoreNetStats); err != nil {
return nil, fmt.Errorf("netstats init: %v", err)
}
}
@@ -194,21 +188,24 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
return &Node{rawStack}, nil
}
-// Close terminates a running node along with all it's services, tearing internal
-// state doen too. It's not possible to restart a closed node.
+// Close terminates a running node along with all it's services, tearing internal state
+// down. It is not possible to restart a closed node.
func (n *Node) Close() error {
return n.node.Close()
}
// Start creates a live P2P node and starts running it.
func (n *Node) Start() error {
+ // TODO: recreate the node so it can be started multiple times
return n.node.Start()
}
-// Stop terminates a running node along with all it's services. If the node was
-// not started, an error is returned.
+// Stop terminates a running node along with all its services. If the node was not started,
+// an error is returned. It is not possible to restart a stopped node.
+//
+// Deprecated: use Close()
func (n *Node) Stop() error {
- return n.node.Stop()
+ return n.node.Close()
}
// GetCoreClient retrieves a client to access the Core subsystem.
diff --git a/node/api.go b/node/api.go
index 7e2fdcde6..25db18383 100644
--- a/node/api.go
+++ b/node/api.go
@@ -19,6 +19,7 @@ package node
import (
"context"
"fmt"
+ "github.com/core-coin/go-core/internal/debug"
"strings"
"github.com/core-coin/go-core/common/hexutil"
@@ -28,21 +29,40 @@ import (
"github.com/core-coin/go-core/rpc"
)
-// PrivateAdminAPI is the collection of administrative API methods exposed only
-// over a secure RPC channel.
-type PrivateAdminAPI struct {
- node *Node // Node interfaced by this API
+// apis returns the collection of built-in RPC APIs.
+func (n *Node) apis() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: "admin",
+ Version: "1.0",
+ Service: &privateAdminAPI{n},
+ }, {
+ Namespace: "admin",
+ Version: "1.0",
+ Service: &publicAdminAPI{n},
+ Public: true,
+ }, {
+ Namespace: "debug",
+ Version: "1.0",
+ Service: debug.Handler,
+ }, {
+ Namespace: "web3",
+ Version: "1.0",
+ Service: &publicWeb3API{n},
+ Public: true,
+ },
+ }
}
-// NewPrivateAdminAPI creates a new API definition for the private admin methods
-// of the node itself.
-func NewPrivateAdminAPI(node *Node) *PrivateAdminAPI {
- return &PrivateAdminAPI{node: node}
+// privateAdminAPI is the collection of administrative API methods exposed only
+// over a secure RPC channel.
+type privateAdminAPI struct {
+ node *Node // Node interfaced by this API
}
// AddPeer requests connecting to a remote node, and also maintaining the new
// connection at all times, even reconnecting if it is lost.
-func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
+func (api *privateAdminAPI) AddPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -58,7 +78,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
}
// RemovePeer disconnects from a remote node if the connection exists
-func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
+func (api *privateAdminAPI) RemovePeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -74,7 +94,7 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
}
// AddTrustedPeer allows a remote node to always connect, even if slots are full
-func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
+func (api *privateAdminAPI) AddTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -90,7 +110,7 @@ func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
// RemoveTrustedPeer removes a remote node from the trusted peer set, but it
// does not disconnect it automatically.
-func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
+func (api *privateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -106,7 +126,7 @@ func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
// PeerEvents creates an RPC subscription which receives peer events from the
// node's p2p.Server
-func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
+func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
@@ -143,14 +163,11 @@ func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
}
// StartRPC starts the HTTP RPC API server.
-func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
+func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()
- if api.node.httpHandler != nil {
- return false, fmt.Errorf("HTTP RPC already running on %s", api.node.httpEndpoint)
- }
-
+ // Determine host and port.
if host == nil {
h := DefaultHTTPHost
if api.node.config.HTTPHost != "" {
@@ -162,57 +179,55 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
port = &api.node.config.HTTPPort
}
- allowedOrigins := api.node.config.HTTPCors
+ // Determine config.
+ config := httpConfig{
+ CorsAllowedOrigins: api.node.config.HTTPCors,
+ Vhosts: api.node.config.HTTPVirtualHosts,
+ Modules: api.node.config.HTTPModules,
+ }
if cors != nil {
- allowedOrigins = nil
+ config.CorsAllowedOrigins = nil
for _, origin := range strings.Split(*cors, ",") {
- allowedOrigins = append(allowedOrigins, strings.TrimSpace(origin))
+ config.CorsAllowedOrigins = append(config.CorsAllowedOrigins, strings.TrimSpace(origin))
}
}
-
- allowedVHosts := api.node.config.HTTPVirtualHosts
if vhosts != nil {
- allowedVHosts = nil
+ config.Vhosts = nil
+ for _, vhost := range strings.Split(*vhosts, ",") {
- allowedVHosts = append(allowedVHosts, strings.TrimSpace(vhost))
+ config.Vhosts = append(config.Vhosts, strings.TrimSpace(vhost))
}
}
-
- modules := api.node.httpWhitelist
if apis != nil {
- modules = nil
+ config.Modules = nil
for _, m := range strings.Split(*apis, ",") {
- modules = append(modules, strings.TrimSpace(m))
+ config.Modules = append(config.Modules, strings.TrimSpace(m))
}
}
- if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins, allowedVHosts, api.node.config.HTTPTimeouts); err != nil {
+ if err := api.node.http.setListenAddr(*host, *port); err != nil {
+ return false, err
+ }
+ if err := api.node.http.enableRPC(api.node.rpcAPIs, config); err != nil {
+ return false, err
+ }
+ if err := api.node.http.start(); err != nil {
return false, err
}
return true, nil
}
-// StopRPC terminates an already running HTTP RPC API endpoint.
-func (api *PrivateAdminAPI) StopRPC() (bool, error) {
- api.node.lock.Lock()
- defer api.node.lock.Unlock()
-
- if api.node.httpHandler == nil {
- return false, fmt.Errorf("HTTP RPC not running")
- }
- api.node.stopHTTP()
+// StopRPC shuts down the HTTP server.
+func (api *privateAdminAPI) StopRPC() (bool, error) {
+ api.node.http.stop()
return true, nil
}
// StartWS starts the websocket RPC API server.
-func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) {
+func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) {
api.node.lock.Lock()
defer api.node.lock.Unlock()
- if api.node.wsHandler != nil {
- return false, fmt.Errorf("WebSocket RPC already running on %s", api.node.wsEndpoint)
- }
-
+ // Determine host and port.
if host == nil {
h := DefaultWSHost
if api.node.config.WSHost != "" {
@@ -224,55 +239,57 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str
port = &api.node.config.WSPort
}
- origins := api.node.config.WSOrigins
- if allowedOrigins != nil {
- origins = nil
- for _, origin := range strings.Split(*allowedOrigins, ",") {
- origins = append(origins, strings.TrimSpace(origin))
- }
+ // Determine config.
+ config := wsConfig{
+ Modules: api.node.config.WSModules,
+ Origins: api.node.config.WSOrigins,
+ // ExposeAll: api.node.config.WSExposeAll,
}
-
- modules := api.node.config.WSModules
if apis != nil {
- modules = nil
+ config.Modules = nil
for _, m := range strings.Split(*apis, ",") {
- modules = append(modules, strings.TrimSpace(m))
+ config.Modules = append(config.Modules, strings.TrimSpace(m))
+ }
+ }
+ if allowedOrigins != nil {
+ config.Origins = nil
+ for _, origin := range strings.Split(*allowedOrigins, ",") {
+ config.Origins = append(config.Origins, strings.TrimSpace(origin))
}
}
- if err := api.node.startWS(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, origins, api.node.config.WSExposeAll); err != nil {
+ // Enable WebSocket on the server.
+ server := api.node.wsServerForPort(*port, false)
+ if err := server.setListenAddr(*host, *port); err != nil {
+ return false, err
+ }
+ openApis, _ := api.node.GetAPIs()
+ if err := server.enableWS(openApis, config); err != nil {
return false, err
}
+ if err := server.start(); err != nil {
+ return false, err
+ }
+ api.node.http.log.Info("WebSocket endpoint opened", "url", api.node.WSEndpoint())
return true, nil
}
-// StopWS terminates an already running websocket RPC API endpoint.
-func (api *PrivateAdminAPI) StopWS() (bool, error) {
- api.node.lock.Lock()
- defer api.node.lock.Unlock()
-
- if api.node.wsHandler == nil {
- return false, fmt.Errorf("WebSocket RPC not running")
- }
- api.node.stopWS()
+// StopWS terminates all WebSocket servers.
+func (api *privateAdminAPI) StopWS() (bool, error) {
+ api.node.http.stopWS()
+ api.node.ws.stop()
return true, nil
}
-// PublicAdminAPI is the collection of administrative API methods exposed over
+// publicAdminAPI is the collection of administrative API methods exposed over
// both secure and unsecure RPC channels.
-type PublicAdminAPI struct {
+type publicAdminAPI struct {
node *Node // Node interfaced by this API
}
-// NewPublicAdminAPI creates a new API definition for the public admin methods
-// of the node itself.
-func NewPublicAdminAPI(node *Node) *PublicAdminAPI {
- return &PublicAdminAPI{node: node}
-}
-
// Peers retrieves all the information we know about each individual peer at the
// protocol granularity.
-func (api *PublicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {
+func (api *publicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {
server := api.node.Server()
if server == nil {
return nil, ErrNodeStopped
@@ -282,7 +299,7 @@ func (api *PublicAdminAPI) Peers() ([]*p2p.PeerInfo, error) {
// NodeInfo retrieves all the information we know about the host node at the
// protocol granularity.
-func (api *PublicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
+func (api *publicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
server := api.node.Server()
if server == nil {
return nil, ErrNodeStopped
@@ -291,27 +308,22 @@ func (api *PublicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) {
}
// Datadir retrieves the current data directory the node is using.
-func (api *PublicAdminAPI) Datadir() string {
+func (api *publicAdminAPI) Datadir() string {
return api.node.DataDir()
}
-// PublicWeb3API offers helper utils
-type PublicWeb3API struct {
+// publicWeb3API offers helper utils
+type publicWeb3API struct {
stack *Node
}
-// NewPublicWeb3API creates a new Web3Service instance
-func NewPublicWeb3API(stack *Node) *PublicWeb3API {
- return &PublicWeb3API{stack}
-}
-
// ClientVersion returns the node name
-func (s *PublicWeb3API) ClientVersion() string {
+func (s *publicWeb3API) ClientVersion() string {
return s.stack.Server().Name
}
// Sha3 applies the core sha3 implementation on the input.
// It assumes the input is hex encoded.
-func (s *PublicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes {
+func (s *publicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes {
return crypto.SHA3(input)
}
diff --git a/node/api_test.go b/node/api_test.go
new file mode 100644
index 000000000..95956150c
--- /dev/null
+++ b/node/api_test.go
@@ -0,0 +1,351 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "bytes"
+ "github.com/core-coin/go-core/rpc"
+ "github.com/stretchr/testify/assert"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "testing"
+)
+
+// This test uses the admin_startRPC and admin_startWS APIs,
+// checking whether the HTTP server is started correctly.
+func TestStartRPC(t *testing.T) {
+ type test struct {
+ name string
+ cfg Config
+ fn func(*testing.T, *Node, *privateAdminAPI)
+
+ // Checks. These run after the node is configured and all API calls have been made.
+ wantReachable bool // whether the HTTP server should be reachable at all
+ wantHandlers bool // whether RegisterHandler handlers should be accessible
+ wantRPC bool // whether JSON-RPC/HTTP should be accessible
+ wantWS bool // whether JSON-RPC/WS should be accessible
+ }
+
+ tests := []test{
+ {
+ name: "all off",
+ cfg: Config{},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "rpc enabled through config",
+ cfg: Config{HTTPHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: false,
+ },
+ {
+ name: "rpc enabled through API",
+ cfg: Config{},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil)
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: false,
+ },
+ {
+ name: "rpc start again after failure",
+ cfg: Config{},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ // Listen on a random port.
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal("can't listen:", err)
+ }
+ defer listener.Close()
+ port := listener.Addr().(*net.TCPAddr).Port
+
+ // Now try to start RPC on that port. This should fail.
+ _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil)
+ if err == nil {
+ t.Fatal("StartRPC should have failed on port", port)
+ }
+
+ // Try again after unblocking the port. It should work this time.
+ listener.Close()
+ _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil)
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: false,
+ },
+ {
+ name: "rpc stopped through API",
+ cfg: Config{HTTPHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StopRPC()
+ assert.NoError(t, err)
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "rpc stopped twice",
+ cfg: Config{HTTPHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StopRPC()
+ assert.NoError(t, err)
+
+ _, err = api.StopRPC()
+ assert.NoError(t, err)
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "ws enabled through config",
+ cfg: Config{WSHost: "127.0.0.1"},
+ wantReachable: true,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: true,
+ },
+ {
+ name: "ws enabled through API",
+ cfg: Config{},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil)
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: true,
+ },
+ {
+ name: "ws stopped through API",
+ cfg: Config{WSHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StopWS()
+ assert.NoError(t, err)
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "ws stopped twice",
+ cfg: Config{WSHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StopWS()
+ assert.NoError(t, err)
+
+ _, err = api.StopWS()
+ assert.NoError(t, err)
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "ws enabled after RPC",
+ cfg: Config{HTTPHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ wsport := n.http.port
+ _, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: true,
+ },
+ {
+ name: "ws enabled after RPC then stopped",
+ cfg: Config{HTTPHost: "127.0.0.1"},
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ wsport := n.http.port
+ _, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
+ assert.NoError(t, err)
+
+ _, err = api.StopWS()
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: false,
+ },
+ {
+ name: "rpc stopped with ws enabled",
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil)
+ assert.NoError(t, err)
+
+ wsport := n.http.port
+ _, err = api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil)
+ assert.NoError(t, err)
+
+ _, err = api.StopRPC()
+ assert.NoError(t, err)
+ },
+ wantReachable: false,
+ wantHandlers: false,
+ wantRPC: false,
+ wantWS: false,
+ },
+ {
+ name: "rpc enabled after ws",
+ fn: func(t *testing.T, n *Node, api *privateAdminAPI) {
+ _, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil)
+ assert.NoError(t, err)
+
+ wsport := n.http.port
+ _, err = api.StartRPC(sp("127.0.0.1"), ip(wsport), nil, nil, nil)
+ assert.NoError(t, err)
+ },
+ wantReachable: true,
+ wantHandlers: true,
+ wantRPC: true,
+ wantWS: true,
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ // Apply some sane defaults.
+ config := test.cfg
+ // config.Logger = testlog.Logger(t, log.LvlDebug)
+ config.NoUSB = true
+ config.P2P.NoDiscovery = true
+
+ // Create Node.
+ stack, err := New(&config)
+ if err != nil {
+ t.Fatal("can't create node:", err)
+ }
+ defer stack.Close()
+
+ // Register the test handler.
+ stack.RegisterHandler("test", "/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("OK"))
+ }))
+
+ if err := stack.Start(); err != nil {
+ t.Fatal("can't start node:", err)
+ }
+
+ // Run the API call hook.
+ if test.fn != nil {
+ test.fn(t, stack, &privateAdminAPI{stack})
+ }
+
+ // Check if the HTTP endpoints are available.
+ baseURL := stack.HTTPEndpoint()
+ reachable := checkReachable(baseURL)
+ handlersAvailable := checkBodyOK(baseURL + "/test")
+ rpcAvailable := checkRPC(baseURL)
+ wsAvailable := checkRPC(strings.Replace(baseURL, "http://", "ws://", 1))
+ if reachable != test.wantReachable {
+ t.Errorf("HTTP server is %sreachable, want it %sreachable", not(reachable), not(test.wantReachable))
+ }
+ if handlersAvailable != test.wantHandlers {
+ t.Errorf("RegisterHandler handlers %savailable, want them %savailable", not(handlersAvailable), not(test.wantHandlers))
+ }
+ if rpcAvailable != test.wantRPC {
+ t.Errorf("HTTP RPC %savailable, want it %savailable", not(rpcAvailable), not(test.wantRPC))
+ }
+ if wsAvailable != test.wantWS {
+ t.Errorf("WS RPC %savailable, want it %savailable", not(wsAvailable), not(test.wantWS))
+ }
+ })
+ }
+}
+
+// checkReachable checks if the TCP endpoint in rawurl is open.
+func checkReachable(rawurl string) bool {
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ panic(err)
+ }
+ conn, err := net.Dial("tcp", u.Host)
+ if err != nil {
+ return false
+ }
+ conn.Close()
+ return true
+}
+
+// checkBodyOK checks whether the given HTTP URL responds with 200 OK and body "OK".
+func checkBodyOK(url string) bool {
+ resp, err := http.Get(url)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return false
+ }
+ buf := make([]byte, 2)
+ if _, err = io.ReadFull(resp.Body, buf); err != nil {
+ return false
+ }
+ return bytes.Equal(buf, []byte("OK"))
+}
+
+// checkRPC checks whether JSON-RPC works against the given URL.
+func checkRPC(url string) bool {
+ c, err := rpc.Dial(url)
+ if err != nil {
+ return false
+ }
+ defer c.Close()
+
+ _, err = c.SupportedModules()
+ return err == nil
+}
+
+// string/int pointer helpers.
+func sp(s string) *string { return &s }
+func ip(i int) *int { return &i }
+
+func not(ok bool) string {
+ if ok {
+ return ""
+ }
+ return "not "
+}
diff --git a/node/config.go b/node/config.go
index 6a5fcc36c..47ca16bbd 100644
--- a/node/config.go
+++ b/node/config.go
@@ -42,6 +42,7 @@ import (
const (
datadirPrivateKey = "nodekey" // Path within the datadir to the node's private key
+ datadirJWTKey = "jwtsecret" // Path within the datadir to the node's jwt secret
datadirDefaultKeyStore = "keystore" // Path within the datadir to the keystore
datadirStaticNodes = "static-nodes.json" // Path within the datadir to the static node list
datadirTrustedNodes = "trusted-nodes.json" // Path within the datadir to the trusted node list
@@ -120,6 +121,9 @@ type Config struct {
// for ephemeral nodes).
HTTPPort int `toml:",omitempty"`
+ // AuthPort is the port number on which the authenticated API is provided.
+ AuthPort int `toml:",omitempty"`
+
// HTTPCors is the Cross-Origin Resource Sharing header to send to requesting
// clients. Please be aware that CORS is a browser enforced security, it's fully
// useless for custom HTTP clients.
@@ -143,6 +147,9 @@ type Config struct {
// interface.
HTTPTimeouts rpc.HTTPTimeouts
+ // HTTPPathPrefix specifies a path prefix on which http-rpc is to be served.
+ HTTPPathPrefix string `toml:",omitempty"`
+
// WSHost is the host interface on which to start the websocket RPC server. If
// this field is empty, no websocket API endpoint will be started.
WSHost string
@@ -152,6 +159,9 @@ type Config struct {
// ephemeral nodes).
WSPort int `toml:",omitempty"`
+ // WSPathPrefix specifies a path prefix on which ws-rpc is to be served.
+ WSPathPrefix string `toml:",omitempty"`
+
// WSOrigins is the list of domain to accept websocket requests from. Please be
// aware that the server can only act upon the HTTP request the client sends and
// cannot verify the validity of the request header.
@@ -169,15 +179,6 @@ type Config struct {
// private APIs to untrusted users is a major security risk.
WSExposeAll bool `toml:",omitempty"`
- // GraphQLHost is the host interface on which to start the GraphQL server. If this
- // field is empty, no GraphQL API endpoint will be started.
- GraphQLHost string
-
- // GraphQLPort is the TCP port number on which to start the GraphQL server. The
- // default zero value is/ valid and will pick a port number randomly (useful
- // for ephemeral nodes).
- GraphQLPort int `toml:",omitempty"`
-
// GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting
// clients. Please be aware that CORS is a browser enforced security, it's fully
// useless for custom HTTP clients.
@@ -201,6 +202,9 @@ type Config struct {
staticNodesWarning bool
trustedNodesWarning bool
oldGocoreResourceWarning bool
+
+ // JWTSecret is the hex-encoded jwt secret.
+ JWTSecret string `toml:",omitempty"`
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -257,18 +261,9 @@ func (c *Config) HTTPEndpoint() string {
return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort)
}
-// GraphQLEndpoint resolves a GraphQL endpoint based on the configured host interface
-// and port parameters.
-func (c *Config) GraphQLEndpoint() string {
- if c.GraphQLHost == "" {
- return ""
- }
- return fmt.Sprintf("%s:%d", c.GraphQLHost, c.GraphQLPort)
-}
-
// DefaultHTTPEndpoint returns the HTTP endpoint used by default.
func DefaultHTTPEndpoint() string {
- config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort}
+ config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort, AuthPort: DefaultAuthPort}
return config.HTTPEndpoint()
}
@@ -290,7 +285,7 @@ func DefaultWSEndpoint() string {
// ExtRPCEnabled returns the indicator whether node enables the external
// RPC(http, ws or graphql).
func (c *Config) ExtRPCEnabled() bool {
- return c.HTTPHost != "" || c.WSHost != "" || c.GraphQLHost != ""
+ return c.HTTPHost != "" || c.WSHost != ""
}
// NodeName returns the devp2p node identifier.
diff --git a/node/defaults.go b/node/defaults.go
index 57912498c..0abced47d 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -34,18 +34,28 @@ const (
DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server
DefaultGraphQLPort = 8547 // Default TCP port for the GraphQL server
+ DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
+ DefaultAuthPort = 8551 // Default port for the authenticated apis
+)
+
+var (
+ DefaultAuthCors = []string{"localhost"} // Default cors domain for the authenticated apis
+ DefaultAuthVhosts = []string{"localhost"} // Default virtual hosts for the authenticated apis
+ DefaultAuthOrigins = []string{"localhost"} // Default origins for the authenticated apis
+ DefaultAuthPrefix = "" // Default prefix for the authenticated apis
+ DefaultAuthModules = []string{"xcb"}
)
// DefaultConfig contains reasonable default settings.
var DefaultConfig = Config{
DataDir: DefaultDataDir(),
HTTPPort: DefaultHTTPPort,
+ AuthPort: DefaultAuthPort,
HTTPModules: []string{"net", "web3"},
HTTPVirtualHosts: []string{"localhost"},
HTTPTimeouts: rpc.DefaultHTTPTimeouts,
WSPort: DefaultWSPort,
WSModules: []string{"net", "web3"},
- GraphQLPort: DefaultGraphQLPort,
GraphQLVirtualHosts: []string{"localhost"},
P2P: p2p.Config{
ListenAddr: ":30300",
diff --git a/node/doc.go b/node/doc.go
index 037de21f3..f393d1ca4 100644
--- a/node/doc.go
+++ b/node/doc.go
@@ -21,6 +21,33 @@ In the model exposed by this package, a node is a collection of services which u
resources to provide RPC APIs. Services can also offer devp2p protocols, which are wired
up to the devp2p network when the node instance is started.
+Node Lifecycle
+The Node object has a lifecycle consisting of three basic states, INITIALIZING, RUNNING
+and CLOSED.
+ ●───────┐
+ New()
+ │
+ ▼
+ INITIALIZING ────Start()─┐
+ │ │
+ │ ▼
+ Close() RUNNING
+ │ │
+ ▼ │
+ CLOSED ◀──────Close()─┘
+Creating a Node allocates basic resources such as the data directory and returns the node
+in its INITIALIZING state. Lifecycle objects, RPC APIs and peer-to-peer networking
+protocols can be registered in this state. Basic operations such as opening a key-value
+database are permitted while initializing.
+Once everything is registered, the node can be started, which moves it into the RUNNING
+state. Starting the node starts all registered Lifecycle objects and enables RPC and
+peer-to-peer networking. Note that no additional Lifecycles, APIs or p2p protocols can be
+registered while the node is running.
+Closing the node releases all held resources. The actions performed by Close depend on the
+state it was in. When closing a node in INITIALIZING state, resources related to the data
+directory are released. If the node was RUNNING, closing it also stops all Lifecycle
+objects and shuts down RPC and peer-to-peer networking.
+You must always call Close on Node, even if the node was not started.
Resources Managed By Node
diff --git a/node/endpoints.go b/node/endpoints.go
new file mode 100644
index 000000000..a412db4b0
--- /dev/null
+++ b/node/endpoints.go
@@ -0,0 +1,85 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/rpc"
+ "net"
+ "net/http"
+ "time"
+)
+
+// StartHTTPEndpoint starts the HTTP RPC endpoint.
+func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.Handler) (net.Listener, error) {
+ // start the HTTP listener
+ var (
+ listener net.Listener
+ err error
+ )
+ if listener, err = net.Listen("tcp", endpoint); err != nil {
+ return nil, err
+ }
+ // make sure timeout values are meaningful
+ CheckTimeouts(&timeouts)
+ // Bundle and start the HTTP server
+ httpSrv := &http.Server{
+ Handler: handler,
+ ReadTimeout: timeouts.ReadTimeout,
+ WriteTimeout: timeouts.WriteTimeout,
+ IdleTimeout: timeouts.IdleTimeout,
+ }
+ go httpSrv.Serve(listener)
+ return listener, err
+}
+
+// checkModuleAvailability checks that all names given in modules are actually
+// available API services. It assumes that the MetadataApi module ("rpc") is always available;
+// the registration of this "rpc" module happens in NewServer() and is thus common to all endpoints.
+func checkModuleAvailability(modules []string, apis []rpc.API) (bad, available []string) {
+ availableSet := make(map[string]struct{})
+ for _, api := range apis {
+ if _, ok := availableSet[api.Namespace]; !ok {
+ availableSet[api.Namespace] = struct{}{}
+ available = append(available, api.Namespace)
+ }
+ }
+ for _, name := range modules {
+ if _, ok := availableSet[name]; !ok {
+ if name != rpc.MetadataApi {
+ bad = append(bad, name)
+ }
+ }
+ }
+ return bad, available
+}
+
+// CheckTimeouts ensures that timeout values are meaningful
+func CheckTimeouts(timeouts *rpc.HTTPTimeouts) {
+ if timeouts.ReadTimeout < time.Second {
+ log.Warn("Sanitizing invalid HTTP read timeout", "provided", timeouts.ReadTimeout, "updated", rpc.DefaultHTTPTimeouts.ReadTimeout)
+ timeouts.ReadTimeout = rpc.DefaultHTTPTimeouts.ReadTimeout
+ }
+ if timeouts.WriteTimeout < time.Second {
+ log.Warn("Sanitizing invalid HTTP write timeout", "provided", timeouts.WriteTimeout, "updated", rpc.DefaultHTTPTimeouts.WriteTimeout)
+ timeouts.WriteTimeout = rpc.DefaultHTTPTimeouts.WriteTimeout
+ }
+ if timeouts.IdleTimeout < time.Second {
+ log.Warn("Sanitizing invalid HTTP idle timeout", "provided", timeouts.IdleTimeout, "updated", rpc.DefaultHTTPTimeouts.IdleTimeout)
+ timeouts.IdleTimeout = rpc.DefaultHTTPTimeouts.IdleTimeout
+ }
+}
diff --git a/node/errors.go b/node/errors.go
index 8edf9237a..6450f6d31 100644
--- a/node/errors.go
+++ b/node/errors.go
@@ -39,17 +39,6 @@ func convertFileLockError(err error) error {
return err
}
-// DuplicateServiceError is returned during Node startup if a registered service
-// constructor returns a service of the same type that was already started.
-type DuplicateServiceError struct {
- Kind reflect.Type
-}
-
-// Error generates a textual representation of the duplicate service error.
-func (e *DuplicateServiceError) Error() string {
- return fmt.Sprintf("duplicate service: %v", e.Kind)
-}
-
// StopError is returned if a Node fails to stop either any of its registered
// services or itself.
type StopError struct {
diff --git a/node/jwt_handler.go b/node/jwt_handler.go
new file mode 100644
index 000000000..92da1cd4e
--- /dev/null
+++ b/node/jwt_handler.go
@@ -0,0 +1,78 @@
+// Copyright 2022 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt/v4"
+)
+
+type jwtHandler struct {
+ keyFunc func(token *jwt.Token) (interface{}, error)
+ next http.Handler
+}
+
+// newJWTHandler creates a http.Handler with jwt authentication support.
+func newJWTHandler(secret []byte, next http.Handler) http.Handler {
+ return &jwtHandler{
+ keyFunc: func(token *jwt.Token) (interface{}, error) {
+ return secret, nil
+ },
+ next: next,
+ }
+}
+
+// ServeHTTP implements http.Handler
+func (handler *jwtHandler) ServeHTTP(out http.ResponseWriter, r *http.Request) {
+ var (
+ strToken string
+ claims jwt.RegisteredClaims
+ )
+ if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "Bearer ") {
+ strToken = strings.TrimPrefix(auth, "Bearer ")
+ }
+ if len(strToken) == 0 {
+ http.Error(out, "missing token", http.StatusForbidden)
+ return
+ }
+ // We explicitly set only HS256 allowed, and also disables the
+ // claim-check: the RegisteredClaims internally requires 'iat' to
+ // be no later than 'now', but we allow for a bit of drift.
+ token, err := jwt.ParseWithClaims(strToken, &claims, handler.keyFunc,
+ jwt.WithValidMethods([]string{"HS256"}),
+ jwt.WithoutClaimsValidation())
+
+ switch {
+ case err != nil:
+ http.Error(out, err.Error(), http.StatusForbidden)
+ case !token.Valid:
+ http.Error(out, "invalid token", http.StatusForbidden)
+ case !claims.VerifyExpiresAt(time.Now(), false): // optional
+ http.Error(out, "token is expired", http.StatusForbidden)
+ case claims.IssuedAt == nil:
+ http.Error(out, "missing issued-at", http.StatusForbidden)
+ case time.Since(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "stale token", http.StatusForbidden)
+ case time.Until(claims.IssuedAt.Time) > 5*time.Second:
+ http.Error(out, "future token", http.StatusForbidden)
+ default:
+ handler.next.ServeHTTP(out, r)
+ }
+}
diff --git a/core/vm/int_pool_verifier.go b/node/lifecycle.go
similarity index 52%
rename from core/vm/int_pool_verifier.go
rename to node/lifecycle.go
index 58b1256c6..5afbd8ad9 100644
--- a/core/vm/int_pool_verifier.go
+++ b/node/lifecycle.go
@@ -1,4 +1,4 @@
-// Copyright 2017 by the Authors
+// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
@@ -14,18 +14,18 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
-// +build VERIFY_CVM_INTEGER_POOL
+package node
-package vm
+// Lifecycle encompasses the behavior of services that can be started and stopped
+// on the node. Lifecycle management is delegated to the node, but it is the
+// responsibility of the service-specific package to configure and register the
+// service on the node using the `RegisterLifecycle` method.
+type Lifecycle interface {
+ // Start is called after all services have been constructed and the networking
+ // layer was also initialized to spawn any goroutines required by the service.
+ Start() error
-import "fmt"
-
-const verifyPool = true
-
-func verifyIntegerPool(ip *intPool) {
- for i, item := range ip.pool.data {
- if item.Cmp(checkVal) != 0 {
- panic(fmt.Sprintf("%d'th item failed aggressive pool check. Value was modified", i))
- }
- }
+ // Stop terminates all goroutines belonging to the service, blocking until they
+ // are all terminated.
+ Stop() error
}
diff --git a/node/node.go b/node/node.go
index de5823d38..f4aa4fd26 100644
--- a/node/node.go
+++ b/node/node.go
@@ -17,10 +17,14 @@
package node
import (
+ crand "crypto/rand"
"errors"
"fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/common/hexutil"
"github.com/core-coin/go-core/core/led"
- "net"
+ "github.com/core-coin/go-core/xcbdb"
+ "net/http"
"os"
"path/filepath"
"reflect"
@@ -30,51 +34,44 @@ import (
"github.com/core-coin/go-core/accounts"
"github.com/core-coin/go-core/core/rawdb"
"github.com/core-coin/go-core/event"
- "github.com/core-coin/go-core/internal/debug"
"github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/p2p"
"github.com/core-coin/go-core/rpc"
- "github.com/core-coin/go-core/xcbdb"
"github.com/prometheus/tsdb/fileutil"
)
// Node is a container on which services can be registered.
type Node struct {
- eventmux *event.TypeMux // Event multiplexer used between the services of a stack
- config *Config
- accman *accounts.Manager
-
- ephemeralKeystore string // if non-empty, the key directory that will be removed by Stop
- instanceDirLock fileutil.Releaser // prevents concurrent use of instance directory
-
- serverConfig p2p.Config
- server *p2p.Server // Currently running P2P networking layer
-
- serviceFuncs []ServiceConstructor // Service constructors (in dependency order)
- services map[reflect.Type]Service // Currently running services
-
+ eventmux *event.TypeMux
+ config *Config
+ accman *accounts.Manager
+ log log.Logger
+ ephemKeystore string // if non-empty, the key directory that will be removed by Stop
+ dirLock fileutil.Releaser // prevents concurrent use of instance directory
+ stop chan struct{} // Channel to wait for termination notifications
+ server *p2p.Server // Currently running P2P networking layer
+ startStopLock sync.Mutex // Start/Stop are protected by an additional lock
+ state int // Tracks state of node lifecycle
+
+ lock sync.Mutex
+ lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
rpcAPIs []rpc.API // List of APIs currently provided by the node
+ http *httpServer //
+ ws *httpServer //
+ httpAuth *httpServer //
+ wsAuth *httpServer //
+ ipc *ipcServer // Stores information about the ipc http server
inprocHandler *rpc.Server // In-process RPC request handler to process the API requests
- ipcEndpoint string // IPC endpoint to listen at (empty = IPC disabled)
- ipcListener net.Listener // IPC RPC listener socket to serve API requests
- ipcHandler *rpc.Server // IPC RPC request handler to process the API requests
-
- httpEndpoint string // HTTP endpoint (interface + port) to listen at (empty = HTTP disabled)
- httpWhitelist []string // HTTP RPC modules to allow through this endpoint
- httpListener net.Listener // HTTP RPC listener socket to server API requests
- httpHandler *rpc.Server // HTTP RPC request handler to process the API requests
-
- wsEndpoint string // Websocket endpoint (interface + port) to listen at (empty = websocket disabled)
- wsListener net.Listener // Websocket RPC listener socket to server API requests
- wsHandler *rpc.Server // Websocket RPC request handler to process the API requests
-
- stop chan struct{} // Channel to wait for termination notifications
- lock sync.RWMutex
-
- log log.Logger
+ databases map[*closeTrackingDB]struct{} // All open databases
}
+const (
+ initializingState = iota
+ runningState
+ closedState
+)
+
// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
// Copy config and resolve the datadir so future changes to the current
@@ -88,6 +85,10 @@ func New(conf *Config) (*Node, error) {
}
conf.DataDir = absdatadir
}
+ if conf.Logger == nil {
+ conf.Logger = log.New()
+ }
+
// Ensure that the instance name doesn't cause weird conflicts with
// other files in the data directory.
if strings.ContainsAny(conf.Name, `/\`) {
@@ -99,43 +100,167 @@ func New(conf *Config) (*Node, error) {
if strings.HasSuffix(conf.Name, ".ipc") {
return nil, errors.New(`Config.Name cannot end in ".ipc"`)
}
- // Ensure that the AccountManager method works before the node has started.
- // We rely on this in cmd/gocore.
+
+ node := &Node{
+ config: conf,
+ inprocHandler: rpc.NewServer(),
+ eventmux: new(event.TypeMux),
+ log: conf.Logger,
+ stop: make(chan struct{}),
+ server: &p2p.Server{Config: conf.P2P},
+ databases: make(map[*closeTrackingDB]struct{}),
+ }
+
+ // Register built-in APIs.
+ node.rpcAPIs = append(node.rpcAPIs, node.apis()...)
+
+ // Acquire the instance directory lock.
+ if err := node.openDataDir(); err != nil {
+ return nil, err
+ }
+ // Ensure that the AccountManager method works before the node has started. We rely on
+ // this in cmd/gocore.
am, ephemeralKeystore, err := makeAccountManager(conf)
if err != nil {
return nil, err
}
- if conf.Logger == nil {
- conf.Logger = log.New()
+ node.accman = am
+ node.ephemKeystore = ephemeralKeystore
+
+ // Initialize the p2p server. This creates the node key and discovery databases.
+ node.server.Config.PrivateKey = node.config.NodeKey()
+ node.server.Config.Name = node.config.NodeName()
+ node.server.Config.Logger = node.log
+ if node.server.Config.StaticNodes == nil {
+ node.server.Config.StaticNodes = node.config.StaticNodes()
+ }
+ if node.server.Config.TrustedNodes == nil {
+ node.server.Config.TrustedNodes = node.config.TrustedNodes()
+ }
+ if node.server.Config.NodeDatabase == "" {
+ node.server.Config.NodeDatabase = node.config.NodeDB()
+ }
+
+ // Check HTTP/WS prefixes are valid.
+ if err := validatePrefix("HTTP", conf.HTTPPathPrefix); err != nil {
+ return nil, err
+ }
+ if err := validatePrefix("WebSocket", conf.WSPathPrefix); err != nil {
+ return nil, err
+ }
+
+ // Configure RPC servers.
+ node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
+ node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts)
+ node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
+ node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
+ node.ipc = newIPCServer(node.log, conf.IPCEndpoint())
+
+ return node, nil
+}
+
+// Start starts all registered lifecycles, RPC services and p2p networking.
+// Node can only be started once.
+func (n *Node) Start() error {
+ n.startStopLock.Lock()
+ defer n.startStopLock.Unlock()
+
+ n.lock.Lock()
+ switch n.state {
+ case runningState:
+ n.lock.Unlock()
+ return ErrNodeRunning
+ case closedState:
+ n.lock.Unlock()
+ return ErrNodeStopped
+ }
+ n.state = runningState
+ // open networking and RPC endpoints
+ err := n.openEndpoints()
+ lifecycles := make([]Lifecycle, len(n.lifecycles))
+ copy(lifecycles, n.lifecycles)
+ n.lock.Unlock()
+
+ // Check if networking startup failed.
+ if err != nil {
+ n.doClose(nil)
+ return err
}
- // Note: any interaction with Config that would create/touch files
- // in the data directory or instance directory is delayed until Start.
- return &Node{
- accman: am,
- ephemeralKeystore: ephemeralKeystore,
- config: conf,
- serviceFuncs: []ServiceConstructor{},
- ipcEndpoint: conf.IPCEndpoint(),
- httpEndpoint: conf.HTTPEndpoint(),
- wsEndpoint: conf.WSEndpoint(),
- eventmux: new(event.TypeMux),
- log: conf.Logger,
- }, nil
+ // Start all registered lifecycles.
+ var started []Lifecycle
+ for _, lifecycle := range lifecycles {
+ if err = lifecycle.Start(); err != nil {
+ break
+ }
+ started = append(started, lifecycle)
+ }
+ // Check if any lifecycle failed to start.
+ if err != nil {
+ n.stopServices(started)
+ n.doClose(nil)
+ }
+ return err
}
// Close stops the Node and releases resources acquired in
// Node constructor New.
func (n *Node) Close() error {
- var errs []error
+ n.startStopLock.Lock()
+ defer n.startStopLock.Unlock()
- // Terminate all subsystems and collect any errors
- if err := n.Stop(); err != nil && err != ErrNodeStopped {
- errs = append(errs, err)
+ n.lock.Lock()
+ state := n.state
+ n.lock.Unlock()
+ switch state {
+ case initializingState:
+ // The node was never started.
+ return n.doClose(nil)
+ case runningState:
+ // The node was started, release resources acquired by Start().
+ var errs []error
+ if err := n.stopServices(n.lifecycles); err != nil {
+ errs = append(errs, err)
+ }
+ return n.doClose(errs)
+ case closedState:
+ return ErrNodeStopped
+ default:
+ panic(fmt.Sprintf("node is in unknown state %d", state))
}
+}
+
+// doClose releases resources acquired by New(), collecting errors.
+func (n *Node) doClose(errs []error) error {
+ // Close databases. This needs the lock because it needs to
+ // synchronize with OpenDatabase*.
+ n.lock.Lock()
+ n.state = closedState
+ errs = append(errs, n.closeDatabases()...)
+ n.lock.Unlock()
+
+ // Disable Led
+ if n.config.Led {
+ if err := led.DisableLed(n.config.LedGPIOPort); err != nil {
+ return err
+ }
+ }
+
if err := n.accman.Close(); err != nil {
errs = append(errs, err)
}
- // Report any errors that might have occurred
+ if n.ephemKeystore != "" {
+ if err := os.RemoveAll(n.ephemKeystore); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Release instance directory lock.
+ n.closeDataDir()
+
+ // Unblock n.Wait.
+ close(n.stop)
+
+ // Report any errors that might have occurred.
switch len(errs) {
case 0:
return nil
@@ -146,121 +271,58 @@ func (n *Node) Close() error {
}
}
-// Register injects a new service into the node's stack. The service created by
-// the passed constructor must be unique in its type with regard to sibling ones.
-func (n *Node) Register(constructor ServiceConstructor) error {
- n.lock.Lock()
- defer n.lock.Unlock()
-
- if n.server != nil {
- return ErrNodeRunning
- }
- n.serviceFuncs = append(n.serviceFuncs, constructor)
- return nil
-}
-
-// Start creates a live P2P node and starts running it.
-func (n *Node) Start() error {
- n.lock.Lock()
- defer n.lock.Unlock()
-
- // Short circuit if the node's already running
- if n.server != nil {
- return ErrNodeRunning
+// openEndpoints starts all network and RPC endpoints.
+func (n *Node) openEndpoints() error {
+ // start networking endpoints
+ n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
+ if err := n.server.Start(); err != nil {
+ return convertFileLockError(err)
}
- if err := n.openDataDir(); err != nil {
- return err
+ // start RPC endpoints
+ err := n.startRPC()
+ if err != nil {
+ n.stopRPC()
+ n.server.Stop()
}
+ return err
+}
- // Initialize the p2p server. This creates the node key and
- // discovery databases.
- n.serverConfig = n.config.P2P
- n.serverConfig.PrivateKey = n.config.NodeKey()
- n.serverConfig.Name = n.config.NodeName()
- n.serverConfig.Logger = n.log
- if n.serverConfig.StaticNodes == nil {
- n.serverConfig.StaticNodes = n.config.StaticNodes()
- }
- if n.serverConfig.TrustedNodes == nil {
- n.serverConfig.TrustedNodes = n.config.TrustedNodes()
- }
- if n.serverConfig.NodeDatabase == "" {
- n.serverConfig.NodeDatabase = n.config.NodeDB()
- }
- running := &p2p.Server{Config: n.serverConfig}
- n.log.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name)
-
- // Otherwise copy and specialize the P2P configuration
- services := make(map[reflect.Type]Service)
- for _, constructor := range n.serviceFuncs {
- // Create a new context for the particular service
- ctx := &ServiceContext{
- Config: *n.config,
- services: make(map[reflect.Type]Service),
- EventMux: n.eventmux,
- AccountManager: n.accman,
- }
- for kind, s := range services { // copy needed for threaded access
- ctx.services[kind] = s
+// containsLifecycle checks if 'lfs' contains 'l'.
+func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool {
+ for _, obj := range lfs {
+ if obj == l {
+ return true
}
- // Construct and save the service
- service, err := constructor(ctx)
- if err != nil {
- return err
- }
- kind := reflect.TypeOf(service)
- if _, exists := services[kind]; exists {
- return &DuplicateServiceError{Kind: kind}
- }
- services[kind] = service
}
- // Gather the protocols and start the freshly assembled P2P server
- for _, service := range services {
- running.Protocols = append(running.Protocols, service.Protocols()...)
- }
- if err := running.Start(); err != nil {
- return convertFileLockError(err)
- }
- // Start each of the services
- var started []reflect.Type
- for kind, service := range services {
- // Start the next service, stopping all previous upon failure
- if err := service.Start(running); err != nil {
- for _, kind := range started {
- services[kind].Stop()
- }
- running.Stop()
+ return false
+}
- return err
+// stopServices terminates running services, RPC and p2p networking.
+// It is the inverse of Start.
+func (n *Node) stopServices(running []Lifecycle) error {
+ n.stopRPC()
+
+ // Stop running lifecycles in reverse order.
+ failure := &StopError{Services: make(map[reflect.Type]error)}
+ for i := len(running) - 1; i >= 0; i-- {
+ if err := running[i].Stop(); err != nil {
+ failure.Services[reflect.TypeOf(running[i])] = err
}
- // Mark the service started for potential cleanup
- started = append(started, kind)
}
- // Lastly, start the configured RPC interfaces
- if err := n.startRPC(services); err != nil {
- for _, service := range services {
- service.Stop()
- }
- running.Stop()
- return err
+
+ // Stop p2p networking.
+ n.server.Stop()
+
+ if len(failure.Services) > 0 {
+ return failure
}
- // Finish initializing the startup
- n.services = services
- n.server = running
- n.stop = make(chan struct{})
return nil
}
-// Config returns the configuration of node.
-func (n *Node) Config() *Config {
- return n.config
-}
-
func (n *Node) openDataDir() error {
if n.config.DataDir == "" {
return nil // ephemeral
}
-
instdir := filepath.Join(n.config.DataDir, n.config.name())
if err := os.MkdirAll(instdir, 0700); err != nil {
return err
@@ -271,299 +333,314 @@ func (n *Node) openDataDir() error {
if err != nil {
return convertFileLockError(err)
}
- n.instanceDirLock = release
+ n.dirLock = release
return nil
}
-// startRPC is a helper method to start all the various RPC endpoints during node
-// startup. It's not meant to be called at any time afterwards as it makes certain
-// assumptions about the state of the node.
-func (n *Node) startRPC(services map[reflect.Type]Service) error {
- // Gather all the possible APIs to surface
- apis := n.apis()
- for _, service := range services {
- apis = append(apis, service.APIs()...)
- }
- // Start the various API endpoints, terminating all in case of errors
- if err := n.startInProc(apis); err != nil {
- return err
- }
- if err := n.startIPC(apis); err != nil {
- n.stopInProc()
- return err
- }
- if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts, n.config.HTTPTimeouts); err != nil {
- n.stopIPC()
- n.stopInProc()
- return err
- }
- if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins, n.config.WSExposeAll); err != nil {
- n.stopHTTP()
- n.stopIPC()
- n.stopInProc()
- return err
+func (n *Node) closeDataDir() {
+ // Release instance directory lock.
+ if n.dirLock != nil {
+ if err := n.dirLock.Release(); err != nil {
+ n.log.Error("Can't release datadir lock", "err", err)
+ }
+ n.dirLock = nil
}
- // All API endpoints started successfully
- n.rpcAPIs = apis
- return nil
}
-// startInProc initializes an in-process RPC endpoint.
-func (n *Node) startInProc(apis []rpc.API) error {
- // Register all the APIs exposed by the services
- handler := rpc.NewServer()
- for _, api := range apis {
- if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
- return err
+// obtainJWTSecret loads the jwt-secret, either from the provided config,
+// or from the default location. If neither of those are present, it generates
+// a new secret and stores to the default location.
+func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
+ var fileName string
+ if len(cliParam) > 0 {
+ // If a plaintext secret was provided via cli flags, use that
+ jwtSecret := common.FromHex(cliParam)
+ if len(jwtSecret) == 32 && strings.HasPrefix(cliParam, "0x") {
+ log.Warn("Plaintext JWT secret provided, please consider passing via file")
+ return jwtSecret, nil
}
- n.log.Debug("InProc registered", "namespace", api.Namespace)
+ // path provided
+ fileName = cliParam
+ } else {
+ // no path provided, use default
+ fileName = n.ResolvePath(datadirJWTKey)
+ }
+ // try reading from file
+ log.Debug("Reading JWT secret", "path", fileName)
+ if data, err := os.ReadFile(fileName); err == nil {
+ jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
+ if len(jwtSecret) == 32 {
+ return jwtSecret, nil
+ }
+ log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret))
+ return nil, errors.New("invalid JWT secret")
+ }
+ // Need to generate one
+ jwtSecret := make([]byte, 32)
+ crand.Read(jwtSecret)
+ // if we're in --dev mode, don't bother saving, just show it
+ if fileName == "" {
+ log.Info("Generated ephemeral JWT secret", "secret", hexutil.Encode(jwtSecret))
+ return jwtSecret, nil
+ }
+ if err := os.WriteFile(fileName, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil {
+ return nil, err
}
- n.inprocHandler = handler
- return nil
+ log.Info("Generated JWT secret", "path", fileName)
+ return jwtSecret, nil
}
-// stopInProc terminates the in-process RPC endpoint.
-func (n *Node) stopInProc() {
- if n.inprocHandler != nil {
- n.inprocHandler.Stop()
- n.inprocHandler = nil
+// startRPC is a helper method to configure all the various RPC endpoints during node
+// startup. It's not meant to be called at any time afterwards as it makes certain
+// assumptions about the state of the node.
+func (n *Node) startRPC() error {
+ if err := n.startInProc(); err != nil {
+ return err
}
-}
-// startIPC initializes and starts the IPC RPC endpoint.
-func (n *Node) startIPC(apis []rpc.API) error {
- if n.ipcEndpoint == "" {
- return nil // IPC disabled.
- }
- listener, handler, err := rpc.StartIPCEndpoint(n.ipcEndpoint, apis)
- if err != nil {
- return err
+ // Configure IPC.
+ if n.ipc.endpoint != "" {
+ if err := n.ipc.start(n.rpcAPIs); err != nil {
+ return err
+ }
}
- n.ipcListener = listener
- n.ipcHandler = handler
- n.log.Info("IPC endpoint opened", "url", n.ipcEndpoint)
- return nil
-}
-// stopIPC terminates the IPC RPC endpoint.
-func (n *Node) stopIPC() {
- if n.ipcListener != nil {
- n.ipcListener.Close()
- n.ipcListener = nil
+ var (
+ servers []*httpServer
+ open, all = n.GetAPIs()
+ )
- n.log.Info("IPC endpoint closed", "url", n.ipcEndpoint)
+ initHttp := func(server *httpServer, apis []rpc.API, port int) error {
+ if err := server.setListenAddr(n.config.HTTPHost, port); err != nil {
+ return err
+ }
+ if err := server.enableRPC(apis, httpConfig{
+ CorsAllowedOrigins: n.config.HTTPCors,
+ Vhosts: n.config.HTTPVirtualHosts,
+ Modules: n.config.HTTPModules,
+ prefix: n.config.HTTPPathPrefix,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
}
- if n.ipcHandler != nil {
- n.ipcHandler.Stop()
- n.ipcHandler = nil
+ initWS := func(apis []rpc.API, port int) error {
+ server := n.wsServerForPort(port, false)
+ if err := server.setListenAddr(n.config.WSHost, port); err != nil {
+ return err
+ }
+ if err := server.enableWS(n.rpcAPIs, wsConfig{
+ Modules: n.config.WSModules,
+ Origins: n.config.WSOrigins,
+ prefix: n.config.WSPathPrefix,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ return nil
}
-}
-// startHTTP initializes and starts the HTTP RPC endpoint.
-func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) error {
- // Short circuit if the HTTP endpoint isn't being exposed
- if endpoint == "" {
+ initAuth := func(apis []rpc.API, port int, secret []byte) error {
+ // Enable auth via HTTP
+ server := n.httpAuth
+ if err := server.setListenAddr(DefaultAuthHost, port); err != nil {
+ return err
+ }
+ if err := server.enableRPC(apis, httpConfig{
+ CorsAllowedOrigins: DefaultAuthCors,
+ Vhosts: DefaultAuthVhosts,
+ Modules: DefaultAuthModules,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
+ // Enable auth via WS
+ server = n.wsServerForPort(port, true)
+ if err := server.setListenAddr(DefaultAuthHost, port); err != nil {
+ return err
+ }
+ if err := server.enableWS(apis, wsConfig{
+ Modules: DefaultAuthModules,
+ Origins: DefaultAuthOrigins,
+ prefix: DefaultAuthPrefix,
+ jwtSecret: secret,
+ }); err != nil {
+ return err
+ }
+ servers = append(servers, server)
return nil
}
- listener, handler, err := rpc.StartHTTPEndpoint(endpoint, apis, modules, cors, vhosts, timeouts)
- if err != nil {
- return err
+ // Set up HTTP.
+ if n.config.HTTPHost != "" {
+ // Configure legacy unauthenticated HTTP.
+ if err := initHttp(n.http, open, n.config.HTTPPort); err != nil {
+ return err
+ }
}
- n.log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", listener.Addr()),
- "cors", strings.Join(cors, ","),
- "vhosts", strings.Join(vhosts, ","))
- // All listeners booted successfully
- n.httpEndpoint = endpoint
- n.httpListener = listener
- n.httpHandler = handler
+ // Configure WebSocket.
+ if n.config.WSHost != "" {
+ // legacy unauthenticated
+ if err := initWS(open, n.config.WSPort); err != nil {
+ return err
+ }
+ }
+ // Configure authenticated API
+ if len(open) != len(all) {
+ jwtSecret, err := n.obtainJWTSecret(n.config.JWTSecret)
+ if err != nil {
+ return err
+ }
+ if err := initAuth(all, n.config.AuthPort, jwtSecret); err != nil {
+ return err
+ }
+ }
+ // Start the servers
+ for _, server := range servers {
+ if err := server.start(); err != nil {
+ return err
+ }
+ }
return nil
}
-// stopHTTP terminates the HTTP RPC endpoint.
-func (n *Node) stopHTTP() {
- if n.httpListener != nil {
- url := fmt.Sprintf("http://%v/", n.httpListener.Addr())
- n.httpListener.Close()
- n.httpListener = nil
- n.log.Info("HTTP endpoint closed", "url", url)
+func (n *Node) wsServerForPort(port int, authenticated bool) *httpServer {
+ httpServer, wsServer := n.http, n.ws
+ if authenticated {
+ httpServer, wsServer = n.httpAuth, n.wsAuth
}
- if n.httpHandler != nil {
- n.httpHandler.Stop()
- n.httpHandler = nil
+ if n.config.HTTPHost == "" || httpServer.port == port {
+ return httpServer
}
+ return wsServer
}
-// startWS initializes and starts the websocket RPC endpoint.
-func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) error {
- // Short circuit if the WS endpoint isn't being exposed
- if endpoint == "" {
- return nil
- }
- listener, handler, err := rpc.StartWSEndpoint(endpoint, apis, modules, wsOrigins, exposeAll)
- if err != nil {
- return err
- }
- n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%s", listener.Addr()))
- // All listeners booted successfully
- n.wsEndpoint = endpoint
- n.wsListener = listener
- n.wsHandler = handler
+func (n *Node) stopRPC() {
+ n.http.stop()
+ n.ws.stop()
+ n.httpAuth.stop()
+ n.wsAuth.stop()
+ n.ipc.stop()
+ n.stopInProc()
+}
+// startInProc registers all RPC APIs on the inproc server.
+func (n *Node) startInProc() error {
+ for _, api := range n.rpcAPIs {
+ if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil {
+ return err
+ }
+ }
return nil
}
-// stopWS terminates the websocket RPC endpoint.
-func (n *Node) stopWS() {
- if n.wsListener != nil {
- n.wsListener.Close()
- n.wsListener = nil
+// stopInProc terminates the in-process RPC endpoint.
+func (n *Node) stopInProc() {
+ n.inprocHandler.Stop()
+}
- n.log.Info("WebSocket endpoint closed", "url", fmt.Sprintf("ws://%s", n.wsEndpoint))
- }
- if n.wsHandler != nil {
- n.wsHandler.Stop()
- n.wsHandler = nil
- }
+// Wait blocks until the node is closed.
+func (n *Node) Wait() {
+ <-n.stop
}
-// Stop terminates a running node along with all it's services. In the node was
-// not started, an error is returned.
-func (n *Node) Stop() error {
+// RegisterLifecycle registers the given Lifecycle on the node.
+func (n *Node) RegisterLifecycle(lifecycle Lifecycle) {
n.lock.Lock()
defer n.lock.Unlock()
- // Short circuit if the node's not running
- if n.server == nil {
- return ErrNodeStopped
+ if n.state != initializingState {
+ panic("can't register lifecycle on running/stopped node")
}
-
- // Terminate the API, services and the p2p server.
- n.stopWS()
- n.stopHTTP()
- n.stopIPC()
- n.rpcAPIs = nil
- failure := &StopError{
- Services: make(map[reflect.Type]error),
- }
- for kind, service := range n.services {
- if err := service.Stop(); err != nil {
- failure.Services[kind] = err
- }
- }
- n.server.Stop()
- n.services = nil
- n.server = nil
-
- // Release instance directory lock.
- if n.instanceDirLock != nil {
- if err := n.instanceDirLock.Release(); err != nil {
- n.log.Error("Can't release datadir lock", "err", err)
- }
- n.instanceDirLock = nil
+ if containsLifecycle(n.lifecycles, lifecycle) {
+ panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle))
}
+ n.lifecycles = append(n.lifecycles, lifecycle)
+}
- // unblock n.Wait
- close(n.stop)
+// RegisterProtocols adds backend's protocols to the node's p2p server.
+func (n *Node) RegisterProtocols(protocols []p2p.Protocol) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
- // Disable Led
- if n.config.Led {
- if err := led.DisableLed(n.config.LedGPIOPort); err != nil {
- return err
- }
+ if n.state != initializingState {
+ panic("can't register protocols on running/stopped node")
}
+ n.server.Protocols = append(n.server.Protocols, protocols...)
+}
- // Remove the keystore if it was created ephemerally.
- var keystoreErr error
- if n.ephemeralKeystore != "" {
- keystoreErr = os.RemoveAll(n.ephemeralKeystore)
- }
+// RegisterAPIs registers the APIs a service provides on the node.
+func (n *Node) RegisterAPIs(apis []rpc.API) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
- if len(failure.Services) > 0 {
- return failure
+ if n.state != initializingState {
+ panic("can't register APIs on running/stopped node")
}
- if keystoreErr != nil {
- return keystoreErr
- }
- return nil
+ n.rpcAPIs = append(n.rpcAPIs, apis...)
}
-// Wait blocks the thread until the node is stopped. If the node is not running
-// at the time of invocation, the method immediately returns.
-func (n *Node) Wait() {
- n.lock.RLock()
- if n.server == nil {
- n.lock.RUnlock()
- return
+// GetAPIs return two sets of APIs, both the ones that do not require
+// authentication, and the complete set
+func (n *Node) GetAPIs() (unauthenticated, all []rpc.API) {
+ for _, api := range n.rpcAPIs {
+ if !api.Authenticated {
+ unauthenticated = append(unauthenticated, api)
+ }
}
- stop := n.stop
- n.lock.RUnlock()
-
- <-stop
+ return unauthenticated, n.rpcAPIs
}
-// Restart terminates a running node and boots up a new one in its place. If the
-// node isn't running, an error is returned.
-func (n *Node) Restart() error {
- if err := n.Stop(); err != nil {
- return err
- }
- if err := n.Start(); err != nil {
- return err
+// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
+//
+// The name of the handler is shown in a log message when the HTTP server starts
+// and should be a descriptive term for the service provided by the handler.
+func (n *Node) RegisterHandler(name, path string, handler http.Handler) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ if n.state != initializingState {
+ panic("can't register HTTP handler on running/stopped node")
}
- return nil
+
+ n.http.mux.Handle(path, handler)
+ n.http.handlerNames[path] = name
}
// Attach creates an RPC client attached to an in-process API handler.
func (n *Node) Attach() (*rpc.Client, error) {
- n.lock.RLock()
- defer n.lock.RUnlock()
-
- if n.server == nil {
- return nil, ErrNodeStopped
- }
return rpc.DialInProc(n.inprocHandler), nil
}
// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
- n.lock.RLock()
- defer n.lock.RUnlock()
+ n.lock.Lock()
+ defer n.lock.Unlock()
- if n.inprocHandler == nil {
+ if n.state == closedState {
return nil, ErrNodeStopped
}
return n.inprocHandler, nil
}
+// Config returns the configuration of node.
+func (n *Node) Config() *Config {
+ return n.config
+}
+
// Server retrieves the currently running P2P network layer. This method is meant
-// only to inspect fields of the currently running server, life cycle management
-// should be left to this Node entity.
+// only to inspect fields of the currently running server. Callers should not
+// start or stop the returned server.
func (n *Node) Server() *p2p.Server {
- n.lock.RLock()
- defer n.lock.RUnlock()
+ n.lock.Lock()
+ defer n.lock.Unlock()
return n.server
}
-// Service retrieves a currently running service registered of a specific type.
-func (n *Node) Service(service interface{}) error {
- n.lock.RLock()
- defer n.lock.RUnlock()
-
- // Short circuit if the node's not running
- if n.server == nil {
- return ErrNodeStopped
- }
- // Otherwise try to find the service to return
- element := reflect.ValueOf(service).Elem()
- if running, ok := n.services[element.Type()]; ok {
- element.Set(reflect.ValueOf(running))
- return nil
- }
- return ErrServiceUnknown
-}
-
// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
@@ -582,29 +659,21 @@ func (n *Node) AccountManager() *accounts.Manager {
// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
- return n.ipcEndpoint
+ return n.ipc.endpoint
}
-// HTTPEndpoint retrieves the current HTTP endpoint used by the protocol stack.
+// HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not
+// contain the JSON-RPC path prefix set by HTTPPathPrefix.
func (n *Node) HTTPEndpoint() string {
- n.lock.Lock()
- defer n.lock.Unlock()
-
- if n.httpListener != nil {
- return n.httpListener.Addr().String()
- }
- return n.httpEndpoint
+ return "http://" + n.http.listenAddr()
}
-// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
+// WSEndpoint returns the current JSON-RPC over WebSocket endpoint.
func (n *Node) WSEndpoint() string {
- n.lock.Lock()
- defer n.lock.Unlock()
-
- if n.wsListener != nil {
- return n.wsListener.Addr().String()
+ if n.http.wsAllowed() {
+ return "ws://" + n.http.listenAddr() + n.http.wsConfig.prefix
}
- return n.wsEndpoint
+ return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix
}
// EventMux retrieves the event multiplexer used by all the network services in
@@ -617,10 +686,24 @@ func (n *Node) EventMux() *event.TypeMux {
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (xcbdb.Database, error) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ if n.state == closedState {
+ return nil, ErrNodeStopped
+ }
+
+ var db xcbdb.Database
+ var err error
if n.config.DataDir == "" {
- return rawdb.NewMemoryDatabase(), nil
+ db = rawdb.NewMemoryDatabase()
+ } else {
+ db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace)
}
- return rawdb.NewLevelDBDatabase(n.config.ResolvePath(name), cache, handles, namespace)
+
+ if err == nil {
+ db = n.wrapDatabase(db)
+ }
+ return db, err
}
// OpenDatabaseWithFreezer opens an existing database with the given name (or
@@ -629,18 +712,31 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (xcbdb.Database, error) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ if n.state == closedState {
+ return nil, ErrNodeStopped
+ }
+
+ var db xcbdb.Database
+ var err error
if n.config.DataDir == "" {
- return rawdb.NewMemoryDatabase(), nil
+ db = rawdb.NewMemoryDatabase()
+ } else {
+ root := n.ResolvePath(name)
+ switch {
+ case freezer == "":
+ freezer = filepath.Join(root, "ancient")
+ case !filepath.IsAbs(freezer):
+ freezer = n.ResolvePath(freezer)
+ }
+ db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
}
- root := n.config.ResolvePath(name)
- switch {
- case freezer == "":
- freezer = filepath.Join(root, "ancient")
- case !filepath.IsAbs(freezer):
- freezer = n.config.ResolvePath(freezer)
+ if err == nil {
+ db = n.wrapDatabase(db)
}
- return rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
+ return db, err
}
// ResolvePath returns the absolute path of a resource in the instance directory.
@@ -648,27 +744,35 @@ func (n *Node) ResolvePath(x string) string {
return n.config.ResolvePath(x)
}
-// apis returns the collection of RPC descriptors this node offers.
-func (n *Node) apis() []rpc.API {
- return []rpc.API{
- {
- Namespace: "admin",
- Version: "1.0",
- Service: NewPrivateAdminAPI(n),
- }, {
- Namespace: "admin",
- Version: "1.0",
- Service: NewPublicAdminAPI(n),
- Public: true,
- }, {
- Namespace: "debug",
- Version: "1.0",
- Service: debug.Handler,
- }, {
- Namespace: "web3",
- Version: "1.0",
- Service: NewPublicWeb3API(n),
- Public: true,
- },
+// closeTrackingDB wraps the Close method of a database. When the database is closed by the
+// service, the wrapper removes it from the node's database map. This ensures that Node
+// won't auto-close the database if it is closed by the service that opened it.
+type closeTrackingDB struct {
+ xcbdb.Database
+ n *Node
+}
+
+func (db *closeTrackingDB) Close() error {
+ db.n.lock.Lock()
+ delete(db.n.databases, db)
+ db.n.lock.Unlock()
+ return db.Database.Close()
+}
+
+// wrapDatabase ensures the database will be auto-closed when Node is closed.
+func (n *Node) wrapDatabase(db xcbdb.Database) xcbdb.Database {
+ wrapper := &closeTrackingDB{db, n}
+ n.databases[wrapper] = struct{}{}
+ return wrapper
+}
+
+// closeDatabases closes all open databases.
+func (n *Node) closeDatabases() (errors []error) {
+ for db := range n.databases {
+ delete(n.databases, db)
+ if err := db.Database.Close(); err != nil {
+ errors = append(errors, err)
+ }
}
+ return errors
}
diff --git a/node/node_example_test.go b/node/node_example_test.go
index 79d4fee61..c71ed8091 100644
--- a/node/node_example_test.go
+++ b/node/node_example_test.go
@@ -21,24 +21,18 @@ import (
"log"
"github.com/core-coin/go-core/node"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/rpc"
)
-// SampleService is a trivial network service that can be attached to a node for
+// SampleLifecycle is a trivial network service that can be attached to a node for
// life cycle management.
//
-// The following methods are needed to implement a node.Service:
-// - Protocols() []p2p.Protocol - devp2p protocols the service can communicate on
-// - APIs() []rpc.API - api methods the service wants to expose on rpc channels
+// The following methods are needed to implement a node.Lifecycle:
// - Start() error - method invoked when the node is ready to start the service
// - Stop() error - method invoked when the node terminates the service
-type SampleService struct{}
+type SampleLifecycle struct{}
-func (s *SampleService) Protocols() []p2p.Protocol { return nil }
-func (s *SampleService) APIs() []rpc.API { return nil }
-func (s *SampleService) Start(*p2p.Server) error { fmt.Println("Service starting..."); return nil }
-func (s *SampleService) Stop() error { fmt.Println("Service stopping..."); return nil }
+func (s *SampleLifecycle) Start() error { fmt.Println("Service starting..."); return nil }
+func (s *SampleLifecycle) Stop() error { fmt.Println("Service stopping..."); return nil }
func ExampleService() {
// Create a network node to run protocols with the default values.
@@ -48,29 +42,17 @@ func ExampleService() {
}
defer stack.Close()
- // Create and register a simple network service. This is done through the definition
- // of a node.ServiceConstructor that will instantiate a node.Service. The reason for
- // the factory method approach is to support service restarts without relying on the
- // individual implementations' support for such operations.
- constructor := func(context *node.ServiceContext) (node.Service, error) {
- return new(SampleService), nil
- }
- if err := stack.Register(constructor); err != nil {
- log.Fatalf("Failed to register service: %v", err)
- }
+ // Create and register a simple network Lifecycle.
+ service := new(SampleLifecycle)
+ stack.RegisterLifecycle(service)
// Boot up the entire protocol stack, do a restart and terminate
if err := stack.Start(); err != nil {
log.Fatalf("Failed to start the protocol stack: %v", err)
}
- if err := stack.Restart(); err != nil {
- log.Fatalf("Failed to restart the protocol stack: %v", err)
- }
- if err := stack.Stop(); err != nil {
+ if err := stack.Close(); err != nil {
log.Fatalf("Failed to stop the protocol stack: %v", err)
}
// Output:
// Service starting...
// Service stopping...
- // Service starting...
- // Service stopping...
}
diff --git a/node/node_test.go b/node/node_test.go
index 2bcc28bbc..0b90a1264 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -19,15 +19,20 @@ package node
import (
"crypto/rand"
"errors"
+ "fmt"
+ "github.com/core-coin/go-core/crypto"
+ "github.com/core-coin/go-core/p2p"
+ "github.com/core-coin/go-core/rpc"
+ "github.com/core-coin/go-core/xcbdb"
+ "github.com/stretchr/testify/assert"
+ "io"
"io/ioutil"
+ "net"
+ "net/http"
"os"
"reflect"
+ "strings"
"testing"
- "time"
-
- "github.com/core-coin/go-core/crypto"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/rpc"
)
var (
@@ -41,20 +46,27 @@ func testNodeConfig() *Config {
}
}
-// Tests that an empty protocol stack can be started, restarted and stopped.
-func TestNodeLifeCycle(t *testing.T) {
+// Tests that an empty protocol stack can be closed more than once.
+func TestNodeCloseMultipleTimes(t *testing.T) {
stack, err := New(testNodeConfig())
if err != nil {
t.Fatalf("failed to create protocol stack: %v", err)
}
- defer stack.Close()
+ stack.Close()
// Ensure that a stopped node can be stopped again
for i := 0; i < 3; i++ {
- if err := stack.Stop(); err != ErrNodeStopped {
+ if err := stack.Close(); err != ErrNodeStopped {
t.Fatalf("iter %d: stop failure mismatch: have %v, want %v", i, err, ErrNodeStopped)
}
}
+}
+
+func TestNodeStartMultipleTimes(t *testing.T) {
+ stack, err := New(testNodeConfig())
+ if err != nil {
+ t.Fatalf("failed to create protocol stack: %v", err)
+ }
// Ensure that a node can be successfully started, but only once
if err := stack.Start(); err != nil {
t.Fatalf("failed to start node: %v", err)
@@ -62,17 +74,12 @@ func TestNodeLifeCycle(t *testing.T) {
if err := stack.Start(); err != ErrNodeRunning {
t.Fatalf("start failure mismatch: have %v, want %v ", err, ErrNodeRunning)
}
- // Ensure that a node can be restarted arbitrarily many times
- for i := 0; i < 3; i++ {
- if err := stack.Restart(); err != nil {
- t.Fatalf("iter %d: failed to restart node: %v", i, err)
- }
- }
+
// Ensure that a node can be stopped, but only once
- if err := stack.Stop(); err != nil {
+ if err := stack.Close(); err != nil {
t.Fatalf("failed to stop node: %v", err)
}
- if err := stack.Stop(); err != ErrNodeStopped {
+ if err := stack.Close(); err != ErrNodeStopped {
t.Fatalf("stop failure mismatch: have %v, want %v ", err, ErrNodeStopped)
}
}
@@ -96,88 +103,149 @@ func TestNodeUsedDataDir(t *testing.T) {
if err := original.Start(); err != nil {
t.Fatalf("failed to start original protocol stack: %v", err)
}
- defer original.Stop()
// Create a second node based on the same data directory and ensure failure
- duplicate, err := New(&Config{DataDir: dir})
- if err != nil {
- t.Fatalf("failed to create duplicate protocol stack: %v", err)
- }
- defer duplicate.Close()
-
- if err := duplicate.Start(); err != ErrDatadirUsed {
+ _, err = New(&Config{DataDir: dir})
+ if err != ErrDatadirUsed {
t.Fatalf("duplicate datadir failure mismatch: have %v, want %v", err, ErrDatadirUsed)
}
}
-// Tests whether services can be registered and duplicates caught.
-func TestServiceRegistry(t *testing.T) {
+// Tests whether a Lifecycle can be registered.
+func TestLifecycleRegistry_Successful(t *testing.T) {
stack, err := New(testNodeConfig())
if err != nil {
t.Fatalf("failed to create protocol stack: %v", err)
}
defer stack.Close()
- // Register a batch of unique services and ensure they start successfully
- services := []ServiceConstructor{NewNoopServiceA, NewNoopServiceB, NewNoopServiceC}
- for i, constructor := range services {
- if err := stack.Register(constructor); err != nil {
- t.Fatalf("service #%d: registration failed: %v", i, err)
- }
+ noop := NewNoop()
+ stack.RegisterLifecycle(noop)
+
+ if !containsLifecycle(stack.lifecycles, noop) {
+ t.Fatalf("lifecycle was not properly registered on the node, %v", err)
}
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start original service stack: %v", err)
+}
+
+// Tests whether a service's protocols can be registered properly on the node's p2p server.
+func TestRegisterProtocols(t *testing.T) {
+ stack, err := New(testNodeConfig())
+ if err != nil {
+ t.Fatalf("failed to create protocol stack: %v", err)
}
- if err := stack.Stop(); err != nil {
- t.Fatalf("failed to stop original service stack: %v", err)
+ defer stack.Close()
+
+ fs, err := NewFullService(stack)
+ if err != nil {
+ t.Fatalf("could not create full service: %v", err)
}
- // Duplicate one of the services and retry starting the node
- if err := stack.Register(NewNoopServiceB); err != nil {
- t.Fatalf("duplicate registration failed: %v", err)
+
+ for _, protocol := range fs.Protocols() {
+ if !containsProtocol(stack.server.Protocols, protocol) {
+ t.Fatalf("protocol %v was not successfully registered", protocol)
+ }
}
- if err := stack.Start(); err == nil {
- t.Fatalf("duplicate service started")
- } else {
- if _, ok := err.(*DuplicateServiceError); !ok {
- t.Fatalf("duplicate error mismatch: have %v, want %v", err, DuplicateServiceError{})
+
+ for _, api := range fs.APIs() {
+ if !containsAPI(stack.rpcAPIs, api) {
+ t.Fatalf("api %v was not successfully registered", api)
}
}
}
-// Tests that registered services get started and stopped correctly.
-func TestServiceLifeCycle(t *testing.T) {
- stack, err := New(testNodeConfig())
+// This test checks that open databases are closed with node.
+func TestNodeCloseClosesDB(t *testing.T) {
+ stack, _ := New(testNodeConfig())
+ defer stack.Close()
+
+ db, err := stack.OpenDatabase("mydb", 0, 0, "")
if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
+ t.Fatal("can't open DB:", err)
+ }
+ if err = db.Put([]byte{}, []byte{}); err != nil {
+ t.Fatal("can't Put on open DB:", err)
}
- defer stack.Close()
- // Register a batch of life-cycle instrumented services
- services := map[string]InstrumentingWrapper{
- "A": InstrumentedServiceMakerA,
- "B": InstrumentedServiceMakerB,
- "C": InstrumentedServiceMakerC,
+ stack.Close()
+ if err = db.Put([]byte{}, []byte{}); err == nil {
+ t.Fatal("Put succeeded after node is closed")
}
+}
+
+// This test checks that OpenDatabase can be used from within a Lifecycle Start method.
+func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) {
+ stack, _ := New(testNodeConfig())
+ defer stack.Close()
+
+ var db xcbdb.Database
+ var err error
+ stack.RegisterLifecycle(&InstrumentedService{
+ startHook: func() {
+ db, err = stack.OpenDatabase("mydb", 0, 0, "")
+ if err != nil {
+ t.Fatal("can't open DB:", err)
+ }
+ },
+ stopHook: func() {
+ db.Close()
+ },
+ })
+
+ stack.Start()
+ stack.Close()
+}
+
+// This test checks that OpenDatabase can be used from within a Lifecycle Stop method.
+func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) {
+ stack, _ := New(testNodeConfig())
+ defer stack.Close()
+
+ stack.RegisterLifecycle(&InstrumentedService{
+ stopHook: func() {
+ db, err := stack.OpenDatabase("mydb", 0, 0, "")
+ if err != nil {
+ t.Fatal("can't open DB:", err)
+ }
+ db.Close()
+ },
+ })
+
+ stack.Start()
+ stack.Close()
+}
+
+// Tests that registered Lifecycles get started and stopped correctly.
+func TestLifecycleLifeCycle(t *testing.T) {
+ stack, _ := New(testNodeConfig())
+ defer stack.Close()
+
started := make(map[string]bool)
stopped := make(map[string]bool)
- for id, maker := range services {
- id := id // Closure for the constructor
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- startHook: func(*p2p.Server) { started[id] = true },
- stopHook: func() { stopped[id] = true },
- }, nil
- }
- if err := stack.Register(maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
- }
+ // Create a batch of instrumented services
+ lifecycles := map[string]Lifecycle{
+ "A": &InstrumentedService{
+ startHook: func() { started["A"] = true },
+ stopHook: func() { stopped["A"] = true },
+ },
+ "B": &InstrumentedService{
+ startHook: func() { started["B"] = true },
+ stopHook: func() { stopped["B"] = true },
+ },
+ "C": &InstrumentedService{
+ startHook: func() { started["C"] = true },
+ stopHook: func() { stopped["C"] = true },
+ },
+ }
+ // register lifecycles on node
+ for _, lifecycle := range lifecycles {
+ stack.RegisterLifecycle(lifecycle)
}
// Start the node and check that all services are running
if err := stack.Start(); err != nil {
t.Fatalf("failed to start protocol stack: %v", err)
}
- for id := range services {
+ for id := range lifecycles {
if !started[id] {
t.Fatalf("service %s: freshly started service not running", id)
}
@@ -186,415 +254,391 @@ func TestServiceLifeCycle(t *testing.T) {
}
}
// Stop the node and check that all services have been stopped
- if err := stack.Stop(); err != nil {
+ if err := stack.Close(); err != nil {
t.Fatalf("failed to stop protocol stack: %v", err)
}
- for id := range services {
+ for id := range lifecycles {
if !stopped[id] {
t.Fatalf("service %s: freshly terminated service still running", id)
}
}
}
-// Tests that services are restarted cleanly as new instances.
-func TestServiceRestarts(t *testing.T) {
+// Tests that if a Lifecycle fails to start, all others started before it will be
+// shut down.
+func TestLifecycleStartupError(t *testing.T) {
stack, err := New(testNodeConfig())
if err != nil {
t.Fatalf("failed to create protocol stack: %v", err)
}
defer stack.Close()
- // Define a service that does not support restarts
- var (
- running bool
- started int
- )
- constructor := func(*ServiceContext) (Service, error) {
- running = false
-
- return &InstrumentedService{
- startHook: func(*p2p.Server) {
- if running {
- panic("already running")
- }
- running = true
- started++
- },
- }, nil
- }
- // Register the service and start the protocol stack
- if err := stack.Register(constructor); err != nil {
- t.Fatalf("failed to register the service: %v", err)
- }
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start protocol stack: %v", err)
+ started := make(map[string]bool)
+ stopped := make(map[string]bool)
+
+ // Create a batch of instrumented services
+ lifecycles := map[string]Lifecycle{
+ "A": &InstrumentedService{
+ startHook: func() { started["A"] = true },
+ stopHook: func() { stopped["A"] = true },
+ },
+ "B": &InstrumentedService{
+ startHook: func() { started["B"] = true },
+ stopHook: func() { stopped["B"] = true },
+ },
+ "C": &InstrumentedService{
+ startHook: func() { started["C"] = true },
+ stopHook: func() { stopped["C"] = true },
+ },
+ }
+ // register lifecycles on node
+ for _, lifecycle := range lifecycles {
+ stack.RegisterLifecycle(lifecycle)
}
- defer stack.Stop()
- if !running || started != 1 {
- t.Fatalf("running/started mismatch: have %v/%d, want true/1", running, started)
+	// Register a service that fails to start itself
+ failure := errors.New("fail")
+ failer := &InstrumentedService{start: failure}
+ stack.RegisterLifecycle(failer)
+
+ // Start the protocol stack and ensure all started services stop
+ if err := stack.Start(); err != failure {
+ t.Fatalf("stack startup failure mismatch: have %v, want %v", err, failure)
}
- // Restart the stack a few times and check successful service restarts
- for i := 0; i < 3; i++ {
- if err := stack.Restart(); err != nil {
- t.Fatalf("iter %d: failed to restart stack: %v", i, err)
+ for id := range lifecycles {
+ if started[id] && !stopped[id] {
+ t.Fatalf("service %s: started but not stopped", id)
}
- }
- if !running || started != 4 {
- t.Fatalf("running/started mismatch: have %v/%d, want true/4", running, started)
+ delete(started, id)
+ delete(stopped, id)
}
}
-// Tests that if a service fails to initialize itself, none of the other services
-// will be allowed to even start.
-func TestServiceConstructionAbortion(t *testing.T) {
+// Tests that even if a registered Lifecycle fails to shut down cleanly, it does
+// not influence the rest of the shutdown invocations.
+func TestLifecycleTerminationGuarantee(t *testing.T) {
stack, err := New(testNodeConfig())
if err != nil {
t.Fatalf("failed to create protocol stack: %v", err)
}
defer stack.Close()
- // Define a batch of good services
- services := map[string]InstrumentingWrapper{
- "A": InstrumentedServiceMakerA,
- "B": InstrumentedServiceMakerB,
- "C": InstrumentedServiceMakerC,
- }
started := make(map[string]bool)
- for id, maker := range services {
- id := id // Closure for the constructor
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- startHook: func(*p2p.Server) { started[id] = true },
- }, nil
- }
- if err := stack.Register(maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
- }
+ stopped := make(map[string]bool)
+
+ // Create a batch of instrumented services
+ lifecycles := map[string]Lifecycle{
+ "A": &InstrumentedService{
+ startHook: func() { started["A"] = true },
+ stopHook: func() { stopped["A"] = true },
+ },
+ "B": &InstrumentedService{
+ startHook: func() { started["B"] = true },
+ stopHook: func() { stopped["B"] = true },
+ },
+ "C": &InstrumentedService{
+ startHook: func() { started["C"] = true },
+ stopHook: func() { stopped["C"] = true },
+ },
+ }
+ // register lifecycles on node
+ for _, lifecycle := range lifecycles {
+ stack.RegisterLifecycle(lifecycle)
}
- // Register a service that fails to construct itself
+
+	// Register a service that fails to shut down cleanly
failure := errors.New("fail")
- failer := func(*ServiceContext) (Service, error) {
- return nil, failure
+ failer := &InstrumentedService{stop: failure}
+ stack.RegisterLifecycle(failer)
+
+ // Start the protocol stack, and ensure that a failing shut down terminates all
+ // Start the stack and make sure all is online
+ if err := stack.Start(); err != nil {
+ t.Fatalf("failed to start protocol stack: %v", err)
}
- if err := stack.Register(failer); err != nil {
- t.Fatalf("failer registration failed: %v", err)
+ for id := range lifecycles {
+ if !started[id] {
+ t.Fatalf("service %s: service not running", id)
+ }
+ if stopped[id] {
+ t.Fatalf("service %s: service already stopped", id)
+ }
}
- // Start the protocol stack and ensure none of the services get started
- for i := 0; i < 100; i++ {
- if err := stack.Start(); err != failure {
- t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure)
+ // Stop the stack, verify failure and check all terminations
+ err = stack.Close()
+ if err, ok := err.(*StopError); !ok {
+ t.Fatalf("termination failure mismatch: have %v, want StopError", err)
+ } else {
+ failer := reflect.TypeOf(&InstrumentedService{})
+ if err.Services[failer] != failure {
+ t.Fatalf("failer termination failure mismatch: have %v, want %v", err.Services[failer], failure)
}
- for id := range services {
- if started[id] {
- t.Fatalf("service %s: started should not have", id)
- }
- delete(started, id)
+ if len(err.Services) != 1 {
+ t.Fatalf("failure count mismatch: have %d, want %d", len(err.Services), 1)
+ }
+ }
+ for id := range lifecycles {
+ if !stopped[id] {
+ t.Fatalf("service %s: service not terminated", id)
}
+ delete(started, id)
+ delete(stopped, id)
}
+
+ stack.server = &p2p.Server{}
+ stack.server.PrivateKey = testNodeKey
}
-// Tests that if a service fails to start, all others started before it will be
-// shut down.
-func TestServiceStartupAbortion(t *testing.T) {
- stack, err := New(testNodeConfig())
+// Tests whether a handler can be successfully mounted on the canonical HTTP server
+// on the given prefix
+func TestRegisterHandler_Successful(t *testing.T) {
+ node := createNode(t, 7878, 7979)
+ defer node.Close()
+ // create and mount handler
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("success"))
+ })
+ node.RegisterHandler("test", "/test", handler)
+
+ // start node
+ if err := node.Start(); err != nil {
+ t.Fatalf("could not start node: %v", err)
+ }
+
+ // create HTTP request
+ httpReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:7878/test", nil)
if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
+ t.Error("could not issue new http request ", err)
}
- defer stack.Close()
- // Register a batch of good services
- services := map[string]InstrumentingWrapper{
- "A": InstrumentedServiceMakerA,
- "B": InstrumentedServiceMakerB,
- "C": InstrumentedServiceMakerC,
+ // check response
+ resp := doHTTPRequest(t, httpReq)
+ buf := make([]byte, 7)
+ _, err = io.ReadFull(resp.Body, buf)
+ if err != nil {
+ t.Fatalf("could not read response: %v", err)
}
- started := make(map[string]bool)
- stopped := make(map[string]bool)
+ assert.Equal(t, "success", string(buf))
+}
- for id, maker := range services {
- id := id // Closure for the constructor
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- startHook: func(*p2p.Server) { started[id] = true },
- stopHook: func() { stopped[id] = true },
- }, nil
- }
- if err := stack.Register(maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
- }
+// Tests that the given handler will not be successfully mounted since no HTTP server
+// is enabled for RPC
+func TestRegisterHandler_Unsuccessful(t *testing.T) {
+ node, err := New(&DefaultConfig)
+ if err != nil {
+ t.Fatalf("could not create new node: %v", err)
}
- // Register a service that fails to start
- failure := errors.New("fail")
- failer := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- start: failure,
- }, nil
+
+ // create and mount handler
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("success"))
+ })
+ node.RegisterHandler("test", "/test", handler)
+}
+
+// Tests whether websocket requests can be handled on the same port as a regular http server.
+func TestWebsocketHTTPOnSamePort_WebsocketRequest(t *testing.T) {
+ node := startHTTP(t, 0, 0)
+ defer node.Close()
+
+ ws := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1)
+
+ if node.WSEndpoint() != ws {
+ t.Fatalf("endpoints should be the same")
}
- if err := stack.Register(failer); err != nil {
- t.Fatalf("failer registration failed: %v", err)
+ if !checkRPC(ws) {
+ t.Fatalf("ws request failed")
}
- // Start the protocol stack and ensure all started services stop
- for i := 0; i < 100; i++ {
- if err := stack.Start(); err != failure {
- t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure)
- }
- for id := range services {
- if started[id] && !stopped[id] {
- t.Fatalf("service %s: started but not stopped", id)
- }
- delete(started, id)
- delete(stopped, id)
- }
+ if !checkRPC(node.HTTPEndpoint()) {
+ t.Fatalf("http request failed")
}
}
-// Tests that even if a registered service fails to shut down cleanly, it does
-// not influece the rest of the shutdown invocations.
-func TestServiceTerminationGuarantee(t *testing.T) {
- stack, err := New(testNodeConfig())
+func TestWebsocketHTTPOnSeparatePort_WSRequest(t *testing.T) {
+ // try and get a free port
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
+ t.Fatal("can't listen:", err)
}
- defer stack.Close()
+ port := listener.Addr().(*net.TCPAddr).Port
+ listener.Close()
- // Register a batch of good services
- services := map[string]InstrumentingWrapper{
- "A": InstrumentedServiceMakerA,
- "B": InstrumentedServiceMakerB,
- "C": InstrumentedServiceMakerC,
- }
- started := make(map[string]bool)
- stopped := make(map[string]bool)
+ node := startHTTP(t, 0, port)
+ defer node.Close()
- for id, maker := range services {
- id := id // Closure for the constructor
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- startHook: func(*p2p.Server) { started[id] = true },
- stopHook: func() { stopped[id] = true },
- }, nil
- }
- if err := stack.Register(maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
- }
+ wsOnHTTP := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1)
+ ws := fmt.Sprintf("ws://127.0.0.1:%d", port)
+
+ if node.WSEndpoint() == wsOnHTTP {
+ t.Fatalf("endpoints should not be the same")
}
- // Register a service that fails to shot down cleanly
- failure := errors.New("fail")
- failer := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- stop: failure,
- }, nil
+ // ensure ws endpoint matches the expected endpoint
+ if node.WSEndpoint() != ws {
+ t.Fatalf("ws endpoint is incorrect: expected %s, got %s", ws, node.WSEndpoint())
}
- if err := stack.Register(failer); err != nil {
- t.Fatalf("failer registration failed: %v", err)
+
+ if !checkRPC(ws) {
+ t.Fatalf("ws request failed")
}
- // Start the protocol stack, and ensure that a failing shut down terminates all
- for i := 0; i < 100; i++ {
- // Start the stack and make sure all is online
- if err := stack.Start(); err != nil {
- t.Fatalf("iter %d: failed to start protocol stack: %v", i, err)
- }
- for id := range services {
- if !started[id] {
- t.Fatalf("iter %d, service %s: service not running", i, id)
- }
- if stopped[id] {
- t.Fatalf("iter %d, service %s: service already stopped", i, id)
- }
- }
- // Stop the stack, verify failure and check all terminations
- err := stack.Stop()
- if err, ok := err.(*StopError); !ok {
- t.Fatalf("iter %d: termination failure mismatch: have %v, want StopError", i, err)
- } else {
- failer := reflect.TypeOf(&InstrumentedService{})
- if err.Services[failer] != failure {
- t.Fatalf("iter %d: failer termination failure mismatch: have %v, want %v", i, err.Services[failer], failure)
+ if !checkRPC(node.HTTPEndpoint()) {
+ t.Fatalf("http request failed")
+ }
+}
+
+type rpcPrefixTest struct {
+ httpPrefix, wsPrefix string
+ // These lists paths on which JSON-RPC should be served / not served.
+ wantHTTP []string
+ wantNoHTTP []string
+ wantWS []string
+ wantNoWS []string
+}
+
+func TestNodeRPCPrefix(t *testing.T) {
+ t.Parallel()
+
+ tests := []rpcPrefixTest{
+ // both off
+ {
+ httpPrefix: "", wsPrefix: "",
+ wantHTTP: []string{"/", "/?p=1"},
+ wantNoHTTP: []string{"/test", "/test?p=1"},
+ wantWS: []string{"/", "/?p=1"},
+ wantNoWS: []string{"/test", "/test?p=1"},
+ },
+ // only http prefix
+ {
+ httpPrefix: "/testprefix", wsPrefix: "",
+ wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
+ wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"},
+ wantWS: []string{"/", "/?p=1"},
+ wantNoWS: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"},
+ },
+ // only ws prefix
+ {
+ httpPrefix: "", wsPrefix: "/testprefix",
+ wantHTTP: []string{"/", "/?p=1"},
+ wantNoHTTP: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"},
+ wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
+ wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"},
+ },
+ // both set
+ {
+ httpPrefix: "/testprefix", wsPrefix: "/testprefix",
+ wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
+ wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"},
+ wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
+ wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix)
+ t.Run(name, func(t *testing.T) {
+ cfg := &Config{
+ HTTPHost: "127.0.0.1",
+ HTTPPathPrefix: test.httpPrefix,
+ WSHost: "127.0.0.1",
+ WSPathPrefix: test.wsPrefix,
}
- if len(err.Services) != 1 {
- t.Fatalf("iter %d: failure count mismatch: have %d, want %d", i, len(err.Services), 1)
+ node, err := New(cfg)
+ if err != nil {
+ t.Fatal("can't create node:", err)
}
- }
- for id := range services {
- if !stopped[id] {
- t.Fatalf("iter %d, service %s: service not terminated", i, id)
+ defer node.Close()
+ if err := node.Start(); err != nil {
+ t.Fatal("can't start node:", err)
}
- delete(started, id)
- delete(stopped, id)
- }
+ test.check(t, node)
+ })
}
}
-// TestServiceRetrieval tests that individual services can be retrieved.
-func TestServiceRetrieval(t *testing.T) {
- // Create a simple stack and register two service types
- stack, err := New(testNodeConfig())
- if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
- }
- defer stack.Close()
+func (test rpcPrefixTest) check(t *testing.T, node *Node) {
+ t.Helper()
+ httpBase := "http://" + node.http.listenAddr()
+ wsBase := "ws://" + node.http.listenAddr()
- if err := stack.Register(NewNoopService); err != nil {
- t.Fatalf("noop service registration failed: %v", err)
- }
- if err := stack.Register(NewInstrumentedService); err != nil {
- t.Fatalf("instrumented service registration failed: %v", err)
+ if node.WSEndpoint() != wsBase+test.wsPrefix {
+ t.Errorf("Error: node has wrong WSEndpoint %q", node.WSEndpoint())
}
- // Make sure none of the services can be retrieved until started
- var noopServ *NoopService
- if err := stack.Service(&noopServ); err != ErrNodeStopped {
- t.Fatalf("noop service retrieval mismatch: have %v, want %v", err, ErrNodeStopped)
+
+ for _, path := range test.wantHTTP {
+ resp := rpcRequest(t, httpBase+path)
+ if resp.StatusCode != 200 {
+ t.Errorf("Error: %s: bad status code %d, want 200", path, resp.StatusCode)
+ }
}
- var instServ *InstrumentedService
- if err := stack.Service(&instServ); err != ErrNodeStopped {
- t.Fatalf("instrumented service retrieval mismatch: have %v, want %v", err, ErrNodeStopped)
+ for _, path := range test.wantNoHTTP {
+ resp := rpcRequest(t, httpBase+path)
+ if resp.StatusCode != 404 {
+ t.Errorf("Error: %s: bad status code %d, want 404", path, resp.StatusCode)
+ }
}
- // Start the stack and ensure everything is retrievable now
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start stack: %v", err)
+ for _, path := range test.wantWS {
+ err := wsRequest(t, wsBase+path)
+ if err != nil {
+ t.Errorf("Error: %s: WebSocket connection failed: %v", path, err)
+ }
}
- defer stack.Stop()
+ for _, path := range test.wantNoWS {
+ err := wsRequest(t, wsBase+path)
+ if err == nil {
+ t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path)
+ }
- if err := stack.Service(&noopServ); err != nil {
- t.Fatalf("noop service retrieval mismatch: have %v, want %v", err, nil)
- }
- if err := stack.Service(&instServ); err != nil {
- t.Fatalf("instrumented service retrieval mismatch: have %v, want %v", err, nil)
}
}
-// Tests that all protocols defined by individual services get launched.
-func TestProtocolGather(t *testing.T) {
- stack, err := New(testNodeConfig())
+func createNode(t *testing.T, httpPort, wsPort int) *Node {
+ conf := &Config{
+ HTTPHost: "127.0.0.1",
+ HTTPPort: httpPort,
+ WSHost: "127.0.0.1",
+ WSPort: wsPort,
+ }
+ node, err := New(conf)
if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
+ t.Fatalf("could not create a new node: %v", err)
}
- defer stack.Close()
+ return node
+}
- // Register a batch of services with some configured number of protocols
- services := map[string]struct {
- Count int
- Maker InstrumentingWrapper
- }{
- "zero": {0, InstrumentedServiceMakerA},
- "one": {1, InstrumentedServiceMakerB},
- "many": {10, InstrumentedServiceMakerC},
- }
- for id, config := range services {
- protocols := make([]p2p.Protocol, config.Count)
- for i := 0; i < len(protocols); i++ {
- protocols[i].Name = id
- protocols[i].Version = uint(i)
- }
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{
- protocols: protocols,
- }, nil
- }
- if err := stack.Register(config.Maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
- }
- }
- // Start the services and ensure all protocols start successfully
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start protocol stack: %v", err)
+func startHTTP(t *testing.T, httpPort, wsPort int) *Node {
+ node := createNode(t, httpPort, wsPort)
+ err := node.Start()
+ if err != nil {
+ t.Fatalf("could not start http service on node: %v", err)
}
- defer stack.Stop()
- protocols := stack.Server().Protocols
- if len(protocols) != 11 {
- t.Fatalf("mismatching number of protocols launched: have %d, want %d", len(protocols), 26)
- }
- for id, config := range services {
- for ver := 0; ver < config.Count; ver++ {
- launched := false
- for i := 0; i < len(protocols); i++ {
- if protocols[i].Name == id && protocols[i].Version == uint(ver) {
- launched = true
- break
- }
- }
- if !launched {
- t.Errorf("configured protocol not launched: %s v%d", id, ver)
- }
- }
- }
+ return node
}
-// Tests that all APIs defined by individual services get exposed.
-func TestAPIGather(t *testing.T) {
- stack, err := New(testNodeConfig())
+func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
+ client := http.DefaultClient
+ resp, err := client.Do(req)
if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
+ t.Fatalf("could not issue a GET request to the given endpoint: %v", err)
+
}
- defer stack.Close()
+ return resp
+}
- // Register a batch of services with some configured APIs
- calls := make(chan string, 1)
- makeAPI := func(result string) *OneMethodAPI {
- return &OneMethodAPI{fun: func() { calls <- result }}
- }
- services := map[string]struct {
- APIs []rpc.API
- Maker InstrumentingWrapper
- }{
- "Zero APIs": {
- []rpc.API{}, InstrumentedServiceMakerA},
- "Single API": {
- []rpc.API{
- {Namespace: "single", Version: "1", Service: makeAPI("single.v1"), Public: true},
- }, InstrumentedServiceMakerB},
- "Many APIs": {
- []rpc.API{
- {Namespace: "multi", Version: "1", Service: makeAPI("multi.v1"), Public: true},
- {Namespace: "multi.v2", Version: "2", Service: makeAPI("multi.v2"), Public: true},
- {Namespace: "multi.v2.nested", Version: "2", Service: makeAPI("multi.v2.nested"), Public: true},
- }, InstrumentedServiceMakerC},
- }
-
- for id, config := range services {
- config := config
- constructor := func(*ServiceContext) (Service, error) {
- return &InstrumentedService{apis: config.APIs}, nil
- }
- if err := stack.Register(config.Maker(constructor)); err != nil {
- t.Fatalf("service %s: registration failed: %v", id, err)
+func containsProtocol(stackProtocols []p2p.Protocol, protocol p2p.Protocol) bool {
+ for _, a := range stackProtocols {
+ if reflect.DeepEqual(a, protocol) {
+ return true
}
}
- // Start the services and ensure all API start successfully
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start protocol stack: %v", err)
- }
- defer stack.Stop()
+ return false
+}
- // Connect to the RPC server and verify the various registered endpoints
- client, err := stack.Attach()
- if err != nil {
- t.Fatalf("failed to connect to the inproc API server: %v", err)
- }
- defer client.Close()
-
- tests := []struct {
- Method string
- Result string
- }{
- {"single_theOneMethod", "single.v1"},
- {"multi_theOneMethod", "multi.v1"},
- {"multi.v2_theOneMethod", "multi.v2"},
- {"multi.v2.nested_theOneMethod", "multi.v2.nested"},
- }
- for i, test := range tests {
- if err := client.Call(nil, test.Method); err != nil {
- t.Errorf("test %d: API request failed: %v", i, err)
- }
- select {
- case result := <-calls:
- if result != test.Result {
- t.Errorf("test %d: result mismatch: have %s, want %s", i, result, test.Result)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d: rpc execution timeout", i)
+func containsAPI(stackAPIs []rpc.API, api rpc.API) bool {
+ for _, a := range stackAPIs {
+ if reflect.DeepEqual(a, api) {
+ return true
}
}
+ return false
}
diff --git a/node/rpcstack.go b/node/rpcstack.go
new file mode 100644
index 000000000..5ea8e4648
--- /dev/null
+++ b/node/rpcstack.go
@@ -0,0 +1,545 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "compress/gzip"
+ "context"
+ "fmt"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/rpc"
+ "github.com/rs/cors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// httpConfig is the JSON-RPC/HTTP configuration.
+type httpConfig struct {
+ Modules []string
+ CorsAllowedOrigins []string
+ Vhosts []string
+ prefix string // path prefix on which to mount http handler
+ jwtSecret []byte // optional JWT secret
+}
+
+// wsConfig is the JSON-RPC/Websocket configuration
+type wsConfig struct {
+ Origins []string
+ Modules []string
+ prefix string // path prefix on which to mount ws handler
+ jwtSecret []byte // optional JWT secret
+}
+
+type rpcHandler struct {
+ http.Handler
+ server *rpc.Server
+}
+
+type httpServer struct {
+ log log.Logger
+ timeouts rpc.HTTPTimeouts
+ mux http.ServeMux // registered handlers go here
+
+ mu sync.Mutex
+ server *http.Server
+ listener net.Listener // non-nil when server is running
+
+ // HTTP RPC handler things.
+ httpConfig httpConfig
+ httpHandler atomic.Value // *rpcHandler
+
+ // WebSocket handler things.
+ wsConfig wsConfig
+ wsHandler atomic.Value // *rpcHandler
+
+ // These are set by setListenAddr.
+ endpoint string
+ host string
+ port int
+
+ handlerNames map[string]string
+}
+
+func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer {
+ h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)}
+ h.httpHandler.Store((*rpcHandler)(nil))
+ h.wsHandler.Store((*rpcHandler)(nil))
+ return h
+}
+
+// setListenAddr configures the listening address of the server.
+// The address can only be set while the server isn't running.
+func (h *httpServer) setListenAddr(host string, port int) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.listener != nil && (host != h.host || port != h.port) {
+ return fmt.Errorf("HTTP server already running on %s", h.endpoint)
+ }
+
+ h.host, h.port = host, port
+ h.endpoint = fmt.Sprintf("%s:%d", host, port)
+ return nil
+}
+
+// listenAddr returns the listening address of the server.
+func (h *httpServer) listenAddr() string {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.listener != nil {
+ return h.listener.Addr().String()
+ }
+ return h.endpoint
+}
+
+// start starts the HTTP server if it is enabled and not already running.
+func (h *httpServer) start() error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.endpoint == "" || h.listener != nil {
+ return nil // already running or not configured
+ }
+
+ // Initialize the server.
+ h.server = &http.Server{Handler: h}
+ if h.timeouts != (rpc.HTTPTimeouts{}) {
+ CheckTimeouts(&h.timeouts)
+ h.server.ReadTimeout = h.timeouts.ReadTimeout
+ h.server.WriteTimeout = h.timeouts.WriteTimeout
+ h.server.IdleTimeout = h.timeouts.IdleTimeout
+ }
+
+ // Start the server.
+ listener, err := net.Listen("tcp", h.endpoint)
+ if err != nil {
+ // If the server fails to start, we need to clear out the RPC and WS
+ // configuration so they can be configured another time.
+ h.disableRPC()
+ h.disableWS()
+ return err
+ }
+ h.listener = listener
+ go h.server.Serve(listener)
+
+ // if server is websocket only, return after logging
+ if h.wsAllowed() && !h.rpcAllowed() {
+ url := fmt.Sprintf("ws://%v", listener.Addr())
+ if h.wsConfig.prefix != "" {
+ url += h.wsConfig.prefix
+ }
+ h.log.Info("WebSocket enabled", "url", url)
+ return nil
+ }
+ // Log http endpoint.
+ h.log.Info("HTTP server started",
+ "endpoint", listener.Addr(), "auth", (h.httpConfig.jwtSecret != nil),
+ "prefix", h.httpConfig.prefix,
+ "cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","),
+ "vhosts", strings.Join(h.httpConfig.Vhosts, ","),
+ )
+
+ // Log all handlers mounted on server.
+ var paths []string
+ for path := range h.handlerNames {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+ logged := make(map[string]bool, len(paths))
+ for _, path := range paths {
+ name := h.handlerNames[path]
+ if !logged[name] {
+ log.Info(name+" enabled", "url", "http://"+listener.Addr().String()+path)
+ logged[name] = true
+ }
+ }
+ return nil
+}
+
+func (h *httpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // check if ws request and serve if ws enabled
+ ws := h.wsHandler.Load().(*rpcHandler)
+ if ws != nil && isWebsocket(r) {
+ if checkPath(r, h.wsConfig.prefix) {
+ ws.ServeHTTP(w, r)
+ }
+ return
+ }
+ // if http-rpc is enabled, try to serve request
+ rpc := h.httpHandler.Load().(*rpcHandler)
+ if rpc != nil {
+ // First try to route in the mux.
+ // Requests to a path below root are handled by the mux,
+ // which has all the handlers registered via Node.RegisterHandler.
+ // These are made available when RPC is enabled.
+ muxHandler, pattern := h.mux.Handler(r)
+ if pattern != "" {
+ muxHandler.ServeHTTP(w, r)
+ return
+ }
+
+ if checkPath(r, h.httpConfig.prefix) {
+ rpc.ServeHTTP(w, r)
+ return
+ }
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
+
+// checkPath checks whether a given request URL matches a given path prefix.
+func checkPath(r *http.Request, path string) bool {
+ // if no prefix has been specified, request URL must be on root
+ if path == "" {
+ return r.URL.Path == "/"
+ }
+ // otherwise, check to make sure prefix matches
+ return len(r.URL.Path) >= len(path) && r.URL.Path[:len(path)] == path
+}
+
+// validatePrefix checks if 'path' is a valid configuration value for the RPC prefix option.
+func validatePrefix(what, path string) error {
+ if path == "" {
+ return nil
+ }
+ if path[0] != '/' {
+ return fmt.Errorf(`%s RPC path prefix %q does not contain leading "/"`, what, path)
+ }
+ if strings.ContainsAny(path, "?#") {
+ // This is just to avoid confusion. While these would match correctly (i.e. they'd
+ // match if URL-escaped into path), it's not easy to understand for users when
+ // setting that on the command line.
+ return fmt.Errorf("%s RPC path prefix %q contains URL meta-characters", what, path)
+ }
+ return nil
+}
+
+// stop shuts down the HTTP server.
+func (h *httpServer) stop() {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.doStop()
+}
+
+func (h *httpServer) doStop() {
+ if h.listener == nil {
+ return // not running
+ }
+
+ // Shut down the server.
+ httpHandler := h.httpHandler.Load().(*rpcHandler)
+ wsHandler := h.wsHandler.Load().(*rpcHandler)
+ if httpHandler != nil {
+ h.httpHandler.Store((*rpcHandler)(nil))
+ httpHandler.server.Stop()
+ }
+ if wsHandler != nil {
+ h.wsHandler.Store((*rpcHandler)(nil))
+ wsHandler.server.Stop()
+ }
+ h.server.Shutdown(context.Background())
+ h.listener.Close()
+ h.log.Info("HTTP server stopped", "endpoint", h.listener.Addr())
+
+ // Clear out everything to allow re-configuring it later.
+ h.host, h.port, h.endpoint = "", 0, ""
+ h.server, h.listener = nil, nil
+}
+
+// enableRPC turns on JSON-RPC over HTTP on the server.
+func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.rpcAllowed() {
+ return fmt.Errorf("JSON-RPC over HTTP is already enabled")
+ }
+
+ // Create RPC server and handler.
+ srv := rpc.NewServer()
+ if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil {
+ return err
+ }
+ h.httpConfig = config
+ h.httpHandler.Store(&rpcHandler{
+ Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts, config.jwtSecret),
+ server: srv,
+ })
+ return nil
+}
+
+// disableRPC stops the HTTP RPC handler. This is internal, the caller must hold h.mu.
+func (h *httpServer) disableRPC() bool {
+ handler := h.httpHandler.Load().(*rpcHandler)
+ if handler != nil {
+ h.httpHandler.Store((*rpcHandler)(nil))
+ handler.server.Stop()
+ }
+ return handler != nil
+}
+
+// enableWS turns on JSON-RPC over WebSocket on the server.
+func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.wsAllowed() {
+ return fmt.Errorf("JSON-RPC over WebSocket is already enabled")
+ }
+
+ // Create RPC server and handler.
+ srv := rpc.NewServer()
+ if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil {
+ return err
+ }
+ h.wsConfig = config
+ h.wsHandler.Store(&rpcHandler{
+ Handler: NewWSHandlerStack(srv.WebsocketHandler(config.Origins), config.jwtSecret),
+ server: srv,
+ })
+ return nil
+}
+
+// stopWS disables JSON-RPC over WebSocket and also stops the server if it only serves WebSocket.
+func (h *httpServer) stopWS() {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ if h.disableWS() {
+ if !h.rpcAllowed() {
+ h.doStop()
+ }
+ }
+}
+
+// disableWS disables the WebSocket handler. This is internal, the caller must hold h.mu.
+func (h *httpServer) disableWS() bool {
+ ws := h.wsHandler.Load().(*rpcHandler)
+ if ws != nil {
+ h.wsHandler.Store((*rpcHandler)(nil))
+ ws.server.Stop()
+ }
+ return ws != nil
+}
+
+// rpcAllowed returns true when JSON-RPC over HTTP is enabled.
+func (h *httpServer) rpcAllowed() bool {
+ return h.httpHandler.Load().(*rpcHandler) != nil
+}
+
+// wsAllowed returns true when JSON-RPC over WebSocket is enabled.
+func (h *httpServer) wsAllowed() bool {
+ return h.wsHandler.Load().(*rpcHandler) != nil
+}
+
+// isWebsocket checks the header of an http request for a websocket upgrade request.
+func isWebsocket(r *http.Request) bool {
+ return strings.ToLower(r.Header.Get("Upgrade")) == "websocket" &&
+ strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade")
+}
+
+// NewHTTPHandlerStack returns wrapped http-related handlers
+func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string, jwtSecret []byte) http.Handler {
+ // Wrap the CORS-handler within a host-handler
+ handler := newCorsHandler(srv, cors)
+ handler = newVHostHandler(vhosts, handler)
+ if len(jwtSecret) != 0 {
+ handler = newJWTHandler(jwtSecret, handler)
+ }
+ return newGzipHandler(handler)
+}
+
+// NewWSHandlerStack returns a wrapped ws-related handler.
+func NewWSHandlerStack(srv http.Handler, jwtSecret []byte) http.Handler {
+ if len(jwtSecret) != 0 {
+ return newJWTHandler(jwtSecret, srv)
+ }
+ return srv
+}
+
+func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
+ // disable CORS support if user has not specified a custom CORS configuration
+ if len(allowedOrigins) == 0 {
+ return srv
+ }
+ c := cors.New(cors.Options{
+ AllowedOrigins: allowedOrigins,
+ AllowedMethods: []string{http.MethodPost, http.MethodGet},
+ AllowedHeaders: []string{"*"},
+ MaxAge: 600,
+ })
+ return c.Handler(srv)
+}
+
+// virtualHostHandler is a handler which validates the Host-header of incoming requests.
+// Using virtual hosts can help prevent DNS rebinding attacks, where a 'random' domain name points to
+// the service ip address (but without CORS headers). By verifying the targeted virtual host, we can
+// ensure that it's a destination that the node operator has defined.
+type virtualHostHandler struct {
+ vhosts map[string]struct{}
+ next http.Handler
+}
+
+func newVHostHandler(vhosts []string, next http.Handler) http.Handler {
+ vhostMap := make(map[string]struct{})
+ for _, allowedHost := range vhosts {
+ vhostMap[strings.ToLower(allowedHost)] = struct{}{}
+ }
+ return &virtualHostHandler{vhostMap, next}
+}
+
+// ServeHTTP serves JSON-RPC requests over HTTP, implements http.Handler
+func (h *virtualHostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // if r.Host is not set, we can continue serving since a browser would set the Host header
+ if r.Host == "" {
+ h.next.ServeHTTP(w, r)
+ return
+ }
+ host, _, err := net.SplitHostPort(r.Host)
+ if err != nil {
+ // Either invalid (too many colons) or no port specified
+ host = r.Host
+ }
+ if ipAddr := net.ParseIP(host); ipAddr != nil {
+ // It's an IP address, we can serve that
+ h.next.ServeHTTP(w, r)
+ return
+
+ }
+ // Not an IP address, but a hostname. Need to validate
+ if _, exist := h.vhosts["*"]; exist {
+ h.next.ServeHTTP(w, r)
+ return
+ }
+ if _, exist := h.vhosts[host]; exist {
+ h.next.ServeHTTP(w, r)
+ return
+ }
+ http.Error(w, "invalid host specified", http.StatusForbidden)
+}
+
+var gzPool = sync.Pool{
+ New: func() interface{} {
+ w := gzip.NewWriter(ioutil.Discard)
+ return w
+ },
+}
+
+type gzipResponseWriter struct {
+ io.Writer
+ http.ResponseWriter
+}
+
+func (w *gzipResponseWriter) WriteHeader(status int) {
+ w.Header().Del("Content-Length")
+ w.ResponseWriter.WriteHeader(status)
+}
+
+func (w *gzipResponseWriter) Write(b []byte) (int, error) {
+ return w.Writer.Write(b)
+}
+
+func newGzipHandler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ w.Header().Set("Content-Encoding", "gzip")
+
+ gz := gzPool.Get().(*gzip.Writer)
+ defer gzPool.Put(gz)
+
+ gz.Reset(w)
+ defer gz.Close()
+
+ next.ServeHTTP(&gzipResponseWriter{ResponseWriter: w, Writer: gz}, r)
+ })
+}
+
+type ipcServer struct {
+ log log.Logger
+ endpoint string
+
+ mu sync.Mutex
+ listener net.Listener
+ srv *rpc.Server
+}
+
+func newIPCServer(log log.Logger, endpoint string) *ipcServer {
+ return &ipcServer{log: log, endpoint: endpoint}
+}
+
+// Start starts the httpServer's http.Server
+func (is *ipcServer) start(apis []rpc.API) error {
+ is.mu.Lock()
+ defer is.mu.Unlock()
+
+ if is.listener != nil {
+ return nil // already running
+ }
+ listener, srv, err := rpc.StartIPCEndpoint(is.endpoint, apis)
+ if err != nil {
+ return err
+ }
+ is.log.Info("IPC endpoint opened", "url", is.endpoint)
+ is.listener, is.srv = listener, srv
+ return nil
+}
+
+func (is *ipcServer) stop() error {
+ is.mu.Lock()
+ defer is.mu.Unlock()
+
+ if is.listener == nil {
+ return nil // not running
+ }
+ err := is.listener.Close()
+ is.srv.Stop()
+ is.listener, is.srv = nil, nil
+ is.log.Info("IPC endpoint closed", "url", is.endpoint)
+ return err
+}
+
+// RegisterApisFromWhitelist checks the given modules' availability, generates a whitelist based on the allowed modules,
+// and then registers all of the APIs exposed by the services.
+func RegisterApisFromWhitelist(apis []rpc.API, modules []string, srv *rpc.Server, exposeAll bool) error {
+ if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
+ log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
+ }
+ // Generate the whitelist based on the allowed modules
+ whitelist := make(map[string]bool)
+ for _, module := range modules {
+ whitelist[module] = true
+ }
+ // Register all the APIs exposed by the services
+ for _, api := range apis {
+ if exposeAll || whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
+ if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
new file mode 100644
index 000000000..a487713f8
--- /dev/null
+++ b/node/rpcstack_test.go
@@ -0,0 +1,375 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package node
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/core-coin/go-core/internal/testlog"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/rpc"
+ "github.com/golang-jwt/jwt/v4"
+ "github.com/gorilla/websocket"
+ "github.com/stretchr/testify/assert"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+// TestCorsHandler makes sure CORS are properly handled on the http server.
+func TestCorsHandler(t *testing.T) {
+ srv := createAndStartServer(t, &httpConfig{CorsAllowedOrigins: []string{"test", "test.com"}}, false, &wsConfig{})
+ defer srv.stop()
+ url := "http://" + srv.listenAddr()
+
+ resp := rpcRequest(t, url, "origin", "test.com")
+ assert.Equal(t, "test.com", resp.Header.Get("Access-Control-Allow-Origin"))
+
+ resp2 := rpcRequest(t, url, "origin", "bad")
+ assert.Equal(t, "", resp2.Header.Get("Access-Control-Allow-Origin"))
+}
+
+// TestVhosts makes sure vhosts are properly handled on the http server.
+func TestVhosts(t *testing.T) {
+ srv := createAndStartServer(t, &httpConfig{Vhosts: []string{"test"}}, false, &wsConfig{})
+ defer srv.stop()
+ url := "http://" + srv.listenAddr()
+
+ resp := rpcRequest(t, url, "host", "test")
+ assert.Equal(t, resp.StatusCode, http.StatusOK)
+
+ resp2 := rpcRequest(t, url, "host", "bad")
+ assert.Equal(t, resp2.StatusCode, http.StatusForbidden)
+}
+
+type originTest struct {
+ spec string
+ expOk []string
+ expFail []string
+}
+
+// splitAndTrim splits input separated by a comma
+// and trims excessive white space from the substrings.
+// Copied over from flags.go
+func splitAndTrim(input string) (ret []string) {
+ l := strings.Split(input, ",")
+ for _, r := range l {
+ r = strings.TrimSpace(r)
+ if len(r) > 0 {
+ ret = append(ret, r)
+ }
+ }
+ return ret
+}
+
+// TestWebsocketOrigins makes sure the websocket origins are properly handled on the websocket server.
+func TestWebsocketOrigins(t *testing.T) {
+ tests := []originTest{
+ {
+ spec: "*", // allow all
+ expOk: []string{"", "http://test", "https://test", "http://test:8540", "https://test:8540",
+ "http://test.com", "https://foo.test", "http://testa", "http://atestb:8540", "https://atestb:8540"},
+ },
+ {
+ spec: "test",
+ expOk: []string{"http://test", "https://test", "http://test:8540", "https://test:8540"},
+ expFail: []string{"http://test.com", "https://foo.test", "http://testa", "http://atestb:8540", "https://atestb:8540"},
+ },
+ // scheme tests
+ {
+ spec: "https://test",
+ expOk: []string{"https://test", "https://test:9999"},
+ expFail: []string{
+ "test", // no scheme, required by spec
+ "http://test", // wrong scheme
+ "http://test.foo", "https://a.test.x", // subdomain variations
+ "http://testx:8540", "https://xtest:8540"},
+ },
+ // ip tests
+ {
+ spec: "https://12.34.56.78",
+ expOk: []string{"https://12.34.56.78", "https://12.34.56.78:8540"},
+ expFail: []string{
+ "http://12.34.56.78", // wrong scheme
+ "http://12.34.56.78:443", // wrong scheme
+ "http://1.12.34.56.78", // wrong 'domain name'
+ "http://12.34.56.78.a", // wrong 'domain name'
+ "https://87.65.43.21", "http://87.65.43.21:8540", "https://87.65.43.21:8540"},
+ },
+ // port tests
+ {
+ spec: "test:8540",
+ expOk: []string{"http://test:8540", "https://test:8540"},
+ expFail: []string{
+ "http://test", "https://test", // spec says port required
+ "http://test:8541", "https://test:8541", // wrong port
+ "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"},
+ },
+ // scheme and port
+ {
+ spec: "https://test:8540",
+ expOk: []string{"https://test:8540"},
+ expFail: []string{
+ "https://test", // missing port
+ "http://test", // missing port, + wrong scheme
+ "http://test:8540", // wrong scheme
+ "http://test:8541", "https://test:8541", // wrong port
+ "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"},
+ },
+ // several allowed origins
+ {
+ spec: "localhost,http://127.0.0.1",
+ expOk: []string{"localhost", "http://localhost", "https://localhost:8443",
+ "http://127.0.0.1", "http://127.0.0.1:8080"},
+ expFail: []string{
+ "https://127.0.0.1", // wrong scheme
+ "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"},
+ },
+ }
+ for _, tc := range tests {
+ srv := createAndStartServer(t, &httpConfig{}, true, &wsConfig{Origins: splitAndTrim(tc.spec)})
+ url := fmt.Sprintf("ws://%v", srv.listenAddr())
+ for _, origin := range tc.expOk {
+ if err := wsRequest(t, url, "Origin", origin); err != nil {
+ t.Errorf("spec '%v', origin '%v': expected ok, got %v", tc.spec, origin, err)
+ }
+ }
+ for _, origin := range tc.expFail {
+ if err := wsRequest(t, url, "Origin", origin); err == nil {
+ t.Errorf("spec '%v', origin '%v': expected not to allow, got ok", tc.spec, origin)
+ }
+ }
+ srv.stop()
+ }
+}
+
+// TestIsWebsocket tests if an incoming websocket upgrade request is handled properly.
+func TestIsWebsocket(t *testing.T) {
+ r, _ := http.NewRequest("GET", "/", nil)
+
+ assert.False(t, isWebsocket(r))
+ r.Header.Set("upgrade", "websocket")
+ assert.False(t, isWebsocket(r))
+ r.Header.Set("connection", "upgrade")
+ assert.True(t, isWebsocket(r))
+ r.Header.Set("connection", "upgrade,keep-alive")
+ assert.True(t, isWebsocket(r))
+ r.Header.Set("connection", " UPGRADE,keep-alive")
+ assert.True(t, isWebsocket(r))
+}
+
+func Test_checkPath(t *testing.T) {
+ tests := []struct {
+ req *http.Request
+ prefix string
+ expected bool
+ }{
+ {
+ req: &http.Request{URL: &url.URL{Path: "/test"}},
+ prefix: "/test",
+ expected: true,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/testing"}},
+ prefix: "/test",
+ expected: true,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/"}},
+ prefix: "/test",
+ expected: false,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/fail"}},
+ prefix: "/test",
+ expected: false,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/"}},
+ prefix: "",
+ expected: true,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/fail"}},
+ prefix: "",
+ expected: false,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/"}},
+ prefix: "/",
+ expected: true,
+ },
+ {
+ req: &http.Request{URL: &url.URL{Path: "/testing"}},
+ prefix: "/",
+ expected: true,
+ },
+ }
+
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ assert.Equal(t, tt.expected, checkPath(tt.req, tt.prefix))
+ })
+ }
+}
+
+func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsConfig) *httpServer {
+ t.Helper()
+
+ srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts)
+ assert.NoError(t, srv.enableRPC(nil, *conf))
+ if ws {
+ assert.NoError(t, srv.enableWS(nil, *wsConf))
+ }
+ assert.NoError(t, srv.setListenAddr("localhost", 0))
+ assert.NoError(t, srv.start())
+ return srv
+}
+
+// wsRequest attempts to open a WebSocket connection to the given URL.
+func wsRequest(t *testing.T, url string, extraHeaders ...string) error {
+ t.Helper()
+ //t.Logf("checking WebSocket on %s (origin %q)", url, browserOrigin)
+
+ headers := make(http.Header)
+ // Apply extra headers.
+ if len(extraHeaders)%2 != 0 {
+ panic("odd extraHeaders length")
+ }
+ for i := 0; i < len(extraHeaders); i += 2 {
+ key, value := extraHeaders[i], extraHeaders[i+1]
+ headers.Set(key, value)
+ }
+ conn, _, err := websocket.DefaultDialer.Dial(url, headers)
+ if conn != nil {
+ conn.Close()
+ }
+ return err
+}
+
+// rpcRequest performs a JSON-RPC request to the given URL.
+func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response {
+ t.Helper()
+
+ // Create the request.
+ body := bytes.NewReader([]byte(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`))
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ t.Fatal("could not create http request:", err)
+ }
+ req.Header.Set("content-type", "application/json")
+
+ // Apply extra headers.
+ if len(extraHeaders)%2 != 0 {
+ panic("odd extraHeaders length")
+ }
+ for i := 0; i < len(extraHeaders); i += 2 {
+ key, value := extraHeaders[i], extraHeaders[i+1]
+ if strings.ToLower(key) == "host" {
+ req.Host = value
+ } else {
+ req.Header.Set(key, value)
+ }
+ }
+
+ // Perform the request.
+ t.Logf("checking RPC/HTTP on %s %v", url, extraHeaders)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return resp
+}
+
+type testClaim map[string]interface{}
+
+func (testClaim) Valid() error {
+ return nil
+}
+
+func TestJWT(t *testing.T) {
+ var secret = []byte("secret")
+ issueToken := func(secret []byte, method jwt.SigningMethod, input map[string]interface{}) string {
+ if method == nil {
+ method = jwt.SigningMethodHS256
+ }
+ ss, _ := jwt.NewWithClaims(method, testClaim(input)).SignedString(secret)
+ return ss
+ }
+ expOk := []string{
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 4})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "exp": time.Now().Unix() + 2,
+ })),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{
+ "iat": time.Now().Unix(),
+ "bar": "baz",
+ })),
+ }
+ expFail := []string{
+ // future
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + 6})),
+ // stale
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() - 6})),
+ // wrong algo
+ fmt.Sprintf("Bearer %v", issueToken(secret, jwt.SigningMethodHS512, testClaim{"iat": time.Now().Unix() + 4})),
+ // expired
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix(), "exp": time.Now().Unix()})),
+ // missing mandatory iat
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{})),
+ // wrong secret
+ fmt.Sprintf("Bearer %v", issueToken([]byte("wrong"), nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken([]byte{}, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(nil, nil, testClaim{"iat": time.Now().Unix()})),
+ // Various malformed syntax
+ fmt.Sprintf("%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})[1:]),
+ fmt.Sprintf("bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer: %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer:%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer\t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ fmt.Sprintf("Bearer \t%v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix()})),
+ }
+ srv := createAndStartServer(t, &httpConfig{jwtSecret: []byte("secret")},
+ true, &wsConfig{Origins: []string{"*"}, jwtSecret: []byte("secret")})
+ wsUrl := fmt.Sprintf("ws://%v", srv.listenAddr())
+ htUrl := fmt.Sprintf("http://%v", srv.listenAddr())
+
+ for i, token := range expOk {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err != nil {
+ t.Errorf("test %d-ws, token '%v': expected ok, got %v", i, token, err)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 200 {
+ t.Errorf("test %d-http, token '%v': expected ok, got %v", i, token, resp.StatusCode)
+ }
+ }
+ for i, token := range expFail {
+ if err := wsRequest(t, wsUrl, "Authorization", token); err == nil {
+ t.Errorf("tc %d-ws, token '%v': expected not to allow, got ok", i, token)
+ }
+ if resp := rpcRequest(t, htUrl, "Authorization", token); resp.StatusCode != 403 {
+ t.Errorf("tc %d-http, token '%v': expected not to allow, got %v", i, token, resp.StatusCode)
+ }
+ }
+ srv.stop()
+}
diff --git a/node/service.go b/node/service.go
deleted file mode 100644
index 0bc0d756c..000000000
--- a/node/service.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package node
-
-import (
- "path/filepath"
- "reflect"
-
- "github.com/core-coin/go-core/accounts"
- "github.com/core-coin/go-core/core/rawdb"
- "github.com/core-coin/go-core/event"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/rpc"
- "github.com/core-coin/go-core/xcbdb"
-)
-
-// ServiceContext is a collection of service independent options inherited from
-// the protocol stack, that is passed to all constructors to be optionally used;
-// as well as utility methods to operate on the service environment.
-type ServiceContext struct {
- services map[reflect.Type]Service // Index of the already constructed services
- Config Config
- EventMux *event.TypeMux // Event multiplexer used for decoupled notifications
- AccountManager *accounts.Manager // Account manager created by the node.
-}
-
-// OpenDatabase opens an existing database with the given name (or creates one
-// if no previous can be found) from within the node's data directory. If the
-// node is an ephemeral one, a memory database is returned.
-func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int, namespace string) (xcbdb.Database, error) {
- if ctx.Config.DataDir == "" {
- return rawdb.NewMemoryDatabase(), nil
- }
- return rawdb.NewLevelDBDatabase(ctx.Config.ResolvePath(name), cache, handles, namespace)
-}
-
-// OpenDatabaseWithFreezer opens an existing database with the given name (or
-// creates one if no previous can be found) from within the node's data directory,
-// also attaching a chain freezer to it that moves ancient chain data from the
-// database to immutable append-only files. If the node is an ephemeral one, a
-// memory database is returned.
-func (ctx *ServiceContext) OpenDatabaseWithFreezer(name string, cache int, handles int, freezer string, namespace string) (xcbdb.Database, error) {
- if ctx.Config.DataDir == "" {
- return rawdb.NewMemoryDatabase(), nil
- }
- root := ctx.Config.ResolvePath(name)
-
- switch {
- case freezer == "":
- freezer = filepath.Join(root, "ancient")
- case !filepath.IsAbs(freezer):
- freezer = ctx.Config.ResolvePath(freezer)
- }
- return rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
-}
-
-// ResolvePath resolves a user path into the data directory if that was relative
-// and if the user actually uses persistent storage. It will return an empty string
-// for emphemeral storage and the user's own input for absolute paths.
-func (ctx *ServiceContext) ResolvePath(path string) string {
- return ctx.Config.ResolvePath(path)
-}
-
-// Service retrieves a currently running service registered of a specific type.
-func (ctx *ServiceContext) Service(service interface{}) error {
- element := reflect.ValueOf(service).Elem()
- if running, ok := ctx.services[element.Type()]; ok {
- element.Set(reflect.ValueOf(running))
- return nil
- }
- return ErrServiceUnknown
-}
-
-// ExtRPCEnabled returns the indicator whether node enables the external
-// RPC(http, ws or graphql).
-func (ctx *ServiceContext) ExtRPCEnabled() bool {
- return ctx.Config.ExtRPCEnabled()
-}
-
-// ServiceConstructor is the function signature of the constructors needed to be
-// registered for service instantiation.
-type ServiceConstructor func(ctx *ServiceContext) (Service, error)
-
-// Service is an individual protocol that can be registered into a node.
-//
-// Notes:
-//
-// • Service life-cycle management is delegated to the node. The service is allowed to
-// initialize itself upon creation, but no goroutines should be spun up outside of the
-// Start method.
-//
-// • Restart logic is not required as the node will create a fresh instance
-// every time a service is started.
-type Service interface {
- // Protocols retrieves the P2P protocols the service wishes to start.
- Protocols() []p2p.Protocol
-
- // APIs retrieves the list of RPC descriptors the service provides
- APIs() []rpc.API
-
- // Start is called after all services have been constructed and the networking
- // layer was also initialized to spawn any goroutines required by the service.
- Start(server *p2p.Server) error
-
- // Stop terminates all goroutines belonging to the service, blocking until they
- // are all terminated.
- Stop() error
-}
diff --git a/node/service_test.go b/node/service_test.go
deleted file mode 100644
index a929cb72c..000000000
--- a/node/service_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2015 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package node
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-)
-
-// Tests that databases are correctly created persistent or ephemeral based on
-// the configured service context.
-func TestContextDatabases(t *testing.T) {
- // Create a temporary folder and ensure no database is contained within
- dir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("failed to create temporary data directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- if _, err := os.Stat(filepath.Join(dir, "database")); err == nil {
- t.Fatalf("non-created database already exists")
- }
- // Request the opening/creation of a database and ensure it persists to disk
- ctx := &ServiceContext{Config: Config{Name: "unit-test", DataDir: dir}}
- db, err := ctx.OpenDatabase("persistent", 0, 0, "")
- if err != nil {
- t.Fatalf("failed to open persistent database: %v", err)
- }
- db.Close()
-
- if _, err := os.Stat(filepath.Join(dir, "unit-test", "persistent")); err != nil {
- t.Fatalf("persistent database doesn't exists: %v", err)
- }
- // Request th opening/creation of an ephemeral database and ensure it's not persisted
- ctx = &ServiceContext{Config: Config{DataDir: ""}}
- db, err = ctx.OpenDatabase("ephemeral", 0, 0, "")
- if err != nil {
- t.Fatalf("failed to open ephemeral database: %v", err)
- }
- db.Close()
-
- if _, err := os.Stat(filepath.Join(dir, "ephemeral")); err == nil {
- t.Fatalf("ephemeral database exists")
- }
-}
-
-// Tests that already constructed services can be retrieves by later ones.
-func TestContextServices(t *testing.T) {
- stack, err := New(testNodeConfig())
- if err != nil {
- t.Fatalf("failed to create protocol stack: %v", err)
- }
- defer stack.Close()
- // Define a verifier that ensures a NoopA is before it and NoopB after
- verifier := func(ctx *ServiceContext) (Service, error) {
- var objA *NoopServiceA
- if ctx.Service(&objA) != nil {
- return nil, fmt.Errorf("former service not found")
- }
- var objB *NoopServiceB
- if err := ctx.Service(&objB); err != ErrServiceUnknown {
- return nil, fmt.Errorf("latters lookup error mismatch: have %v, want %v", err, ErrServiceUnknown)
- }
- return new(NoopService), nil
- }
- // Register the collection of services
- if err := stack.Register(NewNoopServiceA); err != nil {
- t.Fatalf("former failed to register service: %v", err)
- }
- if err := stack.Register(verifier); err != nil {
- t.Fatalf("failed to register service verifier: %v", err)
- }
- if err := stack.Register(NewNoopServiceB); err != nil {
- t.Fatalf("latter failed to register service: %v", err)
- }
- // Start the protocol stack and ensure services are constructed in order
- if err := stack.Start(); err != nil {
- t.Fatalf("failed to start stack: %v", err)
- }
- defer stack.Stop()
-}
diff --git a/node/utils_test.go b/node/utils_test.go
index 3922a34d1..fe5827366 100644
--- a/node/utils_test.go
+++ b/node/utils_test.go
@@ -20,65 +20,43 @@
package node
import (
- "reflect"
-
"github.com/core-coin/go-core/p2p"
"github.com/core-coin/go-core/rpc"
)
-// NoopService is a trivial implementation of the Service interface.
-type NoopService struct{}
+// NoopLifecycle is a trivial implementation of the Lifecycle interface.
+type NoopLifecycle struct{}
-func (s *NoopService) Protocols() []p2p.Protocol { return nil }
-func (s *NoopService) APIs() []rpc.API { return nil }
-func (s *NoopService) Start(*p2p.Server) error { return nil }
-func (s *NoopService) Stop() error { return nil }
+func (s *NoopLifecycle) Start() error { return nil }
+func (s *NoopLifecycle) Stop() error { return nil }
-func NewNoopService(*ServiceContext) (Service, error) { return new(NoopService), nil }
+func NewNoop() *Noop {
+ noop := new(Noop)
+ return noop
+}
-// Set of services all wrapping the base NoopService resulting in the same method
+// Set of services all wrapping the base NoopLifecycle resulting in the same method
// signatures but different outer types.
-type NoopServiceA struct{ NoopService }
-type NoopServiceB struct{ NoopService }
-type NoopServiceC struct{ NoopService }
-
-func NewNoopServiceA(*ServiceContext) (Service, error) { return new(NoopServiceA), nil }
-func NewNoopServiceB(*ServiceContext) (Service, error) { return new(NoopServiceB), nil }
-func NewNoopServiceC(*ServiceContext) (Service, error) { return new(NoopServiceC), nil }
+type Noop struct{ NoopLifecycle }
-// InstrumentedService is an implementation of Service for which all interface
+// InstrumentedService is an implementation of Lifecycle for which all interface
// methods can be instrumented both return value as well as event hook wise.
type InstrumentedService struct {
- protocols []p2p.Protocol
- apis []rpc.API
- start error
- stop error
+ start error
+ stop error
- protocolsHook func()
- startHook func(*p2p.Server)
- stopHook func()
-}
-
-func NewInstrumentedService(*ServiceContext) (Service, error) { return new(InstrumentedService), nil }
-
-func (s *InstrumentedService) Protocols() []p2p.Protocol {
- if s.protocolsHook != nil {
- s.protocolsHook()
- }
- return s.protocols
-}
+ startHook func()
+ stopHook func()
-func (s *InstrumentedService) APIs() []rpc.API {
- return s.apis
+ protocols []p2p.Protocol
}
-func (s *InstrumentedService) Start(server *p2p.Server) error {
+func (s *InstrumentedService) Start() error {
if s.startHook != nil {
- s.startHook(server)
+ s.startHook()
}
return s.start
}
-
func (s *InstrumentedService) Stop() error {
if s.stopHook != nil {
s.stopHook()
@@ -86,48 +64,49 @@ func (s *InstrumentedService) Stop() error {
return s.stop
}
-// InstrumentingWrapper is a method to specialize a service constructor returning
-// a generic InstrumentedService into one returning a wrapping specific one.
-type InstrumentingWrapper func(base ServiceConstructor) ServiceConstructor
+type FullService struct{}
-func InstrumentingWrapperMaker(base ServiceConstructor, kind reflect.Type) ServiceConstructor {
- return func(ctx *ServiceContext) (Service, error) {
- obj, err := base(ctx)
- if err != nil {
- return nil, err
- }
- wrapper := reflect.New(kind)
- wrapper.Elem().Field(0).Set(reflect.ValueOf(obj).Elem())
+func NewFullService(stack *Node) (*FullService, error) {
+ fs := new(FullService)
- return wrapper.Interface().(Service), nil
- }
+ stack.RegisterProtocols(fs.Protocols())
+ stack.RegisterAPIs(fs.APIs())
+ stack.RegisterLifecycle(fs)
+ return fs, nil
}
-// Set of services all wrapping the base InstrumentedService resulting in the
-// same method signatures but different outer types.
-type InstrumentedServiceA struct{ InstrumentedService }
-type InstrumentedServiceB struct{ InstrumentedService }
-type InstrumentedServiceC struct{ InstrumentedService }
+func (f *FullService) Start() error { return nil }
-func InstrumentedServiceMakerA(base ServiceConstructor) ServiceConstructor {
- return InstrumentingWrapperMaker(base, reflect.TypeOf(InstrumentedServiceA{}))
-}
+func (f *FullService) Stop() error { return nil }
-func InstrumentedServiceMakerB(base ServiceConstructor) ServiceConstructor {
- return InstrumentingWrapperMaker(base, reflect.TypeOf(InstrumentedServiceB{}))
-}
-
-func InstrumentedServiceMakerC(base ServiceConstructor) ServiceConstructor {
- return InstrumentingWrapperMaker(base, reflect.TypeOf(InstrumentedServiceC{}))
-}
-
-// OneMethodAPI is a single-method API handler to be returned by test services.
-type OneMethodAPI struct {
- fun func()
+func (f *FullService) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{
+ p2p.Protocol{
+ Name: "test1",
+ Version: uint(1),
+ },
+ p2p.Protocol{
+ Name: "test2",
+ Version: uint(2),
+ },
+ }
}
-func (api *OneMethodAPI) TheOneMethod() {
- if api.fun != nil {
- api.fun()
+func (f *FullService) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: "admin",
+ Version: "1.0",
+ },
+ {
+ Namespace: "debug",
+ Version: "1.0",
+ Public: true,
+ },
+ {
+ Namespace: "net",
+ Version: "1.0",
+ Public: true,
+ },
}
}
diff --git a/p2p/discover/v5_encoding.go b/p2p/discover/v5_encoding.go
index ac8952abc..9196d0e89 100644
--- a/p2p/discover/v5_encoding.go
+++ b/p2p/discover/v5_encoding.go
@@ -21,7 +21,6 @@ import (
"crypto/aes"
"crypto/cipher"
crand "crypto/rand"
- "crypto/sha256"
"errors"
"fmt"
"golang.org/x/crypto/sha3"
@@ -211,7 +210,7 @@ func (h *authHeaderList) ephemeralKey() *eddsa.PublicKey {
// newWireCodec creates a wire codec.
func newWireCodec(ln *enode.LocalNode, key *eddsa.PrivateKey, clock mclock.Clock) *wireCodec {
c := &wireCodec{
- sha256: sha256.New(),
+ sha256: sha3.New256(),
localnode: ln,
privkey: key,
sc: newSessionCache(1024, clock),
diff --git a/p2p/enode/node.go b/p2p/enode/node.go
index c68e0c53c..711d51d36 100644
--- a/p2p/enode/node.go
+++ b/p2p/enode/node.go
@@ -217,7 +217,7 @@ func (n ID) MarshalText() ([]byte, error) {
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (n *ID) UnmarshalText(text []byte) error {
- id, err := parseID(string(text))
+ id, err := ParseID(string(text))
if err != nil {
return err
}
@@ -229,14 +229,14 @@ func (n *ID) UnmarshalText(text []byte) error {
// The string may be prefixed with 0x.
// It panics if the string is not a valid ID.
func HexID(in string) ID {
- id, err := parseID(in)
+ id, err := ParseID(in)
if err != nil {
panic(err)
}
return id
}
-func parseID(in string) (ID, error) {
+func ParseID(in string) (ID, error) {
var id ID
b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go
new file mode 100644
index 000000000..2922a2481
--- /dev/null
+++ b/p2p/nodestate/nodestate.go
@@ -0,0 +1,878 @@
+// Copyright 2020 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package nodestate
+
+import (
+ "errors"
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/metrics"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/rlp"
+ "github.com/core-coin/go-core/xcbdb"
+ "reflect"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+type (
+ // NodeStateMachine connects different system components operating on subsets of
+ // network nodes. Node states are represented by 64 bit vectors with each bit assigned
+ // to a state flag. Each state flag has a descriptor structure and the mapping is
+ // created automatically. It is possible to subscribe to subsets of state flags and
+ // receive a callback if one of the nodes has a relevant state flag changed.
+ // Callbacks can also modify further flags of the same node or other nodes. State
+ // updates only return after all immediate effects throughout the system have happened
+ // (deadlocks should be avoided by design of the implemented state logic). The caller
+ // can also add timeouts assigned to a certain node and a subset of state flags.
+ // If the timeout elapses, the flags are reset. If all relevant flags are reset then
+ // the timer is dropped. State flags with no timeout are persisted in the database
+ // if the flag descriptor enables saving. If a node has no state flags set at any
+ // moment then it is discarded.
+ //
+ // Extra node fields can also be registered so system components can also store more
+ // complex state for each node that is relevant to them, without creating a custom
+ // peer set. Fields can be shared across multiple components if they all know the
+ // field ID. Subscription to fields is also possible. Persistent fields should have
+ // an encoder and a decoder function.
+ NodeStateMachine struct {
+ started, stopped bool
+ lock sync.Mutex
+ clock mclock.Clock
+ db xcbdb.KeyValueStore
+ dbNodeKey []byte
+ nodes map[enode.ID]*nodeInfo
+ offlineCallbackList []offlineCallback
+
+ // Registered state flags or fields. Modifications are allowed
+ // only when the node state machine has not been started.
+ setup *Setup
+ fields []*fieldInfo
+ saveFlags bitMask
+
+ // Installed callbacks. Modifications are allowed only when the
+ // node state machine has not been started.
+ stateSubs []stateSub
+
+ // Testing hooks, only for testing purposes.
+ saveNodeHook func(*nodeInfo)
+ }
+
+ // Flags represents a set of flags from a certain setup
+ Flags struct {
+ mask bitMask
+ setup *Setup
+ }
+
+ // Field represents a field from a certain setup
+ Field struct {
+ index int
+ setup *Setup
+ }
+
+ // flagDefinition describes a node state flag. Each registered instance is automatically
+ // mapped to a bit of the 64 bit node states.
+ // If persistent is true then the node is saved when state machine is shutdown.
+ flagDefinition struct {
+ name string
+ persistent bool
+ }
+
+ // fieldDefinition describes an optional node field of the given type. The contents
+ // of the field are only retained for each node as long as at least one of the
+ // state flags is set.
+ fieldDefinition struct {
+ name string
+ ftype reflect.Type
+ encode func(interface{}) ([]byte, error)
+ decode func([]byte) (interface{}, error)
+ }
+
+	// Setup contains the list of flags and fields used by the application
+ Setup struct {
+ Version uint
+ flags []flagDefinition
+ fields []fieldDefinition
+ }
+
+ // bitMask describes a node state or state mask. It represents a subset
+ // of node flags with each bit assigned to a flag index (LSB represents flag 0).
+ bitMask uint64
+
+ // StateCallback is a subscription callback which is called when one of the
+ // state flags that is included in the subscription state mask is changed.
+ // Note: oldState and newState are also masked with the subscription mask so only
+ // the relevant bits are included.
+ StateCallback func(n *enode.Node, oldState, newState Flags)
+
+ // FieldCallback is a subscription callback which is called when the value of
+ // a specific field is changed.
+ FieldCallback func(n *enode.Node, state Flags, oldValue, newValue interface{})
+
+ // nodeInfo contains node state, fields and state timeouts
+ nodeInfo struct {
+ node *enode.Node
+ state bitMask
+ timeouts []*nodeStateTimeout
+ fields []interface{}
+ db, dirty bool
+ }
+
+ nodeInfoEnc struct {
+ Enr enr.Record
+ Version uint
+ State bitMask
+ Fields [][]byte
+ }
+
+ stateSub struct {
+ mask bitMask
+ callback StateCallback
+ }
+
+ nodeStateTimeout struct {
+ mask bitMask
+ timer mclock.Timer
+ }
+
+ fieldInfo struct {
+ fieldDefinition
+ subs []FieldCallback
+ }
+
+ offlineCallback struct {
+ node *enode.Node
+ state bitMask
+ fields []interface{}
+ }
+)
+
+// offlineState is a special state that is assumed to be set before a node is loaded from
+// the database and after it is shut down.
+const offlineState = bitMask(1)
+
+// NewFlag creates a new node state flag
+func (s *Setup) NewFlag(name string) Flags {
+ if s.flags == nil {
+ s.flags = []flagDefinition{{name: "offline"}}
+ }
+ f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s}
+ s.flags = append(s.flags, flagDefinition{name: name})
+ return f
+}
+
+// NewPersistentFlag creates a new persistent node state flag
+func (s *Setup) NewPersistentFlag(name string) Flags {
+ if s.flags == nil {
+ s.flags = []flagDefinition{{name: "offline"}}
+ }
+ f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s}
+ s.flags = append(s.flags, flagDefinition{name: name, persistent: true})
+ return f
+}
+
+// OfflineFlag returns the system-defined offline flag belonging to the given setup
+func (s *Setup) OfflineFlag() Flags {
+ return Flags{mask: offlineState, setup: s}
+}
+
+// NewField creates a new node state field
+func (s *Setup) NewField(name string, ftype reflect.Type) Field {
+ f := Field{index: len(s.fields), setup: s}
+ s.fields = append(s.fields, fieldDefinition{
+ name: name,
+ ftype: ftype,
+ })
+ return f
+}
+
+// NewPersistentField creates a new persistent node field
+func (s *Setup) NewPersistentField(name string, ftype reflect.Type, encode func(interface{}) ([]byte, error), decode func([]byte) (interface{}, error)) Field {
+ f := Field{index: len(s.fields), setup: s}
+ s.fields = append(s.fields, fieldDefinition{
+ name: name,
+ ftype: ftype,
+ encode: encode,
+ decode: decode,
+ })
+ return f
+}
+
+// flagOp implements binary flag operations and also checks whether the operands belong to the same setup
+func flagOp(a, b Flags, trueIfA, trueIfB, trueIfBoth bool) Flags {
+ if a.setup == nil {
+ if a.mask != 0 {
+ panic("Node state flags have no setup reference")
+ }
+ a.setup = b.setup
+ }
+ if b.setup == nil {
+ if b.mask != 0 {
+ panic("Node state flags have no setup reference")
+ }
+ b.setup = a.setup
+ }
+ if a.setup != b.setup {
+ panic("Node state flags belong to a different setup")
+ }
+ res := Flags{setup: a.setup}
+ if trueIfA {
+ res.mask |= a.mask & ^b.mask
+ }
+ if trueIfB {
+ res.mask |= b.mask & ^a.mask
+ }
+ if trueIfBoth {
+ res.mask |= a.mask & b.mask
+ }
+ return res
+}
+
+// And returns the set of flags present in both a and b
+func (a Flags) And(b Flags) Flags { return flagOp(a, b, false, false, true) }
+
+// AndNot returns the set of flags present in a but not in b
+func (a Flags) AndNot(b Flags) Flags { return flagOp(a, b, true, false, false) }
+
+// Or returns the set of flags present in either a or b
+func (a Flags) Or(b Flags) Flags { return flagOp(a, b, true, true, true) }
+
+// Xor returns the set of flags present in either a or b but not both
+func (a Flags) Xor(b Flags) Flags { return flagOp(a, b, true, true, false) }
+
+// HasAll returns true if b is a subset of a
+func (a Flags) HasAll(b Flags) bool { return flagOp(a, b, false, true, false).mask == 0 }
+
+// HasNone returns true if a and b have no shared flags
+func (a Flags) HasNone(b Flags) bool { return flagOp(a, b, false, false, true).mask == 0 }
+
+// Equals returns true if a and b have the same flags set
+func (a Flags) Equals(b Flags) bool { return flagOp(a, b, true, true, false).mask == 0 }
+
+// IsEmpty returns true if a has no flags set
+func (a Flags) IsEmpty() bool { return a.mask == 0 }
+
+// MergeFlags merges multiple sets of state flags
+func MergeFlags(list ...Flags) Flags {
+ if len(list) == 0 {
+ return Flags{}
+ }
+ res := list[0]
+ for i := 1; i < len(list); i++ {
+ res = res.Or(list[i])
+ }
+ return res
+}
+
+// String returns a list of the names of the flags specified in the bit mask
+func (f Flags) String() string {
+ if f.mask == 0 {
+ return "[]"
+ }
+ s := "["
+ comma := false
+ for index, flag := range f.setup.flags {
+		if f.mask&(bitMask(1)<<uint(index)) != 0 {
+			if comma {
+				s = s + ", "
+			}
+			s = s + flag.name
+			comma = true
+		}
+	}
+	s = s + "]"
+	return s
+}
+
+// NewNodeStateMachine creates a new node state machine.
+// If db is not nil then the node states, fields and active timeouts are persisted.
+// Persistence can be enabled or disabled for each state flag and field.
+func NewNodeStateMachine(db xcbdb.KeyValueStore, dbKey []byte, clock mclock.Clock, setup *Setup) *NodeStateMachine {
+	if setup.flags == nil {
+		panic("Node state flags not defined")
+	}
+	if len(setup.flags) > 8*int(unsafe.Sizeof(bitMask(0))) {
+		panic("Too many node state flags")
+	}
+ ns := &NodeStateMachine{
+ db: db,
+ dbNodeKey: dbKey,
+ clock: clock,
+ setup: setup,
+ nodes: make(map[enode.ID]*nodeInfo),
+ fields: make([]*fieldInfo, len(setup.fields)),
+ }
+ stateNameMap := make(map[string]int)
+ for index, flag := range setup.flags {
+ if _, ok := stateNameMap[flag.name]; ok {
+ panic("Node state flag name collision")
+ }
+ stateNameMap[flag.name] = index
+ if flag.persistent {
+ ns.saveFlags |= bitMask(1) << uint(index)
+ }
+ }
+ fieldNameMap := make(map[string]int)
+ for index, field := range setup.fields {
+ if _, ok := fieldNameMap[field.name]; ok {
+ panic("Node field name collision")
+ }
+ ns.fields[index] = &fieldInfo{fieldDefinition: field}
+ fieldNameMap[field.name] = index
+ }
+ return ns
+}
+
+// stateMask checks whether the set of flags belongs to the same setup and returns its internal bit mask
+func (ns *NodeStateMachine) stateMask(flags Flags) bitMask {
+ if flags.setup != ns.setup && flags.mask != 0 {
+ panic("Node state flags belong to a different setup")
+ }
+ return flags.mask
+}
+
+// fieldIndex checks whether the field belongs to the same setup and returns its internal index
+func (ns *NodeStateMachine) fieldIndex(field Field) int {
+ if field.setup != ns.setup {
+ panic("Node field belongs to a different setup")
+ }
+ return field.index
+}
+
+// SubscribeState adds a node state subscription. The callback is called while the state
+// machine mutex is not held and it is allowed to make further state updates. All immediate
+// changes throughout the system are processed in the same thread/goroutine. It is the
+// responsibility of the implemented state logic to avoid deadlocks caused by the callbacks,
+// infinite toggling of flags or hazardous/non-deterministic state changes.
+// State subscriptions should be installed before loading the node database or making the
+// first state update.
+func (ns *NodeStateMachine) SubscribeState(flags Flags, callback StateCallback) {
+ ns.lock.Lock()
+ defer ns.lock.Unlock()
+
+ if ns.started {
+ panic("state machine already started")
+ }
+ ns.stateSubs = append(ns.stateSubs, stateSub{ns.stateMask(flags), callback})
+}
+
+// SubscribeField adds a node field subscription. Same rules apply as for SubscribeState.
+func (ns *NodeStateMachine) SubscribeField(field Field, callback FieldCallback) {
+ ns.lock.Lock()
+ defer ns.lock.Unlock()
+
+ if ns.started {
+ panic("state machine already started")
+ }
+ f := ns.fields[ns.fieldIndex(field)]
+ f.subs = append(f.subs, callback)
+}
+
+// newNode creates a new nodeInfo
+func (ns *NodeStateMachine) newNode(n *enode.Node) *nodeInfo {
+ return &nodeInfo{node: n, fields: make([]interface{}, len(ns.fields))}
+}
+
+// checkStarted checks whether the state machine has already been started and panics otherwise.
+func (ns *NodeStateMachine) checkStarted() {
+ if !ns.started {
+ panic("state machine not started yet")
+ }
+}
+
+// Start starts the state machine, enabling state and field operations and disabling
+// further subscriptions.
+func (ns *NodeStateMachine) Start() {
+ ns.lock.Lock()
+ if ns.started {
+ panic("state machine already started")
+ }
+ ns.started = true
+ if ns.db != nil {
+ ns.loadFromDb()
+ }
+ ns.lock.Unlock()
+ ns.offlineCallbacks(true)
+}
+
+// Stop stops the state machine and saves its state if a database was supplied
+func (ns *NodeStateMachine) Stop() {
+ ns.lock.Lock()
+ for _, node := range ns.nodes {
+ fields := make([]interface{}, len(node.fields))
+ copy(fields, node.fields)
+ ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node.node, node.state, fields})
+ }
+ ns.stopped = true
+ if ns.db != nil {
+ ns.saveToDb()
+ ns.lock.Unlock()
+ } else {
+ ns.lock.Unlock()
+ }
+ ns.offlineCallbacks(false)
+}
+
+// loadFromDb loads persisted node states from the database
+func (ns *NodeStateMachine) loadFromDb() {
+ it := ns.db.NewIterator(ns.dbNodeKey, nil)
+ for it.Next() {
+ var id enode.ID
+ if len(it.Key()) != len(ns.dbNodeKey)+len(id) {
+ log.Error("Node state db entry with invalid length", "found", len(it.Key()), "expected", len(ns.dbNodeKey)+len(id))
+ continue
+ }
+ copy(id[:], it.Key()[len(ns.dbNodeKey):])
+ ns.decodeNode(id, it.Value())
+ }
+}
+
+type dummyIdentity enode.ID
+
+func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil }
+func (id dummyIdentity) NodeAddr(r *enr.Record) []byte { return id[:] }
+
+// decodeNode decodes a node database entry and adds it to the node set if successful
+func (ns *NodeStateMachine) decodeNode(id enode.ID, data []byte) {
+ var enc nodeInfoEnc
+ if err := rlp.DecodeBytes(data, &enc); err != nil {
+ log.Error("Failed to decode node info", "id", id, "error", err)
+ return
+ }
+ n, _ := enode.New(dummyIdentity(id), &enc.Enr)
+ node := ns.newNode(n)
+ node.db = true
+
+ if enc.Version != ns.setup.Version {
+ log.Debug("Removing stored node with unknown version", "current", ns.setup.Version, "stored", enc.Version)
+ ns.deleteNode(id)
+ return
+ }
+ if len(enc.Fields) > len(ns.setup.fields) {
+ log.Error("Invalid node field count", "id", id, "stored", len(enc.Fields))
+ return
+ }
+ // Resolve persisted node fields
+ for i, encField := range enc.Fields {
+ if len(encField) == 0 {
+ continue
+ }
+ if decode := ns.fields[i].decode; decode != nil {
+ if field, err := decode(encField); err == nil {
+ node.fields[i] = field
+ } else {
+ log.Error("Failed to decode node field", "id", id, "field name", ns.fields[i].name, "error", err)
+ return
+ }
+ } else {
+ log.Error("Cannot decode node field", "id", id, "field name", ns.fields[i].name)
+ return
+ }
+ }
+ // It's a compatible node record, add it to set.
+ ns.nodes[id] = node
+ node.state = enc.State
+ fields := make([]interface{}, len(node.fields))
+ copy(fields, node.fields)
+ ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node.node, node.state, fields})
+ log.Debug("Loaded node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup})
+}
+
// saveNode saves the given node info to the database.
//
// Only flags selected by ns.saveFlags are persisted, and any flag currently
// guarded by a pending timeout is excluded (timed-out state should not
// survive a restart). If nothing persistable remains, a previously stored
// entry is removed instead of rewritten.
func (ns *NodeStateMachine) saveNode(id enode.ID, node *nodeInfo) error {
	if ns.db == nil {
		// Persistence disabled; treat as a successful no-op.
		return nil
	}

	// Mask out volatile flags and flags with an active timeout.
	storedState := node.state & ns.saveFlags
	for _, t := range node.timeouts {
		storedState &= ^t.mask
	}
	if storedState == 0 {
		// Nothing to persist; drop any stale database entry.
		if node.db {
			node.db = false
			ns.deleteNode(id)
		}
		node.dirty = false
		return nil
	}

	enc := nodeInfoEnc{
		Enr:     *node.node.Record(),
		Version: ns.setup.Version,
		State:   storedState,
		Fields:  make([][]byte, len(ns.fields)),
	}
	log.Debug("Saved node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup})
	// Encode persistent fields; lastIndex tracks the highest populated slot so
	// the trailing empty entries can be trimmed for a compact RLP encoding.
	lastIndex := -1
	for i, f := range node.fields {
		if f == nil {
			continue
		}
		encode := ns.fields[i].encode
		if encode == nil {
			// No encoder means the field is not persistent.
			continue
		}
		blob, err := encode(f)
		if err != nil {
			return err
		}
		enc.Fields[i] = blob
		lastIndex = i
	}
	enc.Fields = enc.Fields[:lastIndex+1]
	data, err := rlp.EncodeToBytes(&enc)
	if err != nil {
		return err
	}
	if err := ns.db.Put(append(ns.dbNodeKey, id[:]...), data); err != nil {
		return err
	}
	node.dirty, node.db = false, true

	// Test hook, invoked only after a successful write.
	if ns.saveNodeHook != nil {
		ns.saveNodeHook(node)
	}
	return nil
}
+
// deleteNode removes a node info from the database.
// NOTE(review): the Delete error is ignored (best-effort removal) — confirm
// this is intended for the database backend in use.
func (ns *NodeStateMachine) deleteNode(id enode.ID) {
	ns.db.Delete(append(ns.dbNodeKey, id[:]...))
}
+
+// saveToDb saves the persistent flags and fields of all nodes that have been changed
+func (ns *NodeStateMachine) saveToDb() {
+ for id, node := range ns.nodes {
+ if node.dirty {
+ err := ns.saveNode(id, node)
+ if err != nil {
+ log.Error("Failed to save node", "id", id, "error", err)
+ }
+ }
+ }
+}
+
+// updateEnode updates the enode entry belonging to the given node if it already exists
+func (ns *NodeStateMachine) updateEnode(n *enode.Node) (enode.ID, *nodeInfo) {
+ id := n.ID()
+ node := ns.nodes[id]
+ if node != nil && n.Seq() > node.node.Seq() {
+ node.node = n
+ }
+ return id, node
+}
+
// Persist saves the persistent state and fields of the given node immediately.
//
// The node's enode record is refreshed first if the caller presents a newer
// one. Only nodes marked dirty are written; an unknown or clean node is a
// no-op returning nil.
func (ns *NodeStateMachine) Persist(n *enode.Node) error {
	ns.lock.Lock()
	defer ns.lock.Unlock()
	ns.checkStarted()
	if id, node := ns.updateEnode(n); node != nil && node.dirty {
		err := ns.saveNode(id, node)
		if err != nil {
			// Logged as well as returned so the failure is visible even if
			// the caller discards the error.
			log.Error("Failed to save node", "id", id, "error", err)
		}
		return err
	}
	return nil
}
+
// SetState updates the given node state flags and processes all resulting callbacks.
// It only returns after all subsequent immediate changes (including those changed by the
// callbacks) have been processed. If a flag with a timeout is set again, the operation
// removes or replaces the existing timeout.
func (ns *NodeStateMachine) SetState(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) {
	ns.lock.Lock()
	ns.checkStarted()
	if ns.stopped {
		ns.lock.Unlock()
		return
	}

	set, reset := ns.stateMask(setFlags), ns.stateMask(resetFlags)
	id, node := ns.updateEnode(n)
	if node == nil {
		if set == 0 {
			// Nothing to set on an unknown node; a pure reset is a no-op.
			ns.lock.Unlock()
			return
		}
		node = ns.newNode(n)
		ns.nodes[id] = node
	}
	oldState := node.state
	// Reset takes effect before set, so flags in both masks end up set.
	newState := (node.state & (^reset)) | set
	changed := oldState ^ newState
	node.state = newState

	// Remove the timeout callbacks for all reset and set flags,
	// even they are not existent(it's noop).
	ns.removeTimeouts(node, set|reset)

	// Register the timeout callback if the new state is not empty
	// and timeout itself is required.
	if timeout != 0 && newState != 0 {
		ns.addTimeout(n, set, timeout)
	}
	if newState == oldState {
		ns.lock.Unlock()
		return
	}
	if newState == 0 {
		// All flags cleared: forget the node and drop any persisted entry.
		delete(ns.nodes, id)
		if node.db {
			ns.deleteNode(id)
		}
	} else {
		// Only transitions of persistent flags require a database write-back.
		if changed&ns.saveFlags != 0 {
			node.dirty = true
		}
	}
	ns.lock.Unlock()
	// call state update subscription callbacks without holding the mutex
	for _, sub := range ns.stateSubs {
		if changed&sub.mask != 0 {
			// Subscribers see old/new state masked to their subscribed flags.
			sub.callback(n, Flags{mask: oldState & sub.mask, setup: ns.setup}, Flags{mask: newState & sub.mask, setup: ns.setup})
		}
	}
	if newState == 0 {
		// call field subscriptions for discarded fields
		for i, v := range node.fields {
			if v != nil {
				f := ns.fields[i]
				if len(f.subs) > 0 {
					for _, cb := range f.subs {
						// nil new value signals the field was discarded.
						cb(n, Flags{setup: ns.setup}, v, nil)
					}
				}
			}
		}
	}
}
+
// offlineCallbacks calls state update callbacks at startup or shutdown.
//
// At startup (start == true) every node loaded from the database transitions
// from the synthetic offline state to its stored state; at shutdown the
// direction is reversed. Field subscribers are notified in the same manner,
// with nil standing in for the value on the offline side. The pending
// callback list is consumed.
func (ns *NodeStateMachine) offlineCallbacks(start bool) {
	for _, cb := range ns.offlineCallbackList {
		for _, sub := range ns.stateSubs {
			// Compare the offline flag and the stored state within this
			// subscriber's mask; only notify when they differ.
			offState := offlineState & sub.mask
			onState := cb.state & sub.mask
			if offState != onState {
				if start {
					sub.callback(cb.node, Flags{mask: offState, setup: ns.setup}, Flags{mask: onState, setup: ns.setup})
				} else {
					sub.callback(cb.node, Flags{mask: onState, setup: ns.setup}, Flags{mask: offState, setup: ns.setup})
				}
			}
		}
		for i, f := range cb.fields {
			if f != nil && ns.fields[i].subs != nil {
				for _, fsub := range ns.fields[i].subs {
					if start {
						fsub(cb.node, Flags{mask: offlineState, setup: ns.setup}, nil, f)
					} else {
						fsub(cb.node, Flags{mask: offlineState, setup: ns.setup}, f, nil)
					}
				}
			}
		}
	}
	ns.offlineCallbackList = nil
}
+
// AddTimeout adds a node state timeout associated to the given state flag(s).
// After the specified time interval, the relevant states will be reset.
// Calling on a stopped machine is a no-op.
func (ns *NodeStateMachine) AddTimeout(n *enode.Node, flags Flags, timeout time.Duration) {
	ns.lock.Lock()
	defer ns.lock.Unlock()

	ns.checkStarted()
	if ns.stopped {
		return
	}
	ns.addTimeout(n, ns.stateMask(flags), timeout)
}
+
// addTimeout adds a node state timeout associated to the given state flag(s).
// The caller must hold ns.lock.
func (ns *NodeStateMachine) addTimeout(n *enode.Node, mask bitMask, timeout time.Duration) {
	_, node := ns.updateEnode(n)
	if node == nil {
		return
	}
	// Only flags that are currently set can be timed out.
	mask &= node.state
	if mask == 0 {
		return
	}
	// Replace any existing timeouts that cover (part of) these flags.
	ns.removeTimeouts(node, mask)
	t := &nodeStateTimeout{mask: mask}
	t.timer = ns.clock.AfterFunc(timeout, func() {
		// On expiry reset the flags; SetState acquires the lock itself.
		ns.SetState(n, Flags{}, Flags{mask: t.mask, setup: ns.setup}, 0)
	})
	node.timeouts = append(node.timeouts, t)
	if mask&ns.saveFlags != 0 {
		// Flags under timeout are excluded from persistence, so the stored
		// entry must be rewritten.
		node.dirty = true
	}
}
+
// removeTimeout removes node state timeouts associated to the given state flag(s).
// If a timeout was associated to multiple flags which are not all included in the
// specified remove mask then only the included flags are de-associated and the timer
// stays active.
func (ns *NodeStateMachine) removeTimeouts(node *nodeInfo, mask bitMask) {
	for i := 0; i < len(node.timeouts); i++ {
		t := node.timeouts[i]
		match := t.mask & mask
		if match == 0 {
			continue
		}
		// Detach the matched flags; keep the timer if other flags remain.
		t.mask -= match
		if t.mask != 0 {
			continue
		}
		// Fully matched: stop the timer and swap-delete the entry, rewinding
		// i so the element moved into this slot is also examined.
		t.timer.Stop()
		node.timeouts[i] = node.timeouts[len(node.timeouts)-1]
		node.timeouts = node.timeouts[:len(node.timeouts)-1]
		i--
		if match&ns.saveFlags != 0 {
			// A persistent flag lost its timeout guard; re-save the node.
			node.dirty = true
		}
	}
}
+
// GetField retrieves the given field of the given node.
// It returns nil if the machine has been stopped or the node is not tracked.
func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} {
	ns.lock.Lock()
	defer ns.lock.Unlock()

	ns.checkStarted()
	if ns.stopped {
		return nil
	}
	if _, node := ns.updateEnode(n); node != nil {
		return node.fields[ns.fieldIndex(field)]
	}
	return nil
}
+
// SetField sets the given field of the given node and notifies field
// subscribers. Setting a field on an unknown node or a stopped machine is a
// silent no-op returning nil; a value with the wrong dynamic type is rejected
// with an error. nil always clears the field.
func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error {
	ns.lock.Lock()
	ns.checkStarted()
	if ns.stopped {
		ns.lock.Unlock()
		return nil
	}
	_, node := ns.updateEnode(n)
	if node == nil {
		ns.lock.Unlock()
		return nil
	}
	fieldIndex := ns.fieldIndex(field)
	f := ns.fields[fieldIndex]
	// Enforce the field's declared type (nil is always accepted as "unset").
	if value != nil && reflect.TypeOf(value) != f.ftype {
		log.Error("Invalid field type", "type", reflect.TypeOf(value), "required", f.ftype)
		ns.lock.Unlock()
		return errors.New("invalid field type")
	}
	oldValue := node.fields[fieldIndex]
	if value == oldValue {
		// Unchanged; avoid dirtying the node and firing callbacks.
		ns.lock.Unlock()
		return nil
	}
	node.fields[fieldIndex] = value
	if f.encode != nil {
		// Persistent field changed; schedule a database write-back.
		node.dirty = true
	}

	// Snapshot the state, then invoke subscriber callbacks without the lock.
	state := node.state
	ns.lock.Unlock()
	if len(f.subs) > 0 {
		for _, cb := range f.subs {
			cb(n, Flags{mask: state, setup: ns.setup}, oldValue, value)
		}
	}
	return nil
}
+
// ForEach calls the callback for each node having all of the required and none of the
// disabled flags set.
//
// Matching nodes are snapshotted under the lock, then the callback runs
// without holding it. Note the state handed to the callback is masked to
// require|disable, not the node's full state.
func (ns *NodeStateMachine) ForEach(requireFlags, disableFlags Flags, cb func(n *enode.Node, state Flags)) {
	ns.lock.Lock()
	ns.checkStarted()
	type callback struct {
		node  *enode.Node
		state bitMask
	}
	require, disable := ns.stateMask(requireFlags), ns.stateMask(disableFlags)
	var callbacks []callback
	for _, node := range ns.nodes {
		if node.state&require == require && node.state&disable == 0 {
			callbacks = append(callbacks, callback{node.node, node.state & (require | disable)})
		}
	}
	ns.lock.Unlock()
	for _, c := range callbacks {
		cb(c.node, Flags{mask: c.state, setup: ns.setup})
	}
}
+
+// GetNode returns the enode currently associated with the given ID
+func (ns *NodeStateMachine) GetNode(id enode.ID) *enode.Node {
+ ns.lock.Lock()
+ defer ns.lock.Unlock()
+
+ ns.checkStarted()
+ if node := ns.nodes[id]; node != nil {
+ return node.node
+ }
+ return nil
+}
+
// AddLogMetrics adds logging and/or metrics for nodes entering, exiting and currently
// being in a given set specified by required and disabled state flags.
//
// Any of name, inMeter, outMeter and gauge may be zero-valued to disable the
// corresponding output.
// NOTE(review): count is mutated without synchronization — this assumes state
// subscription callbacks are serialized; confirm against SubscribeState's
// dispatch behavior.
func (ns *NodeStateMachine) AddLogMetrics(requireFlags, disableFlags Flags, name string, inMeter, outMeter metrics.Meter, gauge metrics.Gauge) {
	var count int64
	ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState Flags) {
		// Membership = all required flags present, no disabled flag present.
		oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)
		newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)
		if newMatch == oldMatch {
			return
		}

		if newMatch {
			count++
			if name != "" {
				log.Debug("Node entered", "set", name, "id", n.ID(), "count", count)
			}
			if inMeter != nil {
				inMeter.Mark(1)
			}
		} else {
			count--
			if name != "" {
				log.Debug("Node left", "set", name, "id", n.ID(), "count", count)
			}
			if outMeter != nil {
				outMeter.Mark(1)
			}
		}
		if gauge != nil {
			gauge.Update(count)
		}
	})
}
diff --git a/p2p/nodestate/nodestate_test.go b/p2p/nodestate/nodestate_test.go
new file mode 100644
index 000000000..8ce08d420
--- /dev/null
+++ b/p2p/nodestate/nodestate_test.go
@@ -0,0 +1,372 @@
+package nodestate
+
+import (
+ "errors"
+ "fmt"
+ "github.com/core-coin/go-core/common/mclock"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/p2p/enode"
+ "github.com/core-coin/go-core/p2p/enr"
+ "github.com/core-coin/go-core/rlp"
+ "reflect"
+ "testing"
+ "time"
+)
+
// testSetup builds a Setup with the requested flags and fields.
// flagPersist selects persistent vs. volatile flags; fieldType selects the
// field types, with uint64 and string fields registered as persistent (using
// the RLP / raw-bytes codecs defined below) and any other type as volatile.
func testSetup(flagPersist []bool, fieldType []reflect.Type) (*Setup, []Flags, []Field) {
	setup := &Setup{}
	flags := make([]Flags, len(flagPersist))
	for i, persist := range flagPersist {
		if persist {
			flags[i] = setup.NewPersistentFlag(fmt.Sprintf("flag-%d", i))
		} else {
			flags[i] = setup.NewFlag(fmt.Sprintf("flag-%d", i))
		}
	}
	fields := make([]Field, len(fieldType))
	for i, ftype := range fieldType {
		switch ftype {
		case reflect.TypeOf(uint64(0)):
			fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, uint64FieldEnc, uint64FieldDec)
		case reflect.TypeOf(""):
			fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, stringFieldEnc, stringFieldDec)
		default:
			fields[i] = setup.NewField(fmt.Sprintf("field-%d", i), ftype)
		}
	}
	return setup, flags, fields
}
+
// testNode creates a deterministic test node whose identity is derived from
// the single byte b (dummyIdentity is declared elsewhere in this package).
// The enode.New error is ignored; the record is assumed valid in tests.
func testNode(b byte) *enode.Node {
	r := &enr.Record{}
	r.SetSig(dummyIdentity{b}, []byte{42})
	n, _ := enode.New(dummyIdentity{b}, r)
	return n
}
+
// TestCallback checks that a state subscription fires once per covered flag,
// whether or not the state was set with a timeout.
func TestCallback(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, _ := testSetup([]bool{false, false, false}, nil)
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	// One buffered channel per flag so callbacks never block the machine.
	set0 := make(chan struct{}, 1)
	set1 := make(chan struct{}, 1)
	set2 := make(chan struct{}, 1)
	ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { set0 <- struct{}{} })
	ns.SubscribeState(flags[1], func(n *enode.Node, oldState, newState Flags) { set1 <- struct{}{} })
	ns.SubscribeState(flags[2], func(n *enode.Node, oldState, newState Flags) { set2 <- struct{}{} })

	ns.Start()

	ns.SetState(testNode(1), flags[0], Flags{}, 0)
	ns.SetState(testNode(1), flags[1], Flags{}, time.Second)
	ns.SetState(testNode(1), flags[2], Flags{}, 2*time.Second)

	// Expect exactly three callbacks, one per subscription.
	for i := 0; i < 3; i++ {
		select {
		case <-set0:
		case <-set1:
		case <-set2:
		case <-time.After(time.Second):
			t.Fatalf("failed to invoke callback")
		}
	}
}
+
// TestPersistentFlags checks which nodes are written back to the database:
// one explicit Persist (node 5) plus, on Stop, the nodes holding persistent
// flags without pending timeouts (nodes 2 and 3). Node 1's flag is guarded by
// a timeout and node 4's flag is volatile, so neither may be saved.
func TestPersistentFlags(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, _ := testSetup([]bool{true, true, true, false}, nil)
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	// Capture every database save via the test hook.
	saveNode := make(chan *nodeInfo, 5)
	ns.saveNodeHook = func(node *nodeInfo) {
		saveNode <- node
	}

	ns.Start()

	ns.SetState(testNode(1), flags[0], Flags{}, time.Second) // state with timeout should not be saved
	ns.SetState(testNode(2), flags[1], Flags{}, 0)
	ns.SetState(testNode(3), flags[2], Flags{}, 0)
	ns.SetState(testNode(4), flags[3], Flags{}, 0)
	ns.SetState(testNode(5), flags[0], Flags{}, 0)
	ns.Persist(testNode(5))
	select {
	case <-saveNode:
	case <-time.After(time.Second):
		t.Fatalf("Timeout")
	}
	ns.Stop()

	// Stop should flush exactly the two remaining dirty persistent nodes.
	for i := 0; i < 2; i++ {
		select {
		case <-saveNode:
		case <-time.After(time.Second):
			t.Fatalf("Timeout")
		}
	}
	select {
	case <-saveNode:
		t.Fatalf("Unexpected saveNode")
	case <-time.After(time.Millisecond * 100):
	}
}
+
// TestSetField checks the field lifecycle: a field cannot be attached to an
// untracked node, attaches once state is set, rejects wrongly-typed values,
// and marks the node dirty so it is written back on Stop.
func TestSetField(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf("")})
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	saveNode := make(chan *nodeInfo, 1)
	ns.saveNodeHook = func(node *nodeInfo) {
		saveNode <- node
	}

	ns.Start()

	// Set field before setting state
	ns.SetField(testNode(1), fields[0], "hello world")
	field := ns.GetField(testNode(1), fields[0])
	if field != nil {
		t.Fatalf("Field shouldn't be set before setting states")
	}
	// Set field after setting state
	ns.SetState(testNode(1), flags[0], Flags{}, 0)
	ns.SetField(testNode(1), fields[0], "hello world")
	field = ns.GetField(testNode(1), fields[0])
	if field == nil {
		t.Fatalf("Field should be set after setting states")
	}
	if err := ns.SetField(testNode(1), fields[0], 123); err == nil {
		t.Fatalf("Invalid field should be rejected")
	}
	// Dirty node should be written back
	ns.Stop()
	select {
	case <-saveNode:
	case <-time.After(time.Second):
		t.Fatalf("Timeout")
	}
}
+
// TestUnsetField verifies that resetting a node's last remaining flag
// discards the node along with any fields that were attached to it.
func TestUnsetField(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, fields := testSetup([]bool{false}, []reflect.Type{reflect.TypeOf("")})
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	ns.Start()

	ns.SetState(testNode(1), flags[0], Flags{}, time.Second)
	ns.SetField(testNode(1), fields[0], "hello world")

	// Clearing the only flag drops the node entirely.
	ns.SetState(testNode(1), Flags{}, flags[0], 0)
	if field := ns.GetField(testNode(1), fields[0]); field != nil {
		t.Fatalf("Field should be unset")
	}
}
+
+func TestSetState(t *testing.T) {
+ mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}
+
+ s, flags, _ := testSetup([]bool{false, false, false}, nil)
+ ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)
+
+ type change struct{ old, new Flags }
+ set := make(chan change, 1)
+ ns.SubscribeState(flags[0].Or(flags[1]), func(n *enode.Node, oldState, newState Flags) {
+ set <- change{
+ old: oldState,
+ new: newState,
+ }
+ })
+
+ ns.Start()
+
+ check := func(expectOld, expectNew Flags, expectChange bool) {
+ if expectChange {
+ select {
+ case c := <-set:
+ if !c.old.Equals(expectOld) {
+ t.Fatalf("Old state mismatch")
+ }
+ if !c.new.Equals(expectNew) {
+ t.Fatalf("New state mismatch")
+ }
+ case <-time.After(time.Second):
+ }
+ return
+ }
+ select {
+ case <-set:
+ t.Fatalf("Unexpected change")
+ case <-time.After(time.Millisecond * 100):
+ return
+ }
+ }
+ ns.SetState(testNode(1), flags[0], Flags{}, 0)
+ check(Flags{}, flags[0], true)
+
+ ns.SetState(testNode(1), flags[1], Flags{}, 0)
+ check(flags[0], flags[0].Or(flags[1]), true)
+
+ ns.SetState(testNode(1), flags[2], Flags{}, 0)
+ check(Flags{}, Flags{}, false)
+
+ ns.SetState(testNode(1), Flags{}, flags[0], 0)
+ check(flags[0].Or(flags[1]), flags[1], true)
+
+ ns.SetState(testNode(1), Flags{}, flags[1], 0)
+ check(flags[1], Flags{}, true)
+
+ ns.SetState(testNode(1), Flags{}, flags[2], 0)
+ check(Flags{}, Flags{}, false)
+
+ ns.SetState(testNode(1), flags[0].Or(flags[1]), Flags{}, time.Second)
+ check(Flags{}, flags[0].Or(flags[1]), true)
+ clock.Run(time.Second)
+ check(flags[0].Or(flags[1]), Flags{}, true)
+}
+
+func uint64FieldEnc(field interface{}) ([]byte, error) {
+ if u, ok := field.(uint64); ok {
+ enc, err := rlp.EncodeToBytes(&u)
+ return enc, err
+ } else {
+ return nil, errors.New("invalid field type")
+ }
+}
+
// uint64FieldDec is the decode half of the uint64 field codec: it RLP-decodes
// enc into a uint64. On error the zero value is returned alongside the error.
func uint64FieldDec(enc []byte) (interface{}, error) {
	var u uint64
	err := rlp.DecodeBytes(enc, &u)
	return u, err
}
+
// stringFieldEnc is the encode half of the string field codec: it stores the
// string's raw bytes and rejects any other dynamic type.
func stringFieldEnc(field interface{}) ([]byte, error) {
	s, ok := field.(string)
	if !ok {
		return nil, errors.New("invalid field type")
	}
	return []byte(s), nil
}
+
// stringFieldDec is the decode half of the string field codec: the stored
// bytes are interpreted directly as the string value; it never fails.
func stringFieldDec(enc []byte) (interface{}, error) {
	s := string(enc)
	return s, nil
}
+
// TestPersistentFields round-trips persistent fields through the database and
// checks that bumping Setup.Version discards previously stored data.
func TestPersistentFields(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0)), reflect.TypeOf("")})
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	ns.Start()
	ns.SetState(testNode(1), flags[0], Flags{}, 0)
	ns.SetField(testNode(1), fields[0], uint64(100))
	ns.SetField(testNode(1), fields[1], "hello world")
	ns.Stop()

	// A fresh machine over the same database should restore both fields.
	ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	ns2.Start()
	field0 := ns2.GetField(testNode(1), fields[0])
	if !reflect.DeepEqual(field0, uint64(100)) {
		t.Fatalf("Field changed")
	}
	field1 := ns2.GetField(testNode(1), fields[1])
	if !reflect.DeepEqual(field1, "hello world") {
		t.Fatalf("Field changed")
	}

	// An incompatible setup version must invalidate stored entries.
	s.Version++
	ns3 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)
	ns3.Start()
	if ns3.GetField(testNode(1), fields[0]) != nil {
		t.Fatalf("Old field version should have been discarded")
	}
}
+
// TestFieldSub checks field subscription callbacks across the machine's
// lifecycle: on SetField, on shutdown (value -> nil with the offline flag),
// on restart (nil -> restored value) and on node discard.
func TestFieldSub(t *testing.T) {
	mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}

	s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0))})
	ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)

	// Record the most recent callback arguments for inspection.
	var (
		lastState                  Flags
		lastOldValue, lastNewValue interface{}
	)
	ns.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) {
		lastState, lastOldValue, lastNewValue = state, oldValue, newValue
	})
	check := func(state Flags, oldValue, newValue interface{}) {
		if !lastState.Equals(state) || lastOldValue != oldValue || lastNewValue != newValue {
			t.Fatalf("Incorrect field sub callback (expected [%v %v %v], got [%v %v %v])", state, oldValue, newValue, lastState, lastOldValue, lastNewValue)
		}
	}
	ns.Start()
	ns.SetState(testNode(1), flags[0], Flags{}, 0)
	ns.SetField(testNode(1), fields[0], uint64(100))
	check(flags[0], nil, uint64(100))
	ns.Stop()
	check(s.OfflineFlag(), uint64(100), nil)

	// Restarting over the same database replays the field with the offline flag.
	ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)
	ns2.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) {
		lastState, lastOldValue, lastNewValue = state, oldValue, newValue
	})
	ns2.Start()
	check(s.OfflineFlag(), nil, uint64(100))
	ns2.SetState(testNode(1), Flags{}, flags[0], 0)
	check(Flags{}, uint64(100), nil)
	ns2.Stop()
}
+
+func TestDuplicatedFlags(t *testing.T) {
+ mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{}
+
+ s, flags, _ := testSetup([]bool{true}, nil)
+ ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s)
+
+ type change struct{ old, new Flags }
+ set := make(chan change, 1)
+ ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) {
+ set <- change{oldState, newState}
+ })
+
+ ns.Start()
+ defer ns.Stop()
+
+ check := func(expectOld, expectNew Flags, expectChange bool) {
+ if expectChange {
+ select {
+ case c := <-set:
+ if !c.old.Equals(expectOld) {
+ t.Fatalf("Old state mismatch")
+ }
+ if !c.new.Equals(expectNew) {
+ t.Fatalf("New state mismatch")
+ }
+ case <-time.After(time.Second):
+ }
+ return
+ }
+ select {
+ case <-set:
+ t.Fatalf("Unexpected change")
+ case <-time.After(time.Millisecond * 100):
+ return
+ }
+ }
+ ns.SetState(testNode(1), flags[0], Flags{}, time.Second)
+ check(Flags{}, flags[0], true)
+ ns.SetState(testNode(1), flags[0], Flags{}, 2*time.Second) // extend the timeout to 2s
+ check(Flags{}, flags[0], false)
+
+ clock.Run(2 * time.Second)
+ check(flags[0], Flags{}, true)
+}
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index 0ce21fefb..c705586ba 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -34,12 +34,12 @@ import (
"syscall"
"time"
- "github.com/docker/docker/pkg/reexec"
"github.com/core-coin/go-core/log"
"github.com/core-coin/go-core/node"
"github.com/core-coin/go-core/p2p"
"github.com/core-coin/go-core/p2p/enode"
"github.com/core-coin/go-core/rpc"
+ "github.com/docker/docker/pkg/reexec"
"github.com/gorilla/websocket"
)
@@ -75,11 +75,11 @@ func (e *ExecAdapter) Name() string {
// NewNode returns a new ExecNode using the given config
func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) {
- if len(config.Services) == 0 {
- return nil, errors.New("node must have at least one service")
+ if len(config.Lifecycles) == 0 {
+ return nil, errors.New("node must have at least one lifecycle")
}
- for _, service := range config.Services {
- if _, exists := serviceFuncs[service]; !exists {
+ for _, service := range config.Lifecycles {
+ if _, exists := lifecycleConstructorFuncs[service]; !exists {
return nil, fmt.Errorf("unknown node service %q", service)
}
}
@@ -263,7 +263,7 @@ func (n *ExecNode) waitForStartupJSON(ctx context.Context) (string, chan nodeSta
func (n *ExecNode) execCommand() *exec.Cmd {
return &exec.Cmd{
Path: reexec.Self(),
- Args: []string{"p2p-node", strings.Join(n.Config.Node.Services, ","), n.ID.String()},
+ Args: []string{"p2p-node", strings.Join(n.Config.Node.Lifecycles, ","), n.ID.String()},
}
}
@@ -400,7 +400,7 @@ func execP2PNode() {
defer signal.Stop(sigc)
<-sigc
log.Info("Received SIGTERM, shutting down...")
- stack.Stop()
+ stack.Close()
}()
stack.Wait() // Wait for the stack to exit.
}
@@ -434,44 +434,35 @@ func startExecNodeStack() (*node.Node, error) {
return nil, fmt.Errorf("error creating node stack: %v", err)
}
- // register the services, collecting them into a map so we can wrap
- // them in a snapshot service
- services := make(map[string]node.Service, len(serviceNames))
+ // Register the services, collecting them into a map so they can
+ // be accessed by the snapshot API.
+ services := make(map[string]node.Lifecycle, len(serviceNames))
for _, name := range serviceNames {
- serviceFunc, exists := serviceFuncs[name]
+ lifecycleFunc, exists := lifecycleConstructorFuncs[name]
if !exists {
return nil, fmt.Errorf("unknown node service %q", err)
}
- constructor := func(nodeCtx *node.ServiceContext) (node.Service, error) {
- ctx := &ServiceContext{
- RPCDialer: &wsRPCDialer{addrs: conf.PeerAddrs},
- NodeContext: nodeCtx,
- Config: conf.Node,
- }
- if conf.Snapshots != nil {
- ctx.Snapshot = conf.Snapshots[name]
- }
- service, err := serviceFunc(ctx)
- if err != nil {
- return nil, err
- }
- services[name] = service
- return service, nil
+ ctx := &ServiceContext{
+ RPCDialer: &wsRPCDialer{addrs: conf.PeerAddrs},
+ Config: conf.Node,
}
- if err := stack.Register(constructor); err != nil {
- return stack, fmt.Errorf("error registering service %q: %v", name, err)
+ if conf.Snapshots != nil {
+ ctx.Snapshot = conf.Snapshots[name]
}
+ service, err := lifecycleFunc(ctx, stack)
+ if err != nil {
+ return nil, err
+ }
+ services[name] = service
+ stack.RegisterLifecycle(service)
}
- // register the snapshot service
- err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- return &snapshotService{services}, nil
- })
- if err != nil {
- return stack, fmt.Errorf("error starting snapshot service: %v", err)
- }
-
- // start the stack
+ // Add the snapshot API.
+ stack.RegisterAPIs([]rpc.API{{
+ Namespace: "simulation",
+ Version: "1.0",
+ Service: SnapshotAPI{services},
+ }})
if err = stack.Start(); err != nil {
err = fmt.Errorf("error starting stack: %v", err)
}
@@ -490,35 +481,9 @@ type nodeStartupJSON struct {
NodeInfo *p2p.NodeInfo
}
-// snapshotService is a node.Service which wraps a list of services and
-// exposes an API to generate a snapshot of those services
-type snapshotService struct {
- services map[string]node.Service
-}
-
-func (s *snapshotService) APIs() []rpc.API {
- return []rpc.API{{
- Namespace: "simulation",
- Version: "1.0",
- Service: SnapshotAPI{s.services},
- }}
-}
-
-func (s *snapshotService) Protocols() []p2p.Protocol {
- return nil
-}
-
-func (s *snapshotService) Start(*p2p.Server) error {
- return nil
-}
-
-func (s *snapshotService) Stop() error {
- return nil
-}
-
// SnapshotAPI provides an RPC method to create snapshots of services
type SnapshotAPI struct {
- services map[string]node.Service
+ services map[string]node.Lifecycle
}
func (api SnapshotAPI) Snapshot() (map[string][]byte, error) {
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index 5d5fdb996..ec1eca446 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -37,29 +37,21 @@ import (
// SimAdapter is a NodeAdapter which creates in-memory simulation nodes and
// connects them using net.Pipe
type SimAdapter struct {
- pipe func() (net.Conn, net.Conn, error)
- mtx sync.RWMutex
- nodes map[enode.ID]*SimNode
- services map[string]ServiceFunc
+ pipe func() (net.Conn, net.Conn, error)
+ mtx sync.RWMutex
+ nodes map[enode.ID]*SimNode
+ lifecycles LifecycleConstructors
}
// NewSimAdapter creates a SimAdapter which is capable of running in-memory
// simulation nodes running any of the given services (the services to run on a
// particular node are passed to the NewNode function in the NodeConfig)
// the adapter uses a net.Pipe for in-memory simulated network connections
-func NewSimAdapter(services map[string]ServiceFunc) *SimAdapter {
+func NewSimAdapter(services LifecycleConstructors) *SimAdapter {
return &SimAdapter{
- pipe: pipes.NetPipe,
- nodes: make(map[enode.ID]*SimNode),
- services: services,
- }
-}
-
-func NewTCPAdapter(services map[string]ServiceFunc) *SimAdapter {
- return &SimAdapter{
- pipe: pipes.TCPPipe,
- nodes: make(map[enode.ID]*SimNode),
- services: services,
+ pipe: pipes.NetPipe,
+ nodes: make(map[enode.ID]*SimNode),
+ lifecycles: services,
}
}
@@ -85,11 +77,11 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
}
// check the services are valid
- if len(config.Services) == 0 {
+ if len(config.Lifecycles) == 0 {
return nil, errors.New("node must have at least one service")
}
- for _, service := range config.Services {
- if _, exists := s.services[service]; !exists {
+ for _, service := range config.Lifecycles {
+ if _, exists := s.lifecycles[service]; !exists {
return nil, fmt.Errorf("unknown node service %q", service)
}
}
@@ -119,7 +111,7 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
config: config,
node: n,
adapter: s,
- running: make(map[string]node.Service),
+ running: make(map[string]node.Lifecycle),
}
s.nodes[id] = simNode
return simNode, nil
@@ -155,11 +147,7 @@ func (s *SimAdapter) DialRPC(id enode.ID) (*rpc.Client, error) {
if !ok {
return nil, fmt.Errorf("unknown node: %s", id)
}
- handler, err := node.node.RPCHandler()
- if err != nil {
- return nil, err
- }
- return rpc.DialInProc(handler), nil
+ return node.node.Attach()
}
// GetNode returns the node with the given ID if it exists
@@ -179,7 +167,7 @@ type SimNode struct {
config *NodeConfig
adapter *SimAdapter
node *node.Node
- running map[string]node.Service
+ running map[string]node.Lifecycle
client *rpc.Client
registerOnce sync.Once
}
@@ -227,7 +215,7 @@ func (sn *SimNode) ServeRPC(conn *websocket.Conn) error {
// simulation_snapshot RPC method
func (sn *SimNode) Snapshots() (map[string][]byte, error) {
sn.lock.RLock()
- services := make(map[string]node.Service, len(sn.running))
+ services := make(map[string]node.Lifecycle, len(sn.running))
for name, service := range sn.running {
services[name] = service
}
@@ -252,53 +240,46 @@ func (sn *SimNode) Snapshots() (map[string][]byte, error) {
// Start registers the services and starts the underlying devp2p node
func (sn *SimNode) Start(snapshots map[string][]byte) error {
- newService := func(name string) func(ctx *node.ServiceContext) (node.Service, error) {
- return func(nodeCtx *node.ServiceContext) (node.Service, error) {
+ // ensure we only register the services once in the case of the node
+ // being stopped and then started again
+ var regErr error
+ sn.registerOnce.Do(func() {
+ for _, name := range sn.config.Lifecycles {
ctx := &ServiceContext{
- RPCDialer: sn.adapter,
- NodeContext: nodeCtx,
- Config: sn.config,
+ RPCDialer: sn.adapter,
+ Config: sn.config,
}
if snapshots != nil {
ctx.Snapshot = snapshots[name]
}
- serviceFunc := sn.adapter.services[name]
- service, err := serviceFunc(ctx)
+ serviceFunc := sn.adapter.lifecycles[name]
+ service, err := serviceFunc(ctx, sn.node)
if err != nil {
- return nil, err
- }
- sn.running[name] = service
- return service, nil
- }
- }
-
- // ensure we only register the services once in the case of the node
- // being stopped and then started again
- var regErr error
- sn.registerOnce.Do(func() {
- for _, name := range sn.config.Services {
- if err := sn.node.Register(newService(name)); err != nil {
regErr = err
break
}
+ // if the service has already been registered, don't register it again.
+ if _, ok := sn.running[name]; ok {
+ continue
+ }
+ sn.running[name] = service
+ sn.node.RegisterLifecycle(service)
}
})
if regErr != nil {
return regErr
}
-
if err := sn.node.Start(); err != nil {
return err
}
// create an in-process RPC client
- handler, err := sn.node.RPCHandler()
+ client, err := sn.node.Attach()
if err != nil {
return err
}
-
sn.lock.Lock()
- sn.client = rpc.DialInProc(handler)
+ sn.client = client
sn.lock.Unlock()
return nil
@@ -312,21 +293,21 @@ func (sn *SimNode) Stop() error {
sn.client = nil
}
sn.lock.Unlock()
- return sn.node.Stop()
+ return sn.node.Close()
}
// Service returns a running service by name
-func (sn *SimNode) Service(name string) node.Service {
+func (sn *SimNode) Service(name string) node.Lifecycle {
sn.lock.RLock()
defer sn.lock.RUnlock()
return sn.running[name]
}
// Services returns a copy of the underlying services
-func (sn *SimNode) Services() []node.Service {
+func (sn *SimNode) Services() []node.Lifecycle {
sn.lock.RLock()
defer sn.lock.RUnlock()
- services := make([]node.Service, 0, len(sn.running))
+ services := make([]node.Lifecycle, 0, len(sn.running))
for _, service := range sn.running {
services = append(services, service)
}
@@ -334,10 +315,10 @@ func (sn *SimNode) Services() []node.Service {
}
// ServiceMap returns a map by names of the underlying services
-func (sn *SimNode) ServiceMap() map[string]node.Service {
+func (sn *SimNode) ServiceMap() map[string]node.Lifecycle {
sn.lock.RLock()
defer sn.lock.RUnlock()
- services := make(map[string]node.Service, len(sn.running))
+ services := make(map[string]node.Lifecycle, len(sn.running))
for name, service := range sn.running {
services[name] = service
}
diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go
index 873ca65c1..a546c5db2 100644
--- a/p2p/simulations/adapters/types.go
+++ b/p2p/simulations/adapters/types.go
@@ -98,11 +98,11 @@ type NodeConfig struct {
// Use an existing database instead of a temporary one if non-empty
DataDir string
- // Services are the names of the services which should be run when
- // starting the node (for SimNodes it should be the names of services
- // contained in SimAdapter.services, for other nodes it should be
- // services registered by calling the RegisterService function)
- Services []string
+ // Lifecycles are the names of the service lifecycles which should be run when
+ // starting the node (for SimNodes it should be the names of service lifecycles
+ // contained in SimAdapter.lifecycles, for other nodes it should be
+ // service lifecycles registered by calling the RegisterLifecycle function)
+ Lifecycles []string
// Properties are the names of the properties this node should hold
// within running services (e.g. "bootnode", "lightnode" or any custom values)
@@ -139,7 +139,7 @@ func (n *NodeConfig) MarshalJSON() ([]byte, error) {
confJSON := nodeConfigJSON{
ID: n.ID.String(),
Name: n.Name,
- Services: n.Services,
+ Services: n.Lifecycles,
Properties: n.Properties,
Port: n.Port,
EnableMsgEvents: n.EnableMsgEvents,
@@ -177,7 +177,7 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error {
}
n.Name = confJSON.Name
- n.Services = confJSON.Services
+ n.Lifecycles = confJSON.Services
n.Properties = confJSON.Properties
n.Port = confJSON.Port
n.EnableMsgEvents = confJSON.EnableMsgEvents
@@ -235,9 +235,8 @@ func assignTCPPort() (uint16, error) {
type ServiceContext struct {
RPCDialer
- NodeContext *node.ServiceContext
- Config *NodeConfig
- Snapshot []byte
+ Config *NodeConfig
+ Snapshot []byte
}
// RPCDialer is used when initialising services which need to connect to
@@ -247,27 +246,29 @@ type RPCDialer interface {
DialRPC(id enode.ID) (*rpc.Client, error)
}
-// Services is a collection of services which can be run in a simulation
-type Services map[string]ServiceFunc
+// LifecycleConstructor allows a Lifecycle to be constructed during node start-up.
+// While the service-specific package usually takes care of Lifecycle creation and registration,
+// for testing purposes, it is useful to be able to construct a Lifecycle on the spot.
+type LifecycleConstructor func(ctx *ServiceContext, stack *node.Node) (node.Lifecycle, error)
-// ServiceFunc returns a node.Service which can be used to boot a devp2p node
-type ServiceFunc func(ctx *ServiceContext) (node.Service, error)
+// LifecycleConstructors stores LifecycleConstructor functions to call during node start-up.
+type LifecycleConstructors map[string]LifecycleConstructor
-// serviceFuncs is a map of registered services which are used to boot devp2p
+// lifecycleConstructorFuncs is a map of registered services which are used to boot devp2p
// nodes
-var serviceFuncs = make(Services)
+var lifecycleConstructorFuncs = make(LifecycleConstructors)
-// RegisterServices registers the given Services which can then be used to
+// RegisterLifecycles registers the given LifecycleConstructors which can then be used to
// start devp2p nodes using either the Exec or Docker adapters.
//
// It should be called in an init function so that it has the opportunity to
// execute the services before main() is called.
-func RegisterServices(services Services) {
- for name, f := range services {
- if _, exists := serviceFuncs[name]; exists {
+func RegisterLifecycles(lifecycles LifecycleConstructors) {
+ for name, f := range lifecycles {
+ if _, exists := lifecycleConstructorFuncs[name]; exists {
panic(fmt.Sprintf("node service already exists: %q", name))
}
- serviceFuncs[name] = f
+ lifecycleConstructorFuncs[name] = f
}
// now we have registered the services, run reexec.Init() which will
diff --git a/p2p/simulations/connect_test.go b/p2p/simulations/connect_test.go
index 7cc0a54f8..c603906b8 100644
--- a/p2p/simulations/connect_test.go
+++ b/p2p/simulations/connect_test.go
@@ -26,8 +26,8 @@ import (
func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) {
t.Helper()
- adapter := adapters.NewSimAdapter(adapters.Services{
- "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
+ "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
return NewNoopService(nil), nil
},
})
diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go
index 88e76df5d..8cb65c7ff 100644
--- a/p2p/simulations/examples/ping-pong.go
+++ b/p2p/simulations/examples/ping-pong.go
@@ -31,7 +31,6 @@ import (
"github.com/core-coin/go-core/p2p/enode"
"github.com/core-coin/go-core/p2p/simulations"
"github.com/core-coin/go-core/p2p/simulations/adapters"
- "github.com/core-coin/go-core/rpc"
)
var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim", "exec" or "docker")`)
@@ -44,13 +43,14 @@ func main() {
// set the log level to Trace
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
- // register a single ping-pong service
- services := map[string]adapters.ServiceFunc{
- "ping-pong": func(ctx *adapters.ServiceContext) (node.Service, error) {
- return newPingPongService(ctx.Config.ID), nil
+ services := map[string]adapters.LifecycleConstructor{
+ "ping-pong": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
+ pps := newPingPongService(ctx.Config.ID)
+ stack.RegisterProtocols(pps.Protocols())
+ return pps, nil
},
}
- adapters.RegisterServices(services)
+ adapters.RegisterLifecycles(services)
// create the NodeAdapter
var adapter adapters.NodeAdapter
@@ -110,11 +110,7 @@ func (p *pingPongService) Protocols() []p2p.Protocol {
}}
}
-func (p *pingPongService) APIs() []rpc.API {
- return nil
-}
-
-func (p *pingPongService) Start(server *p2p.Server) error {
+func (p *pingPongService) Start() error {
p.log.Info("ping-pong service starting")
return nil
}
diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go
index 0320be302..45753201b 100644
--- a/p2p/simulations/http_test.go
+++ b/p2p/simulations/http_test.go
@@ -64,12 +64,15 @@ type testService struct {
state atomic.Value
}
-func newTestService(ctx *adapters.ServiceContext) (node.Service, error) {
+func newTestService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
svc := &testService{
id: ctx.Config.ID,
peers: make(map[enode.ID]*testPeer),
}
svc.state.Store(ctx.Snapshot)
+
+ stack.RegisterProtocols(svc.Protocols())
+ stack.RegisterAPIs(svc.APIs())
return svc, nil
}
@@ -126,7 +129,7 @@ func (t *testService) APIs() []rpc.API {
}}
}
-func (t *testService) Start(server *p2p.Server) error {
+func (t *testService) Start() error {
return nil
}
@@ -288,7 +291,7 @@ func (t *TestAPI) Events(ctx context.Context) (*rpc.Subscription, error) {
return rpcSub, nil
}
-var testServices = adapters.Services{
+var testServices = adapters.LifecycleConstructors{
"test": newTestService,
}
diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go
index a374f1a05..1e5fbdae7 100644
--- a/p2p/simulations/network.go
+++ b/p2p/simulations/network.go
@@ -110,8 +110,8 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error)
}
// if no services are configured, use the default service
- if len(conf.Services) == 0 {
- conf.Services = []string{net.DefaultService}
+ if len(conf.Lifecycles) == 0 {
+ conf.Lifecycles = []string{net.DefaultService}
}
// use the NodeAdapter to create the node
@@ -797,7 +797,7 @@ func (n *Node) MarshalJSON() ([]byte, error) {
// status. IMPORTANT: The implementation is incomplete; we lose p2p.NodeInfo.
func (n *Node) UnmarshalJSON(raw []byte) error {
// TODO: How should we turn back NodeInfo into n.Node?
- // Ticket: https://github.com/ethersphere/go-ethereum/issues/1177
+ // Ticket: https://github.com/ethersphere/go-core/issues/1177
var node struct {
Config *adapters.NodeConfig `json:"config,omitempty"`
Up bool `json:"up"`
@@ -913,19 +913,19 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
snap.Nodes[i].Snapshots = snapshots
for _, addSvc := range addServices {
haveSvc := false
- for _, svc := range snap.Nodes[i].Node.Config.Services {
+ for _, svc := range snap.Nodes[i].Node.Config.Lifecycles {
if svc == addSvc {
haveSvc = true
break
}
}
if !haveSvc {
- snap.Nodes[i].Node.Config.Services = append(snap.Nodes[i].Node.Config.Services, addSvc)
+ snap.Nodes[i].Node.Config.Lifecycles = append(snap.Nodes[i].Node.Config.Lifecycles, addSvc)
}
}
if len(removeServices) > 0 {
var cleanedServices []string
- for _, svc := range snap.Nodes[i].Node.Config.Services {
+ for _, svc := range snap.Nodes[i].Node.Config.Lifecycles {
haveSvc := false
for _, rmSvc := range removeServices {
if rmSvc == svc {
@@ -938,7 +938,7 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
}
}
- snap.Nodes[i].Node.Config.Services = cleanedServices
+ snap.Nodes[i].Node.Config.Lifecycles = cleanedServices
}
}
for _, conn := range net.Conns {
diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go
index 4e2e7cdb9..64d38c199 100644
--- a/p2p/simulations/network_test.go
+++ b/p2p/simulations/network_test.go
@@ -41,8 +41,8 @@ func TestSnapshot(t *testing.T) {
// create snapshot from ring network
// this is a minimal service, whose protocol will take exactly one message OR close of connection before quitting
- adapter := adapters.NewSimAdapter(adapters.Services{
- "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
+ "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
return NewNoopService(nil), nil
},
})
@@ -165,8 +165,8 @@ OUTER:
// PART II
// load snapshot and verify that exactly same connections are formed
- adapter = adapters.NewSimAdapter(adapters.Services{
- "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ adapter = adapters.NewSimAdapter(adapters.LifecycleConstructors{
+ "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
return NewNoopService(nil), nil
},
})
@@ -256,8 +256,8 @@ OuterTwo:
t.Run("conns after load", func(t *testing.T) {
// Create new network.
n := NewNetwork(
- adapters.NewSimAdapter(adapters.Services{
- "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ adapters.NewSimAdapter(adapters.LifecycleConstructors{
+ "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
return NewNoopService(nil), nil
},
}),
@@ -288,7 +288,7 @@ OuterTwo:
// with each other and that a snapshot fully represents the desired topology
func TestNetworkSimulation(t *testing.T) {
// create simulation network with 20 testService nodes
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -437,7 +437,7 @@ func createTestNodesWithProperty(property string, count int, network *Network) (
// It then tests again whilst excluding a node ID from being returned.
// If a node ID is not returned, or more node IDs than expected are returned, the test fails.
func TestGetNodeIDs(t *testing.T) {
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -486,7 +486,7 @@ func TestGetNodeIDs(t *testing.T) {
// It then tests again whilst excluding a node from being returned.
// If a node is not returned, or more nodes than expected are returned, the test fails.
func TestGetNodes(t *testing.T) {
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -534,7 +534,7 @@ func TestGetNodes(t *testing.T) {
// TestGetNodesByID creates a set of nodes and attempts to retrieve a subset of them by ID
// If a node is not returned, or more nodes than expected are returned, the test fails.
func TestGetNodesByID(t *testing.T) {
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -579,7 +579,7 @@ func TestGetNodesByID(t *testing.T) {
// GetNodesByProperty is then checked for correctness by comparing the nodes returned to those initially created.
// If a node with a property is not found, or more nodes than expected are returned, the test fails.
func TestGetNodesByProperty(t *testing.T) {
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -624,7 +624,7 @@ func TestGetNodesByProperty(t *testing.T) {
// GetNodeIDsByProperty is then checked for correctness by comparing the node IDs returned to those initially created.
// If a node ID with a property is not found, or more nodes IDs than expected are returned, the test fails.
func TestGetNodeIDsByProperty(t *testing.T) {
- adapter := adapters.NewSimAdapter(adapters.Services{
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
"test": newTestService,
})
network := NewNetwork(adapter, &NetworkConfig{
@@ -705,8 +705,8 @@ func benchmarkMinimalServiceTmp(b *testing.B) {
// this is a minimal service, whose protocol will close a channel upon run of protocol
// making it possible to bench the time it takes for the service to start and protocol actually to be run
protoCMap := make(map[enode.ID]map[enode.ID]chan struct{})
- adapter := adapters.NewSimAdapter(adapters.Services{
- "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{
+ "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
protoCMap[ctx.Config.ID] = make(map[enode.ID]chan struct{})
svc := NewNoopService(protoCMap[ctx.Config.ID])
return svc, nil
diff --git a/p2p/simulations/test.go b/p2p/simulations/test.go
index 5504ea210..5478b562a 100644
--- a/p2p/simulations/test.go
+++ b/p2p/simulations/test.go
@@ -66,7 +66,7 @@ func (t *NoopService) APIs() []rpc.API {
return []rpc.API{}
}
-func (t *NoopService) Start(server *p2p.Server) error {
+func (t *NoopService) Start() error {
return nil
}
diff --git a/p2p/testing/peerpool.go b/p2p/testing/peerpool.go
deleted file mode 100644
index 6301ee73f..000000000
--- a/p2p/testing/peerpool.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package testing
-
-import (
- "fmt"
- "sync"
-
- "github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/p2p/enode"
-)
-
-type TestPeer interface {
- ID() enode.ID
- Drop()
-}
-
-// TestPeerPool is an example peerPool to demonstrate registration of peer connections
-type TestPeerPool struct {
- lock sync.Mutex
- peers map[enode.ID]TestPeer
-}
-
-func NewTestPeerPool() *TestPeerPool {
- return &TestPeerPool{peers: make(map[enode.ID]TestPeer)}
-}
-
-func (p *TestPeerPool) Add(peer TestPeer) {
- p.lock.Lock()
- defer p.lock.Unlock()
- log.Trace(fmt.Sprintf("pp add peer %v", peer.ID()))
- p.peers[peer.ID()] = peer
-
-}
-
-func (p *TestPeerPool) Remove(peer TestPeer) {
- p.lock.Lock()
- defer p.lock.Unlock()
- delete(p.peers, peer.ID())
-}
-
-func (p *TestPeerPool) Has(id enode.ID) bool {
- p.lock.Lock()
- defer p.lock.Unlock()
- _, ok := p.peers[id]
- return ok
-}
-
-func (p *TestPeerPool) Get(id enode.ID) TestPeer {
- p.lock.Lock()
- defer p.lock.Unlock()
- return p.peers[id]
-}
diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go
deleted file mode 100644
index aebdbfbd3..000000000
--- a/p2p/testing/protocolsession.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2018 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package testing
-
-import (
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/p2p/enode"
- "github.com/core-coin/go-core/p2p/simulations/adapters"
-)
-
-var errTimedOut = errors.New("timed out")
-
-// ProtocolSession is a quasi simulation of a pivot node running
-// a service and a number of dummy peers that can send (trigger) or
-// receive (expect) messages
-type ProtocolSession struct {
- Server *p2p.Server
- Nodes []*enode.Node
- adapter *adapters.SimAdapter
- events chan *p2p.PeerEvent
-}
-
-// Exchange is the basic units of protocol tests
-// the triggers and expects in the arrays are run immediately and asynchronously
-// thus one cannot have multiple expects for the SAME peer with DIFFERENT message types
-// because it's unpredictable which expect will receive which message
-// (with expect #1 and #2, messages might be sent #2 and #1, and both expects will complain about wrong message code)
-// an exchange is defined on a session
-type Exchange struct {
- Label string
- Triggers []Trigger
- Expects []Expect
- Timeout time.Duration
-}
-
-// Trigger is part of the exchange, incoming message for the pivot node
-// sent by a peer
-type Trigger struct {
- Msg interface{} // type of message to be sent
- Code uint64 // code of message is given
- Peer enode.ID // the peer to send the message to
- Timeout time.Duration // timeout duration for the sending
-}
-
-// Expect is part of an exchange, outgoing message from the pivot node
-// received by a peer
-type Expect struct {
- Msg interface{} // type of message to expect
- Code uint64 // code of message is now given
- Peer enode.ID // the peer that expects the message
- Timeout time.Duration // timeout duration for receiving
-}
-
-// Disconnect represents a disconnect event, used and checked by TestDisconnected
-type Disconnect struct {
- Peer enode.ID // discconnected peer
- Error error // disconnect reason
-}
-
-// trigger sends messages from peers
-func (s *ProtocolSession) trigger(trig Trigger) error {
- simNode, ok := s.adapter.GetNode(trig.Peer)
- if !ok {
- return fmt.Errorf("trigger: peer %v does not exist (1- %v)", trig.Peer, len(s.Nodes))
- }
- mockNode, ok := simNode.Services()[0].(*mockNode)
- if !ok {
- return fmt.Errorf("trigger: peer %v is not a mock", trig.Peer)
- }
-
- errc := make(chan error)
-
- go func() {
- log.Trace(fmt.Sprintf("trigger %v (%v)....", trig.Msg, trig.Code))
- errc <- mockNode.Trigger(&trig)
- log.Trace(fmt.Sprintf("triggered %v (%v)", trig.Msg, trig.Code))
- }()
-
- t := trig.Timeout
- if t == time.Duration(0) {
- t = 1000 * time.Millisecond
- }
- select {
- case err := <-errc:
- return err
- case <-time.After(t):
- return fmt.Errorf("timout expecting %v to send to peer %v", trig.Msg, trig.Peer)
- }
-}
-
-// expect checks an expectation of a message sent out by the pivot node
-func (s *ProtocolSession) expect(exps []Expect) error {
- // construct a map of expectations for each node
- peerExpects := make(map[enode.ID][]Expect)
- for _, exp := range exps {
- if exp.Msg == nil {
- return errors.New("no message to expect")
- }
- peerExpects[exp.Peer] = append(peerExpects[exp.Peer], exp)
- }
-
- // construct a map of mockNodes for each node
- mockNodes := make(map[enode.ID]*mockNode)
- for nodeID := range peerExpects {
- simNode, ok := s.adapter.GetNode(nodeID)
- if !ok {
- return fmt.Errorf("trigger: peer %v does not exist (1- %v)", nodeID, len(s.Nodes))
- }
- mockNode, ok := simNode.Services()[0].(*mockNode)
- if !ok {
- return fmt.Errorf("trigger: peer %v is not a mock", nodeID)
- }
- mockNodes[nodeID] = mockNode
- }
-
- // done chanell cancels all created goroutines when function returns
- done := make(chan struct{})
- defer close(done)
- // errc catches the first error from
- errc := make(chan error)
-
- wg := &sync.WaitGroup{}
- wg.Add(len(mockNodes))
- for nodeID, mockNode := range mockNodes {
- nodeID := nodeID
- mockNode := mockNode
- go func() {
- defer wg.Done()
-
- // Sum all Expect timeouts to give the maximum
- // time for all expectations to finish.
- // mockNode.Expect checks all received messages against
- // a list of expected messages and timeout for each
- // of them can not be checked separately.
- var t time.Duration
- for _, exp := range peerExpects[nodeID] {
- if exp.Timeout == time.Duration(0) {
- t += 2000 * time.Millisecond
- } else {
- t += exp.Timeout
- }
- }
- alarm := time.NewTimer(t)
- defer alarm.Stop()
-
- // expectErrc is used to check if error returned
- // from mockNode.Expect is not nil and to send it to
- // errc only in that case.
- // done channel will be closed when function
- expectErrc := make(chan error)
- go func() {
- select {
- case expectErrc <- mockNode.Expect(peerExpects[nodeID]...):
- case <-done:
- case <-alarm.C:
- }
- }()
-
- select {
- case err := <-expectErrc:
- if err != nil {
- select {
- case errc <- err:
- case <-done:
- case <-alarm.C:
- errc <- errTimedOut
- }
- }
- case <-done:
- case <-alarm.C:
- errc <- errTimedOut
- }
-
- }()
- }
-
- go func() {
- wg.Wait()
- // close errc when all goroutines finish to return nill err from errc
- close(errc)
- }()
-
- return <-errc
-}
-
-// TestExchanges tests a series of exchanges against the session
-func (s *ProtocolSession) TestExchanges(exchanges ...Exchange) error {
- for i, e := range exchanges {
- if err := s.testExchange(e); err != nil {
- return fmt.Errorf("exchange #%d %q: %v", i, e.Label, err)
- }
- log.Trace(fmt.Sprintf("exchange #%d %q: run successfully", i, e.Label))
- }
- return nil
-}
-
-// testExchange tests a single Exchange.
-// Default timeout value is 2 seconds.
-func (s *ProtocolSession) testExchange(e Exchange) error {
- errc := make(chan error)
- done := make(chan struct{})
- defer close(done)
-
- go func() {
- for _, trig := range e.Triggers {
- err := s.trigger(trig)
- if err != nil {
- errc <- err
- return
- }
- }
-
- select {
- case errc <- s.expect(e.Expects):
- case <-done:
- }
- }()
-
- // time out globally or finish when all expectations satisfied
- t := e.Timeout
- if t == 0 {
- t = 2000 * time.Millisecond
- }
- alarm := time.NewTimer(t)
- defer alarm.Stop()
- select {
- case err := <-errc:
- return err
- case <-alarm.C:
- return errTimedOut
- }
-}
-
-// TestDisconnected tests the disconnections given as arguments
-// the disconnect structs describe what disconnect error is expected on which peer
-func (s *ProtocolSession) TestDisconnected(disconnects ...*Disconnect) error {
- expects := make(map[enode.ID]error)
- for _, disconnect := range disconnects {
- expects[disconnect.Peer] = disconnect.Error
- }
-
- timeout := time.After(time.Second)
- for len(expects) > 0 {
- select {
- case event := <-s.events:
- if event.Type != p2p.PeerEventTypeDrop {
- continue
- }
- expectErr, ok := expects[event.Peer]
- if !ok {
- continue
- }
-
- if !(expectErr == nil && event.Error == "" || expectErr != nil && expectErr.Error() == event.Error) {
- return fmt.Errorf("unexpected error on peer %v. expected '%v', got '%v'", event.Peer, expectErr, event.Error)
- }
- delete(expects, event.Peer)
- case <-timeout:
- return fmt.Errorf("timed out waiting for peers to disconnect")
- }
- }
- return nil
-}
diff --git a/p2p/testing/protocoltester.go b/p2p/testing/protocoltester.go
deleted file mode 100644
index 0a94f5ea1..000000000
--- a/p2p/testing/protocoltester.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2018 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-/*
-the p2p/testing package provides a unit test scheme to check simple
-protocol message exchanges with one pivot node and a number of dummy peers
-The pivot test node runs a node.Service, the dummy peers run a mock node
-that can be used to send and receive messages
-*/
-
-package testing
-
-import (
- "bytes"
- "fmt"
- eddsa "github.com/core-coin/go-goldilocks"
- "io"
- "io/ioutil"
- "strings"
- "sync"
-
- "github.com/core-coin/go-core/log"
- "github.com/core-coin/go-core/node"
- "github.com/core-coin/go-core/p2p"
- "github.com/core-coin/go-core/p2p/enode"
- "github.com/core-coin/go-core/p2p/simulations"
- "github.com/core-coin/go-core/p2p/simulations/adapters"
- "github.com/core-coin/go-core/rlp"
- "github.com/core-coin/go-core/rpc"
-)
-
-// ProtocolTester is the tester environment used for unit testing protocol
-// message exchanges. It uses p2p/simulations framework
-type ProtocolTester struct {
- *ProtocolSession
- network *simulations.Network
-}
-
-// NewProtocolTester constructs a new ProtocolTester
-// it takes as argument the pivot node id, the number of dummy peers and the
-// protocol run function called on a peer connection by the p2p server
-func NewProtocolTester(prvkey *eddsa.PrivateKey, nodeCount int, run func(*p2p.Peer, p2p.MsgReadWriter) error) *ProtocolTester {
- services := adapters.Services{
- "test": func(ctx *adapters.ServiceContext) (node.Service, error) {
- return &testNode{run}, nil
- },
- "mock": func(ctx *adapters.ServiceContext) (node.Service, error) {
- return newMockNode(), nil
- },
- }
- adapter := adapters.NewSimAdapter(services)
- net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{})
- nodeConfig := &adapters.NodeConfig{
- PrivateKey: prvkey,
- EnableMsgEvents: true,
- Services: []string{"test"},
- }
- if _, err := net.NewNodeWithConfig(nodeConfig); err != nil {
- panic(err.Error())
- }
- if err := net.Start(nodeConfig.ID); err != nil {
- panic(err.Error())
- }
-
- node := net.GetNode(nodeConfig.ID).Node.(*adapters.SimNode)
- peers := make([]*adapters.NodeConfig, nodeCount)
- nodes := make([]*enode.Node, nodeCount)
- for i := 0; i < nodeCount; i++ {
- peers[i] = adapters.RandomNodeConfig()
- peers[i].Services = []string{"mock"}
- if _, err := net.NewNodeWithConfig(peers[i]); err != nil {
- panic(fmt.Sprintf("error initializing peer %v: %v", peers[i].ID, err))
- }
- if err := net.Start(peers[i].ID); err != nil {
- panic(fmt.Sprintf("error starting peer %v: %v", peers[i].ID, err))
- }
- nodes[i] = peers[i].Node()
- }
- events := make(chan *p2p.PeerEvent, 1000)
- node.SubscribeEvents(events)
- ps := &ProtocolSession{
- Server: node.Server(),
- Nodes: nodes,
- adapter: adapter,
- events: events,
- }
- self := &ProtocolTester{
- ProtocolSession: ps,
- network: net,
- }
-
- self.Connect(nodeConfig.ID, peers...)
-
- return self
-}
-
-// Stop stops the p2p server
-func (t *ProtocolTester) Stop() {
- t.Server.Stop()
- t.network.Shutdown()
-}
-
-// Connect brings up the remote peer node and connects it using the
-// p2p/simulations network connection with the in memory network adapter
-func (t *ProtocolTester) Connect(selfID enode.ID, peers ...*adapters.NodeConfig) {
- for _, peer := range peers {
- log.Trace(fmt.Sprintf("connect to %v", peer.ID))
- if err := t.network.Connect(selfID, peer.ID); err != nil {
- panic(fmt.Sprintf("error connecting to peer %v: %v", peer.ID, err))
- }
- }
-
-}
-
-// testNode wraps a protocol run function and implements the node.Service
-// interface
-type testNode struct {
- run func(*p2p.Peer, p2p.MsgReadWriter) error
-}
-
-func (t *testNode) Protocols() []p2p.Protocol {
- return []p2p.Protocol{{
- Length: 100,
- Run: t.run,
- }}
-}
-
-func (t *testNode) APIs() []rpc.API {
- return nil
-}
-
-func (t *testNode) Start(server *p2p.Server) error {
- return nil
-}
-
-func (t *testNode) Stop() error {
- return nil
-}
-
-// mockNode is a testNode which doesn't actually run a protocol, instead
-// exposing channels so that tests can manually trigger and expect certain
-// messages
-type mockNode struct {
- testNode
-
- trigger chan *Trigger
- expect chan []Expect
- err chan error
- stop chan struct{}
- stopOnce sync.Once
-}
-
-func newMockNode() *mockNode {
- mock := &mockNode{
- trigger: make(chan *Trigger),
- expect: make(chan []Expect),
- err: make(chan error),
- stop: make(chan struct{}),
- }
- mock.testNode.run = mock.Run
- return mock
-}
-
-// Run is a protocol run function which just loops waiting for tests to
-// instruct it to either trigger or expect a message from the peer
-func (m *mockNode) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
- for {
- select {
- case trig := <-m.trigger:
- wmsg := Wrap(trig.Msg)
- m.err <- p2p.Send(rw, trig.Code, wmsg)
- case exps := <-m.expect:
- m.err <- expectMsgs(rw, exps)
- case <-m.stop:
- return nil
- }
- }
-}
-
-func (m *mockNode) Trigger(trig *Trigger) error {
- m.trigger <- trig
- return <-m.err
-}
-
-func (m *mockNode) Expect(exp ...Expect) error {
- m.expect <- exp
- return <-m.err
-}
-
-func (m *mockNode) Stop() error {
- m.stopOnce.Do(func() { close(m.stop) })
- return nil
-}
-
-func expectMsgs(rw p2p.MsgReadWriter, exps []Expect) error {
- matched := make([]bool, len(exps))
- for {
- msg, err := rw.ReadMsg()
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- actualContent, err := ioutil.ReadAll(msg.Payload)
- if err != nil {
- return err
- }
- var found bool
- for i, exp := range exps {
- if exp.Code == msg.Code && bytes.Equal(actualContent, mustEncodeMsg(Wrap(exp.Msg))) {
- if matched[i] {
- return fmt.Errorf("message #%d received two times", i)
- }
- matched[i] = true
- found = true
- break
- }
- }
- if !found {
- expected := make([]string, 0)
- for i, exp := range exps {
- if matched[i] {
- continue
- }
- expected = append(expected, fmt.Sprintf("code %d payload %x", exp.Code, mustEncodeMsg(Wrap(exp.Msg))))
- }
- return fmt.Errorf("unexpected message code %d payload %x, expected %s", msg.Code, actualContent, strings.Join(expected, " or "))
- }
- done := true
- for _, m := range matched {
- if !m {
- done = false
- break
- }
- }
- if done {
- return nil
- }
- }
- for i, m := range matched {
- if !m {
- return fmt.Errorf("expected message #%d not received", i)
- }
- }
- return nil
-}
-
-// mustEncodeMsg uses rlp to encode a message.
-// In case of error it panics.
-func mustEncodeMsg(msg interface{}) []byte {
- contentEnc, err := rlp.EncodeToBytes(msg)
- if err != nil {
- panic("content encode error: " + err.Error())
- }
- return contentEnc
-}
-
-type WrappedMsg struct {
- Context []byte
- Size uint32
- Payload []byte
-}
-
-func Wrap(msg interface{}) interface{} {
- data, _ := rlp.EncodeToBytes(msg)
- return &WrappedMsg{
- Size: uint32(len(data)),
- Payload: data,
- }
-}
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 28af7781d..97e4e58bb 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -63,9 +63,9 @@ var DiscoveryV5Bootnodes = []string{}
const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@"
-// These DNS names provide bootstrap connectivity for public testnets and the mainnet.
-// See https://github.com/core-coin/discv4-dns-lists for more information.
-var KnownDNSNetworks = map[common.Hash]string{
- MainnetGenesisHash: dnsPrefix + "all.mainnet.corenode.stream",
- DevinGenesisHash: dnsPrefix + "all.devin.corenode.stream",
+// KnownDNSNetwork returns the address of a public DNS-based node list for the given
+// genesis hash and protocol. See https://github.com/core-coin/discv4-dns-lists for more
+// information.
+func KnownDNSNetwork(genesis common.Hash, protocol string) string {
+ return ""
}
diff --git a/params/version.go b/params/version.go
index c2b315738..3d4bdf346 100644
--- a/params/version.go
+++ b/params/version.go
@@ -16,51 +16,12 @@
package params
-import (
- "fmt"
-)
-
-const (
- VersionMajor = 1 // Major version component of the current release
- VersionMinor = 1 // Minor version component of the current release
- VersionPatch = 4 // Patch version component of the current release
- VersionMeta = "stable" // Version metadata to append to the version string
-)
-
-// Version holds the textual version string.
-var Version = func() string {
- return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
-}()
-
-// VersionWithMeta holds the textual version string including the metadata.
-var VersionWithMeta = func() string {
- v := Version
- if VersionMeta != "" {
- v += "-" + VersionMeta
- }
- return v
-}()
-
-// ArchiveVersion holds the textual version string used for Gocore archives.
-// e.g. "1.8.11-dea1ce05" for stable releases, or
-// "1.8.13-unstable-21c059b6" for unstable releases
-func ArchiveVersion(gitCommit string) string {
- vsn := Version
- if VersionMeta != "stable" {
- vsn += "-" + VersionMeta
- }
- if len(gitCommit) >= 8 {
- vsn += "-" + gitCommit[:8]
- }
- return vsn
-}
-
-func VersionWithCommit(gitCommit, gitDate string) string {
- vsn := VersionWithMeta
+func VersionWithTag(gitTag, gitCommit, gitDate string) string {
+ vsn := gitTag
if len(gitCommit) >= 8 {
vsn += "-" + gitCommit[:8]
}
- if (VersionMeta != "stable") && (gitDate != "") {
+ if (gitDate != "") {
vsn += "-" + gitDate
}
return vsn
diff --git a/rlp/iterator.go b/rlp/iterator.go
new file mode 100644
index 000000000..b566b4840
--- /dev/null
+++ b/rlp/iterator.go
@@ -0,0 +1,60 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+type listIterator struct {
+ data []byte
+ next []byte
+ err error
+}
+
+// NewListIterator creates an iterator for the (list) represented by data
+func NewListIterator(data RawValue) (*listIterator, error) {
+ k, t, c, err := readKind(data)
+ if err != nil {
+ return nil, err
+ }
+ if k != List {
+ return nil, ErrExpectedList
+ }
+ it := &listIterator{
+ data: data[t : t+c],
+ }
+ return it, nil
+
+}
+
+// Next forwards the iterator one step, returns true if it was not at end yet
+func (it *listIterator) Next() bool {
+ if len(it.data) == 0 {
+ return false
+ }
+ _, t, c, err := readKind(it.data)
+ it.next = it.data[:t+c]
+ it.data = it.data[t+c:]
+ it.err = err
+ return true
+}
+
+// Value returns the current value
+func (it *listIterator) Value() []byte {
+ return it.next
+}
+
+func (it *listIterator) Err() error {
+ return it.err
+}
diff --git a/rlp/iterator_test.go b/rlp/iterator_test.go
new file mode 100644
index 000000000..0eb54f9fd
--- /dev/null
+++ b/rlp/iterator_test.go
@@ -0,0 +1,58 @@
+// Copyright 2015 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "github.com/core-coin/go-core/common/hexutil"
+ "testing"
+)
+
+// TestIterator tests some basic things about the ListIterator. A more
+// comprehensive test can be found in core/rlp_test.go, where we can
+// use both types and rlp without dependency cycles
+func TestIterator(t *testing.T) {
+ bodyRlpHex := "0xf902cbf8d6f869800182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ba01025c66fad28b4ce3370222624d952c35529e602af7cbe04f667371f61b0e3b3a00ab8813514d1217059748fd903288ace1b4001a4bc5fbde2790debdc8167de2ff869010182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ca05ac4cf1d19be06f3742c21df6c49a7e929ceb3dbaf6a09f3cfb56ff6828bd9a7a06875970133a35e63ac06d360aa166d228cc013e9b96e0a2cae7f55b22e1ee2e8f901f0f901eda0c75448377c0e426b8017b23c5f77379ecf69abc1d5c224284ad3ba1c46c59adaa00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
+ bodyRlp := hexutil.MustDecode(bodyRlpHex)
+
+ it, err := NewListIterator(bodyRlp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Check that txs exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got zero")
+ }
+ txs := it.Value()
+ // Check that uncles exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got one")
+ }
+ txit, err := NewListIterator(txs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var i = 0
+ for txit.Next() {
+ if txit.err != nil {
+ t.Fatal(txit.err)
+ }
+ i++
+ }
+ if exp := 2; i != exp {
+ t.Errorf("count wrong, expected %d got %d", i, exp)
+ }
+}
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 3b0281f10..7674b65bf 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -156,10 +156,10 @@ func testClientCancel(transport string, t *testing.T) {
//
// Once a connection is dead, there is a fair chance it won't connect
// successfully because the accept is delayed by 1s.
- maxContextCancelTimeout := 300 * time.Millisecond
+ maxContextCancelTimeout := 600 * time.Millisecond
fl := &flakeyListener{
- maxAcceptDelay: 1 * time.Second,
- maxKillTimeout: 600 * time.Millisecond,
+ maxAcceptDelay: 2 * time.Second,
+ maxKillTimeout: 1200 * time.Millisecond,
}
var client *Client
diff --git a/rpc/endpoints.go b/rpc/endpoints.go
index d4930b9f1..e380f2c0b 100644
--- a/rpc/endpoints.go
+++ b/rpc/endpoints.go
@@ -22,89 +22,6 @@ import (
"github.com/core-coin/go-core/log"
)
-// checkModuleAvailability checks that all names given in modules are actually
-// available API services. It assumes that the MetadataApi module ("rpc") is always available;
-// the registration of this "rpc" module happens in NewServer() and is thus common to all endpoints.
-func checkModuleAvailability(modules []string, apis []API) (bad, available []string) {
- availableSet := make(map[string]struct{})
- for _, api := range apis {
- if _, ok := availableSet[api.Namespace]; !ok {
- availableSet[api.Namespace] = struct{}{}
- available = append(available, api.Namespace)
- }
- }
- for _, name := range modules {
- if _, ok := availableSet[name]; !ok && name != MetadataApi {
- bad = append(bad, name)
- }
- }
- return bad, available
-}
-
-// StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules.
-func StartHTTPEndpoint(endpoint string, apis []API, modules []string, cors []string, vhosts []string, timeouts HTTPTimeouts) (net.Listener, *Server, error) {
- if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
- log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
- }
- // Generate the whitelist based on the allowed modules
- whitelist := make(map[string]bool)
- for _, module := range modules {
- whitelist[module] = true
- }
- // Register all the APIs exposed by the services
- handler := NewServer()
- for _, api := range apis {
- if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
- if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
- return nil, nil, err
- }
- log.Debug("HTTP registered", "namespace", api.Namespace)
- }
- }
- // All APIs registered, start the HTTP listener
- var (
- listener net.Listener
- err error
- )
- if listener, err = net.Listen("tcp", endpoint); err != nil {
- return nil, nil, err
- }
- go NewHTTPServer(cors, vhosts, timeouts, handler).Serve(listener)
- return listener, handler, err
-}
-
-// StartWSEndpoint starts a websocket endpoint.
-func StartWSEndpoint(endpoint string, apis []API, modules []string, wsOrigins []string, exposeAll bool) (net.Listener, *Server, error) {
- if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
- log.Error("Unavailable modules in WS API list", "unavailable", bad, "available", available)
- }
- // Generate the whitelist based on the allowed modules
- whitelist := make(map[string]bool)
- for _, module := range modules {
- whitelist[module] = true
- }
- // Register all the APIs exposed by the services
- handler := NewServer()
- for _, api := range apis {
- if exposeAll || whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
- if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
- return nil, nil, err
- }
- log.Debug("WebSocket registered", "service", api.Service, "namespace", api.Namespace)
- }
- }
- // All APIs registered, start the HTTP listener
- var (
- listener net.Listener
- err error
- )
- if listener, err = net.Listen("tcp", endpoint); err != nil {
- return nil, nil, err
- }
- go NewWSServer(wsOrigins, handler).Serve(listener)
- return listener, handler, err
-}
-
// StartIPCEndpoint starts an IPC endpoint.
func StartIPCEndpoint(ipcEndpoint string, apis []API) (net.Listener, *Server, error) {
// Register all the APIs exposed by the services.
diff --git a/rpc/gzip.go b/rpc/gzip.go
deleted file mode 100644
index 419c083dc..000000000
--- a/rpc/gzip.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 by the Authors
-// This file is part of the go-core library.
-//
-// The go-core library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-core library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-core library. If not, see .
-
-package rpc
-
-import (
- "compress/gzip"
- "io"
- "io/ioutil"
- "net/http"
- "strings"
- "sync"
-)
-
-var gzPool = sync.Pool{
- New: func() interface{} {
- w := gzip.NewWriter(ioutil.Discard)
- return w
- },
-}
-
-type gzipResponseWriter struct {
- io.Writer
- http.ResponseWriter
-}
-
-func (w *gzipResponseWriter) WriteHeader(status int) {
- w.Header().Del("Content-Length")
- w.ResponseWriter.WriteHeader(status)
-}
-
-func (w *gzipResponseWriter) Write(b []byte) (int, error) {
- return w.Writer.Write(b)
-}
-
-func newGzipHandler(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
- next.ServeHTTP(w, r)
- return
- }
-
- w.Header().Set("Content-Encoding", "gzip")
-
- gz := gzPool.Get().(*gzip.Writer)
- defer gzPool.Put(gz)
-
- gz.Reset(w)
- defer gz.Close()
-
- next.ServeHTTP(&gzipResponseWriter{ResponseWriter: w, Writer: gz}, r)
- })
-}
diff --git a/rpc/http.go b/rpc/http.go
index 1cdf028ee..0fb28e805 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -25,15 +25,10 @@ import (
"io"
"io/ioutil"
"mime"
- "net"
"net/http"
"net/url"
- "strings"
"sync"
"time"
-
- "github.com/core-coin/go-core/log"
- "github.com/rs/cors"
)
const (
@@ -228,37 +223,6 @@ func (t *httpServerConn) RemoteAddr() string {
// SetWriteDeadline does nothing and always returns nil.
func (t *httpServerConn) SetWriteDeadline(time.Time) error { return nil }
-// NewHTTPServer creates a new HTTP RPC server around an API provider.
-//
-// Deprecated: Server implements http.Handler
-func NewHTTPServer(cors []string, vhosts []string, timeouts HTTPTimeouts, srv http.Handler) *http.Server {
- // Wrap the CORS-handler within a host-handler
- handler := newCorsHandler(srv, cors)
- handler = newVHostHandler(vhosts, handler)
- handler = newGzipHandler(handler)
-
- // Make sure timeout values are meaningful
- if timeouts.ReadTimeout < time.Second {
- log.Warn("Sanitizing invalid HTTP read timeout", "provided", timeouts.ReadTimeout, "updated", DefaultHTTPTimeouts.ReadTimeout)
- timeouts.ReadTimeout = DefaultHTTPTimeouts.ReadTimeout
- }
- if timeouts.WriteTimeout < time.Second {
- log.Warn("Sanitizing invalid HTTP write timeout", "provided", timeouts.WriteTimeout, "updated", DefaultHTTPTimeouts.WriteTimeout)
- timeouts.WriteTimeout = DefaultHTTPTimeouts.WriteTimeout
- }
- if timeouts.IdleTimeout < time.Second {
- log.Warn("Sanitizing invalid HTTP idle timeout", "provided", timeouts.IdleTimeout, "updated", DefaultHTTPTimeouts.IdleTimeout)
- timeouts.IdleTimeout = DefaultHTTPTimeouts.IdleTimeout
- }
- // Bundle and start the HTTP server
- return &http.Server{
- Handler: handler,
- ReadTimeout: timeouts.ReadTimeout,
- WriteTimeout: timeouts.WriteTimeout,
- IdleTimeout: timeouts.IdleTimeout,
- }
-}
-
// ServeHTTP serves JSON-RPC requests over HTTP.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Permit dumb empty requests for remote health-checks (AWS)
@@ -315,64 +279,3 @@ func validateRequest(r *http.Request) (int, error) {
err := fmt.Errorf("invalid content type, only %s is supported", contentType)
return http.StatusUnsupportedMediaType, err
}
-
-func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
- // disable CORS support if user has not specified a custom CORS configuration
- if len(allowedOrigins) == 0 {
- return srv
- }
- c := cors.New(cors.Options{
- AllowedOrigins: allowedOrigins,
- AllowedMethods: []string{http.MethodPost, http.MethodGet},
- MaxAge: 600,
- AllowedHeaders: []string{"*"},
- })
- return c.Handler(srv)
-}
-
-// virtualHostHandler is a handler which validates the Host-header of incoming requests.
-// The virtualHostHandler can prevent DNS rebinding attacks, which do not utilize CORS-headers,
-// since they do in-domain requests against the RPC api. Instead, we can see on the Host-header
-// which domain was used, and validate that against a whitelist.
-type virtualHostHandler struct {
- vhosts map[string]struct{}
- next http.Handler
-}
-
-// ServeHTTP serves JSON-RPC requests over HTTP, implements http.Handler
-func (h *virtualHostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- // if r.Host is not set, we can continue serving since a browser would set the Host header
- if r.Host == "" {
- h.next.ServeHTTP(w, r)
- return
- }
- host, _, err := net.SplitHostPort(r.Host)
- if err != nil {
- // Either invalid (too many colons) or no port specified
- host = r.Host
- }
- if ipAddr := net.ParseIP(host); ipAddr != nil {
- // It's an IP address, we can serve that
- h.next.ServeHTTP(w, r)
- return
-
- }
- // Not an IP address, but a hostname. Need to validate
- if _, exist := h.vhosts["*"]; exist {
- h.next.ServeHTTP(w, r)
- return
- }
- if _, exist := h.vhosts[host]; exist {
- h.next.ServeHTTP(w, r)
- return
- }
- http.Error(w, "invalid host specified", http.StatusForbidden)
-}
-
-func newVHostHandler(vhosts []string, next http.Handler) http.Handler {
- vhostMap := make(map[string]struct{})
- for _, allowedHost := range vhosts {
- vhostMap[strings.ToLower(allowedHost)] = struct{}{}
- }
- return &virtualHostHandler{vhostMap, next}
-}
diff --git a/rpc/types.go b/rpc/types.go
index 91e129ffd..335e4c8fa 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -29,10 +29,11 @@ import (
// API describes the set of methods offered over the RPC interface
type API struct {
- Namespace string // namespace under which the rpc methods of Service are exposed
- Version string // api version for DApp's
- Service interface{} // receiver instance which holds the methods
- Public bool // indication if the methods must be considered safe for public use
+ Namespace string // namespace under which the rpc methods of Service are exposed
+ Version string // api version for DApp's
+ Service interface{} // receiver instance which holds the methods
+ Public bool // indication if the methods must be considered safe for public use
+ Authenticated bool // whether the api should only be available behind authentication.
}
// Error wraps RPC errors, which contain an error code in addition to the message.
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 641cf85d0..e8c806954 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -26,8 +26,8 @@ import (
"strings"
"sync"
- mapset "github.com/deckarep/golang-set"
"github.com/core-coin/go-core/log"
+ mapset "github.com/deckarep/golang-set"
"github.com/gorilla/websocket"
)
@@ -38,13 +38,6 @@ const (
var wsBufferPool = new(sync.Pool)
-// NewWSServer creates a new websocket RPC server around an API provider.
-//
-// Deprecated: use Server.WebsocketHandler
-func NewWSServer(allowedOrigins []string, srv *Server) *http.Server {
- return &http.Server{Handler: srv.WebsocketHandler(allowedOrigins)}
-}
-
// WebsocketHandler returns a handler that serves JSON-RPC to WebSocket connections.
//
// allowedOrigins should be a comma-separated list of allowed origin URLs.
@@ -79,14 +72,14 @@ func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool {
allowAllOrigins = true
}
if origin != "" {
- origins.Add(strings.ToLower(origin))
+ origins.Add(origin)
}
}
// allow localhost if no allowedOrigins are specified.
if len(origins.ToSlice()) == 0 {
origins.Add("http://localhost")
if hostname, err := os.Hostname(); err == nil {
- origins.Add("http://" + strings.ToLower(hostname))
+ origins.Add("http://" + hostname)
}
}
log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice()))
@@ -101,7 +94,7 @@ func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool {
}
// Verify origin against whitelist.
origin := strings.ToLower(req.Header.Get("Origin"))
- if allowAllOrigins || origins.Contains(origin) {
+ if allowAllOrigins || originIsAllowed(origins, origin) {
return true
}
log.Warn("Rejected WebSocket connection", "origin", origin)
@@ -124,6 +117,65 @@ func (e wsHandshakeError) Error() string {
return s
}
+func originIsAllowed(allowedOrigins mapset.Set, browserOrigin string) bool {
+ it := allowedOrigins.Iterator()
+ for origin := range it.C {
+ if ruleAllowsOrigin(origin.(string), browserOrigin) {
+ return true
+ }
+ }
+ return false
+}
+
+func ruleAllowsOrigin(allowedOrigin string, browserOrigin string) bool {
+ var (
+ allowedScheme, allowedHostname, allowedPort string
+ browserScheme, browserHostname, browserPort string
+ err error
+ )
+ allowedScheme, allowedHostname, allowedPort, err = parseOriginURL(allowedOrigin)
+ if err != nil {
+ log.Warn("Error parsing allowed origin specification", "spec", allowedOrigin, "error", err)
+ return false
+ }
+ browserScheme, browserHostname, browserPort, err = parseOriginURL(browserOrigin)
+ if err != nil {
+ log.Warn("Error parsing browser 'Origin' field", "Origin", browserOrigin, "error", err)
+ return false
+ }
+ if allowedScheme != "" && allowedScheme != browserScheme {
+ return false
+ }
+ if allowedHostname != "" && allowedHostname != browserHostname {
+ return false
+ }
+ if allowedPort != "" && allowedPort != browserPort {
+ return false
+ }
+ return true
+}
+
+func parseOriginURL(origin string) (string, string, string, error) {
+ parsedURL, err := url.Parse(strings.ToLower(origin))
+ if err != nil {
+ return "", "", "", err
+ }
+ var scheme, hostname, port string
+ if strings.Contains(origin, "://") {
+ scheme = parsedURL.Scheme
+ hostname = parsedURL.Hostname()
+ port = parsedURL.Port()
+ } else {
+ scheme = ""
+ hostname = parsedURL.Scheme
+ port = parsedURL.Opaque
+ if hostname == "" {
+ hostname = origin
+ }
+ }
+ return scheme, hostname, port, nil
+}
+
// DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server
// that is listening on the given endpoint using the provided dialer.
func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) {
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index 132a21182..8aff1e4a8 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -72,7 +72,7 @@ func TestWebsocketOriginCheck(t *testing.T) {
// Connections without origin header should work.
client, err = DialWebsocket(context.Background(), wsURL, "")
if err != nil {
- t.Fatal("error for empty origin")
+ t.Fatalf("error for empty origin: %v", err)
}
client.Close()
}
diff --git a/signer/fourbyte/4byte.go b/signer/fourbyte/4byte.go
index 032f6e873..a48ea182b 100644
--- a/signer/fourbyte/4byte.go
+++ b/signer/fourbyte/4byte.go
@@ -146899,7 +146899,7 @@ func _4byteJson() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "4byte.json", size: 5954391, mode: os.FileMode(0664), modTime: time.Unix(1619030906, 0)}
+ info := bindataFileInfo{name: "4byte.json", size: 5954391, mode: os.FileMode(0664), modTime: time.Unix(1657276926, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x78, 0xa2, 0xbc, 0xfb, 0x95, 0x5a, 0x5f, 0x6c, 0xdd, 0xc4, 0x5, 0x2c, 0xd0, 0x37, 0x7a, 0x8f, 0x25, 0xd2, 0x9a, 0x27, 0x3c, 0xb2, 0x91, 0x82, 0x97, 0x5e, 0xcf, 0x86, 0x46, 0x9c, 0x83, 0x13}}
return a, nil
}
diff --git a/signer/fourbyte/abi.go b/signer/fourbyte/abi.go
index b9d516206..c8fdf476e 100644
--- a/signer/fourbyte/abi.go
+++ b/signer/fourbyte/abi.go
@@ -140,7 +140,7 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
return nil, fmt.Errorf("signature %q matches, but arguments mismatch: %v", method.String(), err)
}
// Everything valid, assemble the call infos for the signer
- decoded := decodedCallData{signature: method.Sig(), name: method.RawName}
+ decoded := decodedCallData{signature: method.Sig, name: method.RawName}
for i := 0; i < len(method.Inputs); i++ {
decoded.inputs = append(decoded.inputs, decodedArgument{
soltype: method.Inputs[i],
@@ -158,7 +158,7 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
if !bytes.Equal(encoded, argdata) {
was := common.Bytes2Hex(encoded)
exp := common.Bytes2Hex(argdata)
- return nil, fmt.Errorf("WARNING: Supplied data is stuffed with extra data. \nWant %s\nHave %s\nfor method %v", exp, was, method.Sig())
+ return nil, fmt.Errorf("WARNING: Supplied data is stuffed with extra data. \nWant %s\nHave %s\nfor method %v", exp, was, method.Sig)
}
return &decoded, nil
}
diff --git a/signer/fourbyte/fourbyte_test.go b/signer/fourbyte/fourbyte_test.go
index 263962d8c..986d119b2 100644
--- a/signer/fourbyte/fourbyte_test.go
+++ b/signer/fourbyte/fourbyte_test.go
@@ -48,8 +48,8 @@ func TestEmbeddedDatabase(t *testing.T) {
t.Errorf("Failed to get method by id (%s): %v", id, err)
continue
}
- if m.Sig() != selector {
- t.Errorf("Selector mismatch: have %v, want %v", m.Sig(), selector)
+ if m.Sig != selector {
+ t.Errorf("Selector mismatch: have %v, want %v", m.Sig, selector)
}
}
}
diff --git a/signer/rules/deps/bindata.go b/signer/rules/deps/bindata.go
index 556c84064..c37870ce2 100644
--- a/signer/rules/deps/bindata.go
+++ b/signer/rules/deps/bindata.go
@@ -84,7 +84,7 @@ func bignumberJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "bignumber.js", size: 17314, mode: os.FileMode(0664), modTime: time.Unix(1603198464, 0)}
+ info := bindataFileInfo{name: "bignumber.js", size: 17314, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5b, 0x75, 0xfc, 0x15, 0x5e, 0x7d, 0x27, 0x1a, 0x9a, 0xb5, 0xfb, 0x16, 0x90, 0xf4, 0x93, 0xac, 0xcb, 0x6c, 0x9c, 0xcd, 0x68, 0xe6, 0xd0, 0x3a, 0xcf, 0xa3, 0x83, 0x5c, 0x20, 0x34, 0x66, 0x45}}
return a, nil
}
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index f5ae4e1b7..b6b8d1d68 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -123,7 +123,7 @@ func (t *BlockTest) Run(snapshotter bool) error {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
}
- chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil, nil)
if err != nil {
return err
}
diff --git a/tests/init.go b/tests/init.go
index a257203f9..717702efd 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -19,10 +19,15 @@ package tests
import (
"fmt"
"github.com/core-coin/go-core/params"
+ "math/big"
)
// Forks table defines supported forks and their chain config.
var Forks = map[string]*params.ChainConfig{
+ "Mainnet": {
+ NetworkID: big.NewInt(1),
+ Cryptore: ¶ms.CryptoreConfig{},
+ },
}
// UnsupportedForkError is returned when a test requests a fork that isn't implemented.
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 8137c4678..4660ffa16 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -112,11 +112,11 @@ type stTransactionMarshaling struct {
PrivateKey hexutil.Bytes
}
-// getVMConfig takes a fork definition and returns a chain config.
+// GetChainConfig takes a fork definition and returns a chain config.
// The fork definition can be
// - a plain forkname, e.g. `Nucleus`,
// - a fork basename, and a list of CIPs to enable; e.g. `Nucleus+1884+1283`.
-func getVMConfig(forkString string) (baseConfig *params.ChainConfig, cips []int, err error) {
+func GetChainConfig(forkString string) (baseConfig *params.ChainConfig, cips []int, err error) {
var (
splitForks = strings.Split(forkString, "+")
ok bool
@@ -166,7 +166,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) {
- config, cips, err := getVMConfig(subtest.Fork)
+ config, cips, err := GetChainConfig(subtest.Fork)
if err != nil {
return nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
@@ -186,7 +186,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
energypool := new(core.EnergyPool)
energypool.AddEnergy(block.EnergyLimit())
snapshot := statedb.Snapshot()
- if _, _, _, err := core.ApplyMessage(cvm, msg, energypool); err != nil {
+ if _, err := core.ApplyMessage(cvm, msg, energypool); err != nil {
statedb.RevertToSnapshot(snapshot)
}
// Commit block
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index abab91561..d7efdbcab 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -120,7 +120,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
}
}
}
- it := db.diskdb.NewIterator()
+ it := db.diskdb.NewIterator(nil, nil)
for it.Next() {
key := it.Key()
if _, ok := hashes[common.BytesToHash(key)]; !ok {
@@ -312,7 +312,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
if memonly {
memKeys = triedb.Nodes()
} else {
- it := diskdb.NewIterator()
+ it := diskdb.NewIterator(nil, nil)
for it.Next() {
diskKeys = append(diskKeys, it.Key())
}
diff --git a/trie/proof_test.go b/trie/proof_test.go
index f5d559236..f96b79906 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -106,7 +106,7 @@ func TestBadProof(t *testing.T) { //TODO: TEST
if proof == nil {
t.Fatalf("prover %d: nil proof", i)
}
- it := proof.NewIterator()
+ it := proof.NewIterator(nil, nil)
for i, d := 0, mrand.Intn(proof.Len()); i <= d; i++ {
it.Next()
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 4bbbfa7dd..0b26c4330 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -178,9 +178,9 @@ func (t *SecureTrie) hashKey(key []byte) []byte {
h := newHasher(false)
h.sha.Reset()
h.sha.Write(key)
- buf := h.sha.Sum(t.hashKeyBuf[:0])
+ h.sha.Read(t.hashKeyBuf[:])
returnHasherToPool(h)
- return buf
+ return t.hashKeyBuf[:]
}
// getSecKeyCache returns the current secure key cache, creating a new one if
diff --git a/trie/sync.go b/trie/sync.go
index b72079e3f..fd14f45b8 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -115,7 +115,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
if s.membatch.hasNode(root) {
return
}
- if s.bloom.Contains(root[:]) {
+ if s.bloom == nil || s.bloom.Contains(root[:]) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
@@ -155,7 +155,7 @@ func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) {
if s.membatch.hasCode(hash) {
return
}
- if s.bloom.Contains(hash[:]) {
+ if s.bloom == nil || s.bloom.Contains(hash[:]) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, the blob is present for sure.
// Note we only check the existence with new code scheme, fast
@@ -334,7 +334,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
if s.membatch.hasNode(hash) {
continue
}
- if s.bloom.Contains(node) {
+ if s.bloom == nil || s.bloom.Contains(node) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
index fd0395735..4c3bb0da3 100644
--- a/trie/sync_bloom.go
+++ b/trie/sync_bloom.go
@@ -100,7 +100,7 @@ func (b *SyncBloom) init(database xcbdb.Iteratee) {
// Note, this is fine, because everything inserted into leveldb by fast sync is
// also pushed into the bloom directly, so we're not missing anything when the
// iterator is swapped out for a new one.
- it := database.NewIterator()
+ it := database.NewIterator(nil, nil)
var (
start = time.Now()
@@ -123,7 +123,7 @@ func (b *SyncBloom) init(database xcbdb.Iteratee) {
key := common.CopyBytes(it.Key())
it.Release()
- it = database.NewIteratorWithStart(key)
+ it = database.NewIterator(nil, key)
log.Info("Initializing fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate(), "elapsed", common.PrettyDuration(time.Since(start)))
swap = time.Now()
diff --git a/xcb/api_backend.go b/xcb/api_backend.go
index 06891b5d4..47e510667 100644
--- a/xcb/api_backend.go
+++ b/xcb/api_backend.go
@@ -19,6 +19,8 @@ package xcb
import (
"context"
"errors"
+ "github.com/core-coin/go-core/consensus"
+ "github.com/core-coin/go-core/miner"
"math/big"
"github.com/core-coin/go-core/accounts"
@@ -257,6 +259,10 @@ func (b *XcbAPIBackend) TxPoolContent() (map[common.Address]types.Transactions,
return b.xcb.TxPool().Content()
}
+func (b *XcbAPIBackend) TxPool() *core.TxPool {
+ return b.xcb.TxPool()
+}
+
func (b *XcbAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.xcb.TxPool().SubscribeNewTxsEvent(ch)
}
@@ -293,6 +299,10 @@ func (b *XcbAPIBackend) RPCEnergyCap() *big.Int {
return b.xcb.config.RPCEnergyCap
}
+func (b *XcbAPIBackend) RPCTxFeeCap() float64 {
+ return b.xcb.config.RPCTxFeeCap
+}
+
func (b *XcbAPIBackend) BloomStatus() (uint64, uint64) {
sections, _, _ := b.xcb.bloomIndexer.Sections()
return params.BloomBitsBlocks, sections
@@ -303,3 +313,19 @@ func (b *XcbAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.Ma
go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.xcb.bloomRequests)
}
}
+
+func (b *XcbAPIBackend) Engine() consensus.Engine {
+ return b.xcb.engine
+}
+
+func (b *XcbAPIBackend) CurrentHeader() *types.Header {
+ return b.xcb.blockchain.CurrentHeader()
+}
+
+func (b *XcbAPIBackend) Miner() *miner.Miner {
+ return b.xcb.Miner()
+}
+
+func (b *XcbAPIBackend) StartMining(threads int) error {
+ return b.xcb.StartMining(threads)
+}
diff --git a/xcb/api_tracer.go b/xcb/api_tracer.go
index 42fdd6637..893041b25 100644
--- a/xcb/api_tracer.go
+++ b/xcb/api_tracer.go
@@ -501,7 +501,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
vmctx := core.NewCVMContext(msg, block.Header(), api.xcb.blockchain, nil)
vmenv := vm.NewCVM(vmctx, statedb, api.xcb.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(msg.Energy())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(msg.Energy())); err != nil {
failed = err
break
}
@@ -594,7 +594,7 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
}
// Execute the transaction and flush any traces to disk
vmenv := vm.NewCVM(vmctx, statedb, api.xcb.blockchain.Config(), vmConf)
- _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(msg.Energy()))
+ _, err = core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(msg.Energy()))
if writer != nil {
writer.Flush()
}
@@ -721,7 +721,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha
return api.traceTx(ctx, msg, vmctx, statedb, config)
}
-// TraceCall lets you trace a given eth_call. It collects the structured logs created during the execution of EVM
+// TraceCall lets you trace a given xcb_call. It collects the structured logs created during the execution of CVM
// if the given transaction was added on top of the provided block and returns them as a JSON object.
// You can provide -2 as a block number to trace on top of the pending block.
func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args xcbapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceConfig) (interface{}, error) {
@@ -794,7 +794,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v
// Run the transaction with tracing enabled.
vmenv := vm.NewCVM(vmctx, statedb, api.xcb.blockchain.Config(), vm.Config{Debug: true, Tracer: tracer})
- ret, energy, failed, err := core.ApplyMessage(vmenv, message, new(core.EnergyPool).AddEnergy(message.Energy()))
+ result, err := core.ApplyMessage(vmenv, message, new(core.EnergyPool).AddEnergy(message.Energy()))
if err != nil {
return nil, fmt.Errorf("tracing failed: %v", err)
}
@@ -802,9 +802,9 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v
switch tracer := tracer.(type) {
case *vm.StructLogger:
return &xcbapi.ExecutionResult{
- Energy: energy,
- Failed: failed,
- ReturnValue: fmt.Sprintf("%x", ret),
+ Energy: result.UsedEnergy,
+ Failed: result.Failed(),
+ ReturnValue: fmt.Sprintf("%x", result.Return()),
StructLogs: xcbapi.FormatLogs(tracer.StructLogs()),
}, nil
@@ -844,7 +844,7 @@ func (api *PrivateDebugAPI) computeTxEnv(block *types.Block, txIndex int, reexec
}
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewCVM(context, statedb, api.xcb.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.EnergyPool).AddEnergy(tx.Energy())); err != nil {
return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
diff --git a/xcb/backend.go b/xcb/backend.go
index e5333c684..8285f986f 100644
--- a/xcb/backend.go
+++ b/xcb/backend.go
@@ -26,7 +26,6 @@ import (
"sync/atomic"
"github.com/core-coin/go-core/accounts"
- "github.com/core-coin/go-core/accounts/abi/bind"
"github.com/core-coin/go-core/common"
"github.com/core-coin/go-core/common/hexutil"
"github.com/core-coin/go-core/consensus"
@@ -54,15 +53,6 @@ import (
"github.com/core-coin/go-core/xcbdb"
)
-type LesServer interface {
- Start(srvr *p2p.Server)
- Stop()
- APIs() []rpc.API
- Protocols() []p2p.Protocol
- SetBloomBitsIndexer(bbIndexer *core.ChainIndexer)
- SetContractBackend(bind.ContractBackend)
-}
-
// Core implements the Core full node service.
type Core struct {
config *Config
@@ -71,8 +61,7 @@ type Core struct {
txPool *core.TxPool
blockchain *core.BlockChain
protocolManager *ProtocolManager
- lesServer LesServer
- dialCandiates enode.Iterator
+ dialCandidates enode.Iterator
// DB interfaces
chainDb xcbdb.Database // Block chain database
@@ -94,25 +83,14 @@ type Core struct {
networkID uint64
netRPCService *xcbapi.PublicNetAPI
- lock sync.RWMutex // Protects the variadic fields (e.g. energy price and corebase)
-}
+ p2pServer *p2p.Server
-func (s *Core) AddLesServer(ls LesServer) {
- s.lesServer = ls
- ls.SetBloomBitsIndexer(s.bloomIndexer)
-}
-
-// SetClient sets a rpc client which connecting to our local node.
-func (s *Core) SetContractBackend(backend bind.ContractBackend) {
- // Pass the rpc client to les server if it is enabled.
- if s.lesServer != nil {
- s.lesServer.SetContractBackend(backend)
- }
+ lock sync.RWMutex // Protects the variadic fields (e.g. energy price and corebase)
}
// New creates a new Core object (including the
// initialisation of the common Core object)
-func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
+func New(stack *node.Node, config *Config) (*Core, error) {
// Ensure configuration values are compatible and sane
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run xcb.Core in light sync mode, use les.LightCore")
@@ -132,7 +110,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
// Assemble the Core object
- chainDb, err := ctx.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "xcb/db/chaindata/")
+ chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "xcb/db/chaindata/")
if err != nil {
return nil, err
}
@@ -148,15 +126,16 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
xcb := &Core{
config: config,
chainDb: chainDb,
- eventMux: ctx.EventMux,
- accountManager: ctx.AccountManager,
- engine: CreateConsensusEngine(ctx, chainConfig, &config.Cryptore, config.Miner.Notify, config.Miner.Noverify, chainDb),
+ eventMux: stack.EventMux(),
+ accountManager: stack.AccountManager(),
+ engine: CreateConsensusEngine(stack, chainConfig, &config.Cryptore, config.Miner.Notify, config.Miner.Noverify, chainDb),
closeBloomHandler: make(chan struct{}),
networkID: config.NetworkId,
energyPrice: config.Miner.EnergyPrice,
corebase: config.Miner.Corebase,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
+ p2pServer: stack.Server(),
}
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
@@ -168,7 +147,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
if !config.SkipBcVersionCheck {
if bcVersion != nil && *bcVersion > core.BlockChainVersion {
- return nil, fmt.Errorf("database version is v%d, Gocore %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
+ return nil, fmt.Errorf("database version is v%d, Gocore only supports v%d", *bcVersion, core.BlockChainVersion)
} else if bcVersion == nil || *bcVersion < core.BlockChainVersion {
log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
@@ -182,7 +161,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
- TrieCleanJournal: ctx.ResolvePath(config.TrieCleanCacheJournal),
+ TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
TrieCleanRejournal: config.TrieCleanCacheRejournal,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
@@ -191,7 +170,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
SnapshotLimit: config.SnapshotCache,
}
)
- xcb.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, xcb.engine, vmConfig, xcb.shouldPreserve)
+ xcb.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, xcb.engine, vmConfig, xcb.shouldPreserve, &config.TxLookupLimit)
if err != nil {
return nil, err
}
@@ -204,7 +183,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
xcb.bloomIndexer.Start(xcb.blockchain)
if config.TxPool.Journal != "" {
- config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
+ config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
}
xcb.txPool = core.NewTxPool(config.TxPool, chainConfig, xcb.blockchain)
@@ -214,24 +193,31 @@ func New(ctx *node.ServiceContext, config *Config) (*Core, error) {
if checkpoint == nil {
checkpoint = params.TrustedCheckpoints[genesisHash]
}
- if xcb.protocolManager, err = NewProtocolManager(chainConfig, checkpoint, config.SyncMode, config.NetworkId, xcb.eventMux, xcb.txPool, xcb.engine, xcb.blockchain, chainDb, cacheLimit, config.Whitelist, ctx.Config.BTTP); err != nil {
+ if xcb.protocolManager, err = NewProtocolManager(chainConfig, checkpoint, config.SyncMode, config.NetworkId, xcb.eventMux, xcb.txPool, xcb.engine, xcb.blockchain, chainDb, cacheLimit, config.Whitelist, stack.Config().BTTP); err != nil {
return nil, err
}
xcb.miner = miner.New(xcb, &config.Miner, chainConfig, xcb.EventMux(), xcb.engine, xcb.isLocalBlock)
xcb.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
- xcb.APIBackend = &XcbAPIBackend{ctx.ExtRPCEnabled(), xcb, nil}
+ xcb.APIBackend = &XcbAPIBackend{stack.Config().ExtRPCEnabled(), xcb, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.Miner.EnergyPrice
}
xcb.APIBackend.gpo = energyprice.NewOracle(xcb.APIBackend, gpoParams)
- xcb.dialCandiates, err = xcb.setupDiscovery(&ctx.Config.P2P)
+ xcb.dialCandidates, err = xcb.setupDiscovery(&stack.Config().P2P)
if err != nil {
return nil, err
}
+ // Start the RPC service
+ xcb.netRPCService = xcbapi.NewPublicNetAPI(xcb.p2pServer, xcb.NetVersion())
+
+ // Register the backend on the node
+ stack.RegisterAPIs(xcb.APIs())
+ stack.RegisterProtocols(xcb.Protocols())
+ stack.RegisterLifecycle(xcb)
return xcb, nil
}
@@ -239,7 +225,6 @@ func makeExtraData(extra []byte) []byte {
if len(extra) == 0 {
// create default extradata
extra, _ = rlp.EncodeToBytes([]interface{}{
- uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch),
"gocore",
runtime.Version(),
runtime.GOOS,
@@ -253,7 +238,7 @@ func makeExtraData(extra []byte) []byte {
}
// CreateConsensusEngine creates the required type of consensus engine instance for an Core service
-func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *cryptore.Config, notify []string, noverify bool, db xcbdb.Database) consensus.Engine {
+func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *cryptore.Config, notify []string, noverify bool, db xcbdb.Database) consensus.Engine {
// If proof-of-authority is requested, set it up
if chainConfig.Clique != nil {
return clique.New(chainConfig.Clique, db)
@@ -281,18 +266,9 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
func (s *Core) APIs() []rpc.API {
apis := xcbapi.GetAPIs(s.APIBackend)
- // Append any APIs exposed explicitly by the les server
- if s.lesServer != nil {
- apis = append(apis, s.lesServer.APIs()...)
- }
// Append any APIs exposed explicitly by the consensus engine
apis = append(apis, s.engine.APIs(s.BlockChain())...)
- // Append any APIs exposed explicitly by the les server
- if s.lesServer != nil {
- apis = append(apis, s.lesServer.APIs()...)
- }
-
// Append all the local APIs and return
return append(apis, []rpc.API{
{
@@ -507,57 +483,46 @@ func (s *Core) NetVersion() uint64 { return s.networkID }
func (s *Core) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
func (s *Core) Synced() bool { return atomic.LoadUint32(&s.protocolManager.acceptTxs) == 1 }
func (s *Core) ArchiveMode() bool { return s.config.NoPruning }
+func (s *Core) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer }
-// Protocols implements node.Service, returning all the currently configured
+// Protocols returns all the currently configured
// network protocols to start.
func (s *Core) Protocols() []p2p.Protocol {
protos := make([]p2p.Protocol, len(ProtocolVersions))
for i, vsn := range ProtocolVersions {
protos[i] = s.protocolManager.makeProtocol(vsn)
protos[i].Attributes = []enr.Entry{s.currentXcbEntry()}
- protos[i].DialCandidates = s.dialCandiates
- }
- if s.lesServer != nil {
- protos = append(protos, s.lesServer.Protocols()...)
+ protos[i].DialCandidates = s.dialCandidates
}
return protos
}
-// Start implements node.Service, starting all internal goroutines needed by the
+// Start implements node.Lifecycle, starting all internal goroutines needed by the
// Core protocol implementation.
-func (s *Core) Start(srvr *p2p.Server) error {
- s.startXcbEntryUpdate(srvr.LocalNode())
+func (s *Core) Start() error {
+ s.startXcbEntryUpdate(s.p2pServer.LocalNode())
// Start the bloom bits servicing goroutines
s.startBloomHandlers(params.BloomBitsBlocks)
- // Start the RPC service
- s.netRPCService = xcbapi.NewPublicNetAPI(srvr, s.NetVersion())
-
// Figure out a max peers count based on the server limits
- maxPeers := srvr.MaxPeers
+ maxPeers := s.p2pServer.MaxPeers
if s.config.LightServ > 0 {
- if s.config.LightPeers >= srvr.MaxPeers {
- return fmt.Errorf("invalid peer config: light peer count (%d) >= total peer count (%d)", s.config.LightPeers, srvr.MaxPeers)
+ if s.config.LightPeers >= s.p2pServer.MaxPeers {
+ return fmt.Errorf("invalid peer config: light peer count (%d) >= total peer count (%d)", s.config.LightPeers, s.p2pServer.MaxPeers)
}
maxPeers -= s.config.LightPeers
}
// Start the networking layer and the light server if requested
s.protocolManager.Start(maxPeers)
- if s.lesServer != nil {
- s.lesServer.Start(srvr)
- }
return nil
}
-// Stop implements node.Service, terminating all internal goroutines used by the
+// Stop implements node.Lifecycle, terminating all internal goroutines used by the
// Core protocol.
func (s *Core) Stop() error {
// Stop all the peer-related stuff first.
s.protocolManager.Stop()
- if s.lesServer != nil {
- s.lesServer.Stop()
- }
// Then stop everything else.
s.bloomIndexer.Close()
diff --git a/xcb/config.go b/xcb/config.go
index 65cbe8a41..bf254332c 100644
--- a/xcb/config.go
+++ b/xcb/config.go
@@ -31,14 +31,14 @@ import (
"github.com/core-coin/go-core/xcb/energyprice"
)
-// DefaultFullGPOConfig contains default gasprice oracle settings for full node.
+// DefaultFullGPOConfig contains default energyprice oracle settings for full node.
var DefaultFullGPOConfig = energyprice.Config{
Blocks: 20,
Percentile: 60,
MaxPrice: energyprice.DefaultMaxPrice,
}
-// DefaultLightGPOConfig contains default gasprice oracle settings for light client.
+// DefaultLightGPOConfig contains default energyprice oracle settings for light client.
var DefaultLightGPOConfig = energyprice.Config{
Blocks: 2,
Percentile: 60,
@@ -65,8 +65,9 @@ var DefaultConfig = Config{
EnergyPrice: big.NewInt(params.Nucle),
Recommit: 3 * time.Second,
},
- TxPool: core.DefaultTxPoolConfig,
- GPO: DefaultFullGPOConfig,
+ TxPool: core.DefaultTxPoolConfig,
+ GPO: DefaultFullGPOConfig,
+ RPCTxFeeCap: 1, // 1 core
}
func init() {
@@ -97,6 +98,8 @@ type Config struct {
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
+ TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
+
// Whitelist of required block number -> hash values to accept
Whitelist map[uint64]common.Hash `toml:"-"`
@@ -155,6 +158,10 @@ type Config struct {
// RPCEnergyCap is the global energy cap for xcb-call variants.
RPCEnergyCap *big.Int `toml:",omitempty"`
+ // RPCTxFeeCap is the global transaction fee(price * energylimit) cap for
+ // send-transaction variants. The unit is core.
+ RPCTxFeeCap float64 `toml:",omitempty"`
+
// Checkpoint is a hardcoded checkpoint which can be nil.
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
diff --git a/xcb/downloader/downloader.go b/xcb/downloader/downloader.go
index de647dad0..8c826a862 100644
--- a/xcb/downloader/downloader.go
+++ b/xcb/downloader/downloader.go
@@ -179,8 +179,8 @@ type LightChain interface {
// InsertHeaderChain inserts a batch of headers into the local chain.
InsertHeaderChain([]*types.Header, int) (int, error)
- // Rollback removes a few recently added elements from the local chain.
- Rollback([]common.Hash)
+ // SetHead rewinds the local chain to a new head.
+ SetHead(uint64) error
}
// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
@@ -222,7 +222,7 @@ func New(checkpoint uint64, stateDb xcbdb.Database, stateBloom *trie.SyncBloom,
stateBloom: stateBloom,
mux: mux,
checkpoint: checkpoint,
- queue: newQueue(),
+ queue: newQueue(blockCacheItems),
peers: newPeerSet(),
rttEstimate: uint64(rttMaxEstimate),
rttConfidence: uint64(100000),
@@ -340,9 +340,22 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
return err
}
- if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
- errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
- errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
+ if errors.Is(err, errInvalidChain) {
+ log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
+ if d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
+ } else {
+ d.dropPeer(id)
+ }
+ return err
+ }
+
+ switch err {
+ case errTimeout, errBadPeer, errStallingPeer, errUnsyncedPeer,
+ errEmptyHeaderSet, errPeersUnavailable, errTooOld,
+ errInvalidAncestor:
log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
@@ -383,7 +396,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
d.stateBloom.Close()
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
- d.queue.Reset()
+ d.queue.Reset(blockCacheItems)
d.peers.Reset()
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
@@ -488,6 +501,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
if pivotNumber <= origin {
origin = pivotNumber - 1
}
+ // Write out the pivot into the database so a rollback beyond it will
+ // reenable fast sync
+ rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
}
}
d.committed = 1
@@ -525,11 +541,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
}
// Rewind the ancient store and blockchain if reorg happens.
if origin+1 < frozen {
- var hashes []common.Hash
- for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
- hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
+ if err := d.lightchain.SetHead(origin + 1); err != nil {
+ return err
}
- d.lightchain.Rollback(hashes)
}
}
// Initiate the sync using a concurrent header and content retrieval algorithm
@@ -619,6 +633,9 @@ func (d *Downloader) Terminate() {
default:
close(d.quitCh)
}
+ if d.stateBloom != nil {
+ d.stateBloom.Close()
+ }
d.quitLock.Unlock()
// Cancel any pending download requests
@@ -827,7 +844,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
expectNumber := from + int64(i)*int64(skip+1)
if number := header.Number.Int64(); number != expectNumber {
p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
- return 0, errInvalidChain
+ return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
}
}
// Check if a common ancestor was found
@@ -905,7 +922,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
// Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers
if len(headers) != 1 {
- p.log.Debug("Multiple headers for single request", "headers", len(headers))
+ p.log.Warn("Multiple headers for single request", "headers", len(headers))
return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
}
arrived = true
@@ -1090,7 +1107,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
filled, proced, err := d.fillHeaderSkeleton(from, headers)
if err != nil {
p.log.Debug("Skeleton chain invalid", "err", err)
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
headers = filled[proced:]
from += uint64(proced)
@@ -1200,17 +1217,18 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
pack := packet.(*headerPack)
return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
}
- expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
- throttle = func() bool { return false }
- reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
- return d.queue.ReserveHeaders(p, count), false, nil
+ expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
+ reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {
+ return d.queue.ReserveHeaders(p, count), false, false
}
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
+ p.SetHeadersIdle(accepted, deliveryTime)
+ }
)
err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
- d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
+ d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
log.Debug("Skeleton fill terminated", "err", err)
@@ -1233,10 +1251,10 @@ func (d *Downloader) fetchBodies(from uint64) error {
expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }
)
err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
- d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
+ d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
log.Debug("Block body download terminated", "err", err)
@@ -1257,10 +1275,12 @@ func (d *Downloader) fetchReceipts(from uint64) error {
expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
+ p.SetReceiptsIdle(accepted, deliveryTime)
+ }
)
err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
- d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
+ d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
log.Debug("Transaction receipt download terminated", "err", err)
@@ -1293,9 +1313,9 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log mesages
func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
- expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
+ expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
- idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {
+ idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {
// Create a ticker to detect expired retrieval tasks
ticker := time.NewTicker(100 * time.Millisecond)
@@ -1311,19 +1331,20 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
return errCanceled
case packet := <-deliveryCh:
+ deliveryTime := time.Now()
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
- if err == errInvalidChain {
+ if errors.Is(err, errInvalidChain) {
return err
}
// Unless a peer delivered something completely else than requested (usually
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
- if err != errStaleDelivery {
- setIdle(peer, accepted)
+ if !errors.Is(err, errStaleDelivery) {
+ setIdle(peer, accepted, deliveryTime)
}
// Issue a log to the user to see what's going on
switch {
@@ -1376,7 +1397,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// how response times reacts, to it always requests one more than the minimum (i.e. min 2).
if fails > 2 {
peer.log.Trace("Data delivery timed out", "type", kind)
- setIdle(peer, 0)
+ setIdle(peer, 0, time.Now())
} else {
peer.log.Debug("Stalling delivery, dropping", "type", kind)
@@ -1411,27 +1432,27 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// Send a download request to all idle peers, until throttled
progressed, throttled, running := false, false, inFlight()
idles, total := idle()
-
+ pendCount := pending()
for _, peer := range idles {
// Short circuit if throttling activated
- if throttle() {
- throttled = true
+ if throttled {
break
}
// Short circuit if there is no more available task.
- if pending() == 0 {
+ if pendCount = pending(); pendCount == 0 {
break
}
// Reserve a chunk of fetches for a peer. A nil can mean either that
// no more headers are available, or that the peer is known not to
// have them.
- request, progress, err := reserve(peer, capacity(peer))
- if err != nil {
- return err
- }
+ request, progress, throttle := reserve(peer, capacity(peer))
if progress {
progressed = true
}
+ if throttle {
+ throttled = true
+ throttleCounter.Inc(1)
+ }
if request == nil {
continue
}
@@ -1456,7 +1477,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error
- if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
+ if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {
return errPeersUnavailable
}
}
@@ -1468,30 +1489,31 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// Keep a count of uncertain headers to roll back
- var rollback []*types.Header
- mode := d.getMode()
+ var (
+ rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
+ rollbackErr error
+ mode = d.getMode()
+ )
defer func() {
- if len(rollback) > 0 {
- // Flatten the headers and roll them back
- hashes := make([]common.Hash, len(rollback))
- for i, header := range rollback {
- hashes[i] = header.Hash()
- }
+ if rollback > 0 {
lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
if mode != LightSync {
lastFastBlock = d.blockchain.CurrentFastBlock().Number()
lastBlock = d.blockchain.CurrentBlock().Number()
}
- d.lightchain.Rollback(hashes)
+ if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+ // We're already unwinding the stack, only print the error to make it more visible
+ log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+ }
curFastBlock, curBlock := common.Big0, common.Big0
if mode != LightSync {
curFastBlock = d.blockchain.CurrentFastBlock().Number()
curBlock = d.blockchain.CurrentBlock().Number()
}
- log.Warn("Rolled back headers", "count", len(hashes),
+ log.Warn("Rolled back chain segment",
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
- "block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
+ "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
}
}()
@@ -1501,6 +1523,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
for {
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
case headers := <-d.headerProcCh:
@@ -1545,7 +1568,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
}
}
// Disable any rollback and return
- rollback = nil
+ rollback = 0
return nil
}
// Otherwise split the chunk of headers into batches and process them
@@ -1554,6 +1577,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// Terminate if something failed in between processing chunks
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
default:
}
@@ -1565,13 +1589,6 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
chunk := headers[:limit]
// In case of header only syncing, validate the chunk immediately
if mode == FastSync || mode == LightSync {
- // Collect the yet unknown headers to mark them as uncertain
- unknown := make([]*types.Header, 0, len(chunk))
- for _, header := range chunk {
- if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
- unknown = append(unknown, header)
- }
- }
// If we're importing pure headers, verify based on their recentness
var pivot uint64
@@ -1586,17 +1603,18 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
frequency = 1
}
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
- // If some headers were inserted, add them too to the rollback list
- if n > 0 {
- rollback = append(rollback, chunk[:n]...)
+ rollbackErr = err
+ // If some headers were inserted, track them as uncertain
+ if n > 0 && rollback == 0 {
+ rollback = chunk[0].Number.Uint64()
}
- log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
- return errInvalidChain
+ log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
- // All verifications passed, store newly found uncertain headers
- rollback = append(rollback, unknown...)
- if len(rollback) > fsHeaderSafetyNet {
- rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
+ // All verifications passed, track all headers within the allotted limits
+ head := chunk[len(chunk)-1].Number.Uint64()
+ if head-rollback > uint64(fsHeaderSafetyNet) {
+ rollback = head - uint64(fsHeaderSafetyNet)
}
}
// Unless we're doing light chains, schedule the headers for associated content retrieval
@@ -1605,6 +1623,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
case <-time.After(time.Second):
}
@@ -1612,7 +1631,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// Otherwise insert the headers for content retrieval
inserts := d.queue.Schedule(chunk, origin)
if len(inserts) != len(chunk) {
- log.Debug("Stale headers")
+ rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
return fmt.Errorf("%w: stale headers", errBadPeer)
}
}
@@ -1683,7 +1702,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
// of the blocks delivered from the downloader, and the indexing will be off.
log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
}
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
return nil
}
@@ -1764,6 +1783,10 @@ func (d *Downloader) processFastSyncContent() error {
d.pivotLock.Lock()
d.pivotHeader = pivot
d.pivotLock.Unlock()
+
+ // Write out the pivot into the database so a rollback beyond it will
+ // reenable fast sync
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
}
}
P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
@@ -1804,6 +1827,14 @@ func (d *Downloader) processFastSyncContent() error {
}
func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
+ if len(results) == 0 {
+ return nil, nil, nil
+ }
+ if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
+ // the pivot is somewhere in the future
+ return nil, results, nil
+ }
+ // This can also be optimized, but only happens very seldom
for _, result := range results {
num := result.Header.Number.Uint64()
switch {
@@ -1846,7 +1877,7 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state
}
if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
return nil
}
diff --git a/xcb/downloader/downloader_test.go b/xcb/downloader/downloader_test.go
index 771197e2e..21779204b 100644
--- a/xcb/downloader/downloader_test.go
+++ b/xcb/downloader/downloader_test.go
@@ -145,7 +145,12 @@ func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
dl.lock.RLock()
defer dl.lock.RUnlock()
+ return dl.getHeaderByHash(hash)
+}
+// getHeaderByHash returns the header if found either within ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock)
+func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
header := dl.ancientHeaders[hash]
if header != nil {
return header
@@ -232,7 +237,13 @@ func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
dl.lock.RLock()
defer dl.lock.RUnlock()
+ return dl.getTd(hash)
+}
+// getTd retrieves the block's total difficulty if found either within
+// ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock)
+func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
if td := dl.ancientChainTd[hash]; td != nil {
return td
}
@@ -245,25 +256,33 @@ func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq i
defer dl.lock.Unlock()
// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
- if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
- return 0, errors.New("unknown parent")
+ if dl.getHeaderByHash(headers[0].ParentHash) == nil {
+ return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
}
+ var hashes []common.Hash
for i := 1; i < len(headers); i++ {
+ hash := headers[i-1].Hash()
if headers[i].ParentHash != headers[i-1].Hash() {
- return i, errors.New("unknown parent")
+ return i, fmt.Errorf("non-contiguous import at position %d", i)
}
+ hashes = append(hashes, hash)
}
+ hashes = append(hashes, headers[len(headers)-1].Hash())
// Do a full insert if pre-checks passed
for i, header := range headers {
- if _, ok := dl.ownHeaders[header.Hash()]; ok {
+ hash := hashes[i]
+ if dl.getHeaderByHash(hash) != nil {
continue
}
- if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
- return i, errors.New("unknown parent")
+ if dl.getHeaderByHash(header.ParentHash) == nil {
+ // This _should_ be impossible, due to precheck and induction
+ return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
}
- dl.ownHashes = append(dl.ownHashes, header.Hash())
- dl.ownHeaders[header.Hash()] = header
- dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty)
+ dl.ownHashes = append(dl.ownHashes, hash)
+ dl.ownHeaders[hash] = header
+
+ td := dl.getTd(header.ParentHash)
+ dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
}
return len(headers), nil
}
@@ -275,18 +294,20 @@ func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
for i, block := range blocks {
if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
- return i, errors.New("unknown parent")
+ return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
- return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
+ return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
}
- if _, ok := dl.ownHeaders[block.Hash()]; !ok {
+ if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
dl.ownHashes = append(dl.ownHashes, block.Hash())
dl.ownHeaders[block.Hash()] = block.Header()
}
dl.ownBlocks[block.Hash()] = block
dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
- dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty())
+
+ td := dl.getTd(block.ParentHash())
+ dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
}
return len(blocks), nil
}
@@ -302,7 +323,7 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
}
if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
- return i, errors.New("unknown parent")
+ return i, errors.New("InsertReceiptChain: unknown parent")
}
}
if blocks[i].NumberU64() <= ancientLimit {
@@ -323,25 +344,52 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
return len(blocks), nil
}
-// Rollback removes some recently added elements from the chain.
-func (dl *downloadTester) Rollback(hashes []common.Hash) {
+// SetHead rewinds the local chain to a new head.
+func (dl *downloadTester) SetHead(head uint64) error {
dl.lock.Lock()
defer dl.lock.Unlock()
- for i := len(hashes) - 1; i >= 0; i-- {
- if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
- dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
+ // Find the hash of the head to reset to
+ var hash common.Hash
+ for h, header := range dl.ownHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
}
- delete(dl.ownChainTd, hashes[i])
- delete(dl.ownHeaders, hashes[i])
- delete(dl.ownReceipts, hashes[i])
- delete(dl.ownBlocks, hashes[i])
+ }
+ for h, header := range dl.ancientHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
+ }
+ }
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("unknown head to set: %d", head)
+ }
+ // Find the offset in the header chain
+ var offset int
+ for o, h := range dl.ownHashes {
+ if h == hash {
+ offset = o
+ break
+ }
+ }
+ // Remove all the hashes and associated data afterwards
+ for i := offset + 1; i < len(dl.ownHashes); i++ {
+ delete(dl.ownChainTd, dl.ownHashes[i])
+ delete(dl.ownHeaders, dl.ownHashes[i])
+ delete(dl.ownReceipts, dl.ownHashes[i])
+ delete(dl.ownBlocks, dl.ownHashes[i])
- delete(dl.ancientChainTd, hashes[i])
- delete(dl.ancientHeaders, hashes[i])
- delete(dl.ancientReceipts, hashes[i])
- delete(dl.ancientBlocks, hashes[i])
+ delete(dl.ancientChainTd, dl.ownHashes[i])
+ delete(dl.ancientHeaders, dl.ownHashes[i])
+ delete(dl.ancientReceipts, dl.ownHashes[i])
+ delete(dl.ancientBlocks, dl.ownHashes[i])
}
+ dl.ownHashes = dl.ownHashes[:offset+1]
+ return nil
+}
+
+// Rollback removes some recently added elements from the chain.
+func (dl *downloadTester) Rollback(hashes []common.Hash) {
}
// newPeer registers a new block download source into the downloader.
@@ -511,7 +559,6 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
- defer tester.terminate()
// Create a long block chain to download and the tester
targetBlocks := testChainBase.len() - 1
@@ -543,19 +590,20 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
time.Sleep(25 * time.Millisecond)
tester.lock.Lock()
- tester.downloader.queue.lock.Lock()
- cached = len(tester.downloader.queue.blockDonePool)
- if mode == FastSync {
- if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached {
- cached = receipts
- }
+ {
+ tester.downloader.queue.resultCache.lock.Lock()
+ cached = tester.downloader.queue.resultCache.countCompleted()
+ tester.downloader.queue.resultCache.lock.Unlock()
+ frozen = int(atomic.LoadUint32(&blocked))
+ retrieved = len(tester.ownBlocks)
+
}
- frozen = int(atomic.LoadUint32(&blocked))
- retrieved = len(tester.ownBlocks)
- tester.downloader.queue.lock.Unlock()
tester.lock.Unlock()
- if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
+ if cached == blockCacheItems ||
+ cached == blockCacheItems-reorgProtHeaderDelay ||
+ retrieved+cached+frozen == targetBlocks+1 ||
+ retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
break
}
}
@@ -579,6 +627,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
if err := <-errc; err != nil {
t.Fatalf("block synchronization failed: %v", err)
}
+ tester.terminate()
}
// Tests that simple synchronization against a forked chain works correctly. In
@@ -695,13 +744,11 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
- defer tester.terminate()
// Create a long enough forked chain
chainA := testChainForkLightA
chainB := testChainForkHeavy
tester.newPeer("original", protocol, chainA)
- tester.newPeer("heavy-rewriter", protocol, chainB)
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
@@ -709,10 +756,12 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
}
assertOwnChain(t, tester, chainA.len())
+ tester.newPeer("heavy-rewriter", protocol, chainB)
// Synchronise with the second peer and ensure that the fork is rejected to being too old
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
+ tester.terminate()
}
// Tests that an inactive downloader will not accept incoming block headers and
@@ -825,7 +874,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
- defer tester.terminate()
// Create a small enough block chain to download
chain := testChainBase.shorten(blockCacheItems - 15)
@@ -848,6 +896,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
t.Errorf("%s dropped", peer)
}
}
+ tester.terminate()
}
// Tests that if a block is empty (e.g. header only), no body request should be
@@ -977,6 +1026,7 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
+ t.Skip("skip long-running tests")
t.Parallel()
tester := newTester()
@@ -1075,13 +1125,13 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
- defer tester.terminate()
chain := testChainBase.shorten(1)
tester.newPeer("attack", protocol, chain)
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
}
+ tester.terminate()
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
diff --git a/xcb/downloader/metrics.go b/xcb/downloader/metrics.go
index 3734cc08d..55e37cb67 100644
--- a/xcb/downloader/metrics.go
+++ b/xcb/downloader/metrics.go
@@ -40,4 +40,6 @@ var (
stateInMeter = metrics.NewRegisteredMeter("xcb/downloader/states/in", nil)
stateDropMeter = metrics.NewRegisteredMeter("xcb/downloader/states/drop", nil)
+
+ throttleCounter = metrics.NewRegisteredCounter("xcb/downloader/throttle", nil)
)
diff --git a/xcb/downloader/peer.go b/xcb/downloader/peer.go
index e0fde83b5..23baf233c 100644
--- a/xcb/downloader/peer.go
+++ b/xcb/downloader/peer.go
@@ -117,8 +117,7 @@ func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *pe
return &peerConnection{
id: id,
lacking: make(map[common.Hash]struct{}),
-
- peer: peer,
+ peer: peer,
version: version,
log: logger,
@@ -173,12 +172,14 @@ func (p *peerConnection) FetchBodies(request *fetchRequest) error {
}
p.blockStarted = time.Now()
- // Convert the header set to a retrievable slice
- hashes := make([]common.Hash, 0, len(request.Headers))
- for _, header := range request.Headers {
- hashes = append(hashes, header.Hash())
- }
- go p.peer.RequestBodies(hashes)
+ go func() {
+ // Convert the header set to a retrievable slice
+ hashes := make([]common.Hash, 0, len(request.Headers))
+ for _, header := range request.Headers {
+ hashes = append(hashes, header.Hash())
+ }
+ p.peer.RequestBodies(hashes)
+ }()
return nil
}
@@ -195,12 +196,14 @@ func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
}
p.receiptStarted = time.Now()
- // Convert the header set to a retrievable slice
- hashes := make([]common.Hash, 0, len(request.Headers))
- for _, header := range request.Headers {
- hashes = append(hashes, header.Hash())
- }
- go p.peer.RequestReceipts(hashes)
+ go func() {
+ // Convert the header set to a retrievable slice
+ hashes := make([]common.Hash, 0, len(request.Headers))
+ for _, header := range request.Headers {
+ hashes = append(hashes, header.Hash())
+ }
+ p.peer.RequestReceipts(hashes)
+ }()
return nil
}
@@ -225,47 +228,48 @@ func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
-func (p *peerConnection) SetHeadersIdle(delivered int) {
- p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
+func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
+ p.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle)
}
// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
-func (p *peerConnection) SetBodiesIdle(delivered int) {
- p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle)
+func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
+ p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle)
}
// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
-func (p *peerConnection) SetReceiptsIdle(delivered int) {
- p.setIdle(p.receiptStarted, delivered, &p.receiptThroughput, &p.receiptIdle)
+func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
+ p.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle)
}
// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
-func (p *peerConnection) SetNodeDataIdle(delivered int) {
- p.setIdle(p.stateStarted, delivered, &p.stateThroughput, &p.stateIdle)
+func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
+ p.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle)
}
// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
// Its estimated retrieval throughput is updated with that measured just now.
-func (p *peerConnection) setIdle(started time.Time, delivered int, throughput *float64, idle *int32) {
+func (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) {
// Irrelevant of the scaling, make sure the peer ends up idle
defer atomic.StoreInt32(idle, 0)
p.lock.Lock()
defer p.lock.Unlock()
-
// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
if delivered == 0 {
*throughput = 0
return
}
// Otherwise update the throughput with a new measurement
- elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
+ if elapsed <= 0 {
+ elapsed = 1 // +1 (ns) to ensure non-zero divisor
+ }
measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
@@ -522,23 +526,21 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
ps.lock.RLock()
defer ps.lock.RUnlock()
+ tps := make([]float64, 0, len(ps.peers))
idle, total := make([]*peerConnection, 0, len(ps.peers)), 0
for _, p := range ps.peers {
if p.version >= minProtocol && p.version <= maxProtocol {
if idleCheck(p) {
idle = append(idle, p)
+ tps = append(tps, throughput(p))
}
total++
}
}
- for i := 0; i < len(idle); i++ {
- for j := i + 1; j < len(idle); j++ {
- if throughput(idle[i]) < throughput(idle[j]) {
- idle[i], idle[j] = idle[j], idle[i]
- }
- }
- }
- return idle, total
+ // And sort them
+ sortPeers := &peerThroughputSort{idle, tps}
+ sort.Sort(sortPeers)
+ return sortPeers.p, total
}
// medianRTT returns the median RTT of the peerset, considering only the tuning
@@ -571,3 +573,24 @@ func (ps *peerSet) medianRTT() time.Duration {
}
return median
}
+
+// peerThroughputSort implements the Sort interface, and allows for
+// sorting a set of peers by their throughput
+// The sorted data is with the _highest_ throughput first
+type peerThroughputSort struct {
+ p []*peerConnection
+ tp []float64
+}
+
+func (ps *peerThroughputSort) Len() int {
+ return len(ps.p)
+}
+
+func (ps *peerThroughputSort) Less(i, j int) bool {
+ return ps.tp[i] > ps.tp[j]
+}
+
+func (ps *peerThroughputSort) Swap(i, j int) {
+ ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
+ ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
+}
diff --git a/xcb/downloader/peer_test.go b/xcb/downloader/peer_test.go
new file mode 100644
index 000000000..2ed6010d8
--- /dev/null
+++ b/xcb/downloader/peer_test.go
@@ -0,0 +1,53 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "sort"
+ "testing"
+)
+
+func TestPeerThroughputSorting(t *testing.T) {
+ a := &peerConnection{
+ id: "a",
+ headerThroughput: 1.25,
+ }
+ b := &peerConnection{
+ id: "b",
+ headerThroughput: 1.21,
+ }
+ c := &peerConnection{
+ id: "c",
+ headerThroughput: 1.23,
+ }
+
+ peers := []*peerConnection{a, b, c}
+ tps := []float64{a.headerThroughput,
+ b.headerThroughput, c.headerThroughput}
+ sortPeers := &peerThroughputSort{peers, tps}
+ sort.Sort(sortPeers)
+ if got, exp := sortPeers.p[0].id, "a"; got != exp {
+ t.Errorf("sort fail, got %v exp %v", got, exp)
+ }
+ if got, exp := sortPeers.p[1].id, "c"; got != exp {
+ t.Errorf("sort fail, got %v exp %v", got, exp)
+ }
+ if got, exp := sortPeers.p[2].id, "b"; got != exp {
+ t.Errorf("sort fail, got %v exp %v", got, exp)
+ }
+
+}
diff --git a/xcb/downloader/queue.go b/xcb/downloader/queue.go
index 6f03e604a..cd32b34ab 100644
--- a/xcb/downloader/queue.go
+++ b/xcb/downloader/queue.go
@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"sync"
+ "sync/atomic"
"time"
"github.com/core-coin/go-core/common"
@@ -33,6 +34,11 @@ import (
"github.com/core-coin/go-core/trie"
)
+const (
+ bodyType = uint(0)
+ receiptType = uint(1)
+)
+
var (
blockCacheItems = 8192 // Maximum number of blocks to cache before throttling the download
blockCacheMemory = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
@@ -55,8 +61,7 @@ type fetchRequest struct {
// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
- Pending int // Number of data fetches still pending
- Hash common.Hash // Hash of the header to prevent recalculating
+ pending int32 // Flag telling what deliveries are outstanding
Header *types.Header
Uncles []*types.Header
@@ -64,6 +69,44 @@ type fetchResult struct {
Receipts types.Receipts
}
+func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
+ item := &fetchResult{
+ Header: header,
+ }
+ if !header.EmptyBody() {
+ item.pending |= (1 << bodyType)
+ }
+ if fastSync && !header.EmptyReceipts() {
+ item.pending |= (1 << receiptType)
+ }
+ return item
+}
+
+// SetBodyDone flags the body as finished.
+func (f *fetchResult) SetBodyDone() {
+ if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
+ atomic.AddInt32(&f.pending, -1)
+ }
+}
+
+// AllDone checks if item is done.
+func (f *fetchResult) AllDone() bool {
+ return atomic.LoadInt32(&f.pending) == 0
+}
+
+// SetReceiptsDone flags the receipts as finished.
+func (f *fetchResult) SetReceiptsDone() {
+ if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
+ atomic.AddInt32(&f.pending, -2)
+ }
+}
+
+// Done checks if the given type is done already
+func (f *fetchResult) Done(kind uint) bool {
+ v := atomic.LoadInt32(&f.pending)
+ return v&(1< common.StorageSize(blockCacheMemory) {
- limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
- }
- // Calculate the number of slots already finished
- finished := 0
- for _, result := range q.resultCache[:limit] {
- if result == nil {
- break
- }
- if _, ok := donePool[result.Hash]; ok {
- finished++
- }
- }
- // Calculate the number of slots currently downloading
- pending := 0
- for _, request := range pendPool {
- for _, header := range request.Headers {
- if header.Number.Uint64() < q.resultOffset+uint64(limit) {
- pending++
- }
- }
- }
- // Return the free slots to distribute
- return limit - finished - pending
+ return (queued + pending) == 0
}
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
@@ -325,21 +305,22 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
break
}
// Make sure no duplicate requests are executed
+ // We cannot skip this, even if the block is empty, since this is
+ // what triggers the fetchResult creation.
if _, ok := q.blockTaskPool[hash]; ok {
log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
- continue
- }
- if _, ok := q.receiptTaskPool[hash]; ok {
- log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
- continue
+ } else {
+ q.blockTaskPool[hash] = header
+ q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
}
- // Queue the header for content retrieval
- q.blockTaskPool[hash] = header
- q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
-
- if q.mode == FastSync {
- q.receiptTaskPool[hash] = header
- q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ // Queue for receipt retrieval
+ if q.mode == FastSync && !header.EmptyReceipts() {
+ if _, ok := q.receiptTaskPool[hash]; ok {
+ log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
+ } else {
+ q.receiptTaskPool[hash] = header
+ q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
}
inserts = append(inserts, header)
q.headerHead = hash
@@ -350,66 +331,77 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
// Results retrieves and permanently removes a batch of fetch results from
// the cache. the result slice will be empty if the queue has been closed.
+// Results can be called concurrently with Deliver and Schedule,
+// but assumes that there are not two simultaneous callers to Results
func (q *queue) Results(block bool) []*fetchResult {
- q.lock.Lock()
- defer q.lock.Unlock()
-
- // Count the number of items available for processing
- nproc := q.countProcessableItems()
- for nproc == 0 && !q.closed {
- if !block {
- return nil
+ // Abort early if there are no items and non-blocking requested
+ if !block && !q.resultCache.HasCompletedItems() {
+ return nil
+ }
+ closed := false
+ for !closed && !q.resultCache.HasCompletedItems() {
+ // In order to wait on 'active', we need to obtain the lock.
+ // That may take a while, if someone is delivering at the same
+ // time, so after obtaining the lock, we check again if there
+ // are any results to fetch.
+ // Also, in-between we ask for the lock and the lock is obtained,
+ // someone can have closed the queue. In that case, we should
+ // return the available results and stop blocking
+ q.lock.Lock()
+ if q.resultCache.HasCompletedItems() || q.closed {
+ q.lock.Unlock()
+ break
}
+ // No items available, and not closed
q.active.Wait()
- nproc = q.countProcessableItems()
- }
- // Since we have a batch limit, don't pull more into "dangling" memory
- if nproc > maxResultsProcess {
- nproc = maxResultsProcess
- }
- results := make([]*fetchResult, nproc)
- copy(results, q.resultCache[:nproc])
- if len(results) > 0 {
- // Mark results as done before dropping them from the cache.
- for _, result := range results {
- hash := result.Header.Hash()
- delete(q.blockDonePool, hash)
- delete(q.receiptDonePool, hash)
+ closed = q.closed
+ q.lock.Unlock()
+ }
+ // Regardless if closed or not, we can still deliver whatever we have
+ results := q.resultCache.GetCompleted(maxResultsProcess)
+ for _, result := range results {
+ // Recalculate the result item weights to prevent memory exhaustion
+ size := result.Header.Size()
+ for _, uncle := range result.Uncles {
+ size += uncle.Size()
}
- // Delete the results from the cache and clear the tail.
- copy(q.resultCache, q.resultCache[nproc:])
- for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
- q.resultCache[i] = nil
+ for _, receipt := range result.Receipts {
+ size += receipt.Size()
}
- // Advance the expected block number of the first cache entry.
- q.resultOffset += uint64(nproc)
-
- // Recalculate the result item weights to prevent memory exhaustion
- for _, result := range results {
- size := result.Header.Size()
- for _, uncle := range result.Uncles {
- size += uncle.Size()
- }
- for _, receipt := range result.Receipts {
- size += receipt.Size()
- }
- for _, tx := range result.Transactions {
- size += tx.Size()
- }
- q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
+ for _, tx := range result.Transactions {
+ size += tx.Size()
}
+ q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
+ (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
+ }
+ // Using the newly calibrated resultsize, figure out the new throttle limit
+ // on the result cache
+ throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
+ throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
+
+ // Log some info at certain times
+ if time.Since(q.lastStatLog) > 10*time.Second {
+ q.lastStatLog = time.Now()
+ info := q.Stats()
+ info = append(info, "throttle", throttleThreshold)
+ log.Info("Downloader queue stats", info...)
}
return results
}
-// countProcessableItems counts the processable items.
-func (q *queue) countProcessableItems() int {
- for i, result := range q.resultCache {
- if result == nil || result.Pending > 0 {
- return i
- }
+func (q *queue) Stats() []interface{} {
+ q.lock.RLock()
+ defer q.lock.RUnlock()
+
+ return q.stats()
+}
+
+func (q *queue) stats() []interface{} {
+ return []interface{}{
+ "receiptTasks", q.receiptTaskQueue.Size(),
+ "blockTasks", q.blockTaskQueue.Size(),
+ "itemSize", q.resultSize,
}
- return len(q.resultCache)
}
// ReserveHeaders reserves a set of headers for the given peer, skipping any
@@ -417,7 +409,6 @@ func (q *queue) countProcessableItems() int {
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
-
// Short circuit if the peer's already downloading something (sanity check to
// not corrupt state)
if _, ok := q.headerPendPool[p.id]; ok {
@@ -455,27 +446,21 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
-func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
- isNoop := func(header *types.Header) bool {
- return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
- }
+func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
q.lock.Lock()
defer q.lock.Unlock()
- return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
+ return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}
// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
-func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
- isNoop := func(header *types.Header) bool {
- return header.ReceiptHash == types.EmptyRootHash
- }
+func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
q.lock.Lock()
defer q.lock.Unlock()
- return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
+ return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}
// reserveHeaders reserves a set of data download operations for a given peer,
@@ -485,57 +470,71 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
+//
+// Returns:
+// item - the fetchRequest
+// progress - whether any progress was made
+// throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
- pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
+ pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
if taskQueue.Empty() {
- return nil, false, nil
+ return nil, false, true
}
if _, ok := pendPool[p.id]; ok {
- return nil, false, nil
+ return nil, false, false
}
- // Calculate an upper limit on the items we might fetch (i.e. throttling)
- space := q.resultSlots(pendPool, donePool)
-
// Retrieve a batch of tasks, skipping previously failed ones
send := make([]*types.Header, 0, count)
skip := make([]*types.Header, 0)
-
progress := false
- for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
- header := taskQueue.PopItem().(*types.Header)
- hash := header.Hash()
-
- // If we're the first to request this task, initialise the result container
- index := int(header.Number.Int64() - int64(q.resultOffset))
- if index >= len(q.resultCache) || index < 0 {
- common.Report("index allocation went beyond available resultCache space")
- return nil, false, errInvalidChain
+ throttled := false
+ for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
+ // the task queue will pop items in order, so the highest prio block
+ // is also the lowest block number.
+ h, _ := taskQueue.Peek()
+ header := h.(*types.Header)
+ // we can ask the resultcache if this header is within the
+ // "prioritized" segment of blocks. If it is not, we need to throttle
+
+ stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
+ if stale {
+ // Don't put back in the task queue, this item has already been
+ // delivered upstream
+ taskQueue.PopItem()
+ progress = true
+ delete(taskPool, header.Hash())
+ proc = proc - 1
+ log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
+ continue
}
- if q.resultCache[index] == nil {
- components := 1
- if q.mode == FastSync {
- components = 2
- }
- q.resultCache[index] = &fetchResult{
- Pending: components,
- Hash: hash,
- Header: header,
- }
+ if throttle {
+ // There are no resultslots available. Leave it in the task queue
+ // However, if there are any left as 'skipped', we should not tell
+ // the caller to throttle, since we still want some other
+ // peer to fetch those for us
+ throttled = len(skip) == 0
+ break
}
- // If this fetch task is a noop, skip this fetch operation
- if isNoop(header) {
- donePool[hash] = struct{}{}
- delete(taskPool, hash)
-
- space, proc = space-1, proc-1
- q.resultCache[index].Pending--
+ if err != nil {
+ // this most definitely should _not_ happen
+ log.Warn("Failed to reserve headers", "err", err)
+ // There are no resultslots available. Leave it in the task queue
+ break
+ }
+ if item.Done(kind) {
+ // If it's a noop, we can skip this task
+ delete(taskPool, header.Hash())
+ taskQueue.PopItem()
+ proc = proc - 1
progress = true
continue
}
+ // Remove it from the task queue
+ taskQueue.PopItem()
// Otherwise unless the peer is known not to have the data, add to the retrieve list
- if p.Lacks(hash) {
+ if p.Lacks(header.Hash()) {
skip = append(skip, header)
} else {
send = append(send, header)
@@ -545,13 +544,13 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
for _, header := range skip {
taskQueue.Push(header, -int64(header.Number.Uint64()))
}
- if progress {
+ if q.resultCache.HasCompletedItems() {
// Wake Results, resultCache was modified
q.active.Signal()
}
// Assemble and return the block download request
if len(send) == 0 {
- return nil, progress, nil
+ return nil, progress, throttled
}
request := &fetchRequest{
Peer: p,
@@ -559,8 +558,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
Time: time.Now(),
}
pendPool[p.id] = request
-
- return request, progress, nil
+ return request, progress, throttled
}
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
@@ -774,15 +772,23 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
q.lock.Lock()
defer q.lock.Unlock()
- reconstruct := func(header *types.Header, index int, result *fetchResult) error {
- if types.DeriveSha(types.Transactions(txLists[index]), new(trie.Trie)) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
+ validate := func(index int, header *types.Header) error {
+ if types.DeriveSha(types.Transactions(txLists[index]), new(trie.Trie)) != header.TxHash {
+ return errInvalidBody
+ }
+ if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
return errInvalidBody
}
+ return nil
+ }
+
+ reconstruct := func(index int, result *fetchResult) {
result.Transactions = txLists[index]
result.Uncles = uncleLists[index]
- return nil
+ result.SetBodyDone()
}
- return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
+ return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
+ bodyReqTimer, len(txLists), validate, reconstruct)
}
// DeliverReceipts injects a receipt retrieval response into the results queue.
@@ -792,24 +798,29 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int,
q.lock.Lock()
defer q.lock.Unlock()
- reconstruct := func(header *types.Header, index int, result *fetchResult) error {
+ validate := func(index int, header *types.Header) error {
if types.DeriveSha(types.Receipts(receiptList[index]), new(trie.Trie)) != header.ReceiptHash {
return errInvalidReceipt
}
- result.Receipts = receiptList[index]
return nil
}
- return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
+ reconstruct := func(index int, result *fetchResult) {
+ result.Receipts = receiptList[index]
+ result.SetReceiptsDone()
+ }
+ return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
+ receiptReqTimer, len(receiptList), validate, reconstruct)
}
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
-// reason the lock is not obtained in here is because the parameters already need
+// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
-func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
- pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
- results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {
+func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
+ taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
+ results int, validate func(index int, header *types.Header) error,
+ reconstruct func(index int, result *fetchResult)) (int, error) {
// Short circuit if the data was never requested
request := pendPool[id]
@@ -829,53 +840,56 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
var (
accepted int
failure error
- useful bool
+ i int
+ hashes []common.Hash
)
- for i, header := range request.Headers {
+ for _, header := range request.Headers {
// Short circuit assembly if no more fetch results are found
if i >= results {
break
}
- // Reconstruct the next result if contents match up
- index := int(header.Number.Int64() - int64(q.resultOffset))
- if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
- failure = errInvalidChain
- break
- }
- if err := reconstruct(header, i, q.resultCache[index]); err != nil {
+ // Validate the fields
+ if err := validate(i, header); err != nil {
failure = err
break
}
- hash := header.Hash()
-
- donePool[hash] = struct{}{}
- q.resultCache[index].Pending--
- useful = true
- accepted++
+ hashes = append(hashes, header.Hash())
+ i++
+ }
+ for _, header := range request.Headers[:i] {
+ if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
+ reconstruct(accepted, res)
+ } else {
+ // else: between here and above, some other peer filled this result,
+ // or it was indeed a no-op. This should not happen, but if it does it's
+ // not something to panic about
+ log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
+ failure = errStaleDelivery
+ }
// Clean up a successful fetch
- request.Headers[i] = nil
- delete(taskPool, hash)
+ delete(taskPool, hashes[accepted])
+ accepted++
}
// Return all failed or missing fetches to the queue
- for _, header := range request.Headers {
- if header != nil {
- taskQueue.Push(header, -int64(header.Number.Uint64()))
- }
+ for _, header := range request.Headers[accepted:] {
+ taskQueue.Push(header, -int64(header.Number.Uint64()))
}
// Wake up Results
if accepted > 0 {
q.active.Signal()
}
+ if failure == nil {
+ return accepted, nil
+ }
// If none of the data was good, it's a stale delivery
- switch {
- case failure == nil || failure == errInvalidChain:
+ if errors.Is(failure, errInvalidChain) {
return accepted, failure
- case useful:
+ }
+ if accepted > 0 {
return accepted, fmt.Errorf("partial failure: %v", failure)
- default:
- return accepted, errStaleDelivery
}
+ return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}
// Prepare configures the result cache to allow accepting and caching inbound
@@ -885,8 +899,6 @@ func (q *queue) Prepare(offset uint64, mode SyncMode) {
defer q.lock.Unlock()
// Prepare the queue for sync results
- if q.resultOffset < offset {
- q.resultOffset = offset
- }
+ q.resultCache.Prepare(offset)
q.mode = mode
}
diff --git a/xcb/downloader/queue_test.go b/xcb/downloader/queue_test.go
new file mode 100644
index 000000000..fc24abe63
--- /dev/null
+++ b/xcb/downloader/queue_test.go
@@ -0,0 +1,425 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "github.com/core-coin/go-core/common"
+ "github.com/core-coin/go-core/consensus/cryptore"
+ "github.com/core-coin/go-core/core"
+ "github.com/core-coin/go-core/core/rawdb"
+ "github.com/core-coin/go-core/core/types"
+ "github.com/core-coin/go-core/log"
+ "github.com/core-coin/go-core/params"
+ "math/big"
+ "math/rand"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ testdb = rawdb.NewMemoryDatabase()
+ genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
+)
+
+// makeChain creates a chain of n blocks starting at and including parent.
+// the returned hash chain is ordered head->parent. In addition, every 3rd block
+// contains a transaction and every 5th an uncle to allow testing correct block
+// reassembly.
+func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
+ blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, cryptore.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
+ block.SetCoinbase(common.Address{seed})
+ // Add one tx to every second block
+ if !empty && i%2 == 0 {
+ signer := types.MakeSigner(big.NewInt(common.Devin))
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxEnergy, nil, nil), signer, testKey)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ }
+ })
+ return blocks, receipts
+}
+
+type chainData struct {
+ blocks []*types.Block
+ offset int
+}
+
+var chain *chainData
+var emptyChain *chainData
+
+func init() {
+ // Create a chain of blocks to import
+ targetBlocks := 128
+ blocks, _ := makeChain(targetBlocks, 0, genesis, false)
+ chain = &chainData{blocks, 0}
+
+ blocks, _ = makeChain(targetBlocks, 0, genesis, true)
+ emptyChain = &chainData{blocks, 0}
+}
+
+func (chain *chainData) headers() []*types.Header {
+ hdrs := make([]*types.Header, len(chain.blocks))
+ for i, b := range chain.blocks {
+ hdrs[i] = b.Header()
+ }
+ return hdrs
+}
+
+func (chain *chainData) Len() int {
+ return len(chain.blocks)
+}
+
+func dummyPeer(id string) *peerConnection {
+ p := &peerConnection{
+ id: id,
+ lacking: make(map[common.Hash]struct{}),
+ }
+ return p
+}
+
+func TestBasics(t *testing.T) {
+ q := newQueue(10)
+ if !q.Idle() {
+ t.Errorf("new queue should be idle")
+ }
+ q.Prepare(1, FastSync)
+ if res := q.Results(false); len(res) != 0 {
+ t.Fatal("new queue should have 0 results")
+ }
+
+ // Schedule a batch of headers
+ q.Schedule(chain.headers(), 1)
+ if q.Idle() {
+ t.Errorf("queue should not be idle")
+ }
+ if got, exp := q.PendingBlocks(), chain.Len(); got != exp {
+ t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
+ }
+ // Only non-empty receipts get added to task-queue
+ if got, exp := q.PendingReceipts(), 64; got != exp {
+ t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
+ }
+ // Items are now queued for downloading, next step is that we tell the
+ // queue that a certain peer will deliver them for us
+ {
+ peer := dummyPeer("peer-1")
+ fetchReq, _, throttle := q.ReserveBodies(peer, 50)
+ if !throttle {
+ // queue size is only 10, so throttling should occur
+ t.Fatal("should throttle")
+ }
+ // But we should still get the first things to fetch
+ if got, exp := len(fetchReq.Headers), 5; got != exp {
+ t.Fatalf("expected %d requests, got %d", exp, got)
+ }
+ if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
+ t.Fatalf("expected header %d, got %d", exp, got)
+ }
+ }
+ {
+ peer := dummyPeer("peer-2")
+ fetchReq, _, throttle := q.ReserveBodies(peer, 50)
+
+ // The second peer should hit throttling
+ if !throttle {
+ t.Fatalf("should not throttle")
+ }
+ // And not get any fetches at all, since it was throttled to begin with
+ if fetchReq != nil {
+ t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
+ }
+ }
+ //fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size())
+ //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
+ {
+ // The receipt delivering peer should not be affected
+ // by the throttling of body deliveries
+ peer := dummyPeer("peer-3")
+ fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
+ if !throttle {
+ // queue size is only 10, so throttling should occur
+ t.Fatal("should throttle")
+ }
+ // But we should still get the first things to fetch
+ if got, exp := len(fetchReq.Headers), 5; got != exp {
+ t.Fatalf("expected %d requests, got %d", exp, got)
+ }
+ if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
+ t.Fatalf("expected header %d, got %d", exp, got)
+ }
+
+ }
+ //fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size())
+ //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
+ //fmt.Printf("processable: %d\n", q.resultCache.countCompleted())
+}
+
+func TestEmptyBlocks(t *testing.T) {
+ q := newQueue(10)
+
+ q.Prepare(1, FastSync)
+ // Schedule a batch of headers
+ q.Schedule(emptyChain.headers(), 1)
+ if q.Idle() {
+ t.Errorf("queue should not be idle")
+ }
+ if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp {
+ t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
+ }
+ if got, exp := q.PendingReceipts(), 0; got != exp {
+ t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
+ }
+ // They won't be processable, because the fetchresults haven't been
+ // created yet
+ if got, exp := q.resultCache.countCompleted(), 0; got != exp {
+ t.Errorf("wrong processable count, got %d, exp %d", got, exp)
+ }
+
+ // Items are now queued for downloading, next step is that we tell the
+ // queue that a certain peer will deliver them for us
+ // That should trigger all of them to suddenly become 'done'
+ {
+ // Reserve blocks
+ peer := dummyPeer("peer-1")
+ fetchReq, _, _ := q.ReserveBodies(peer, 50)
+
+ // there should be nothing to fetch, blocks are empty
+ if fetchReq != nil {
+ t.Fatal("there should be no body fetch tasks remaining")
+ }
+
+ }
+ if q.blockTaskQueue.Size() != len(emptyChain.blocks)-10 {
+ t.Errorf("expected block task queue to be 0, got %d", q.blockTaskQueue.Size())
+ }
+ if q.receiptTaskQueue.Size() != 0 {
+ t.Errorf("expected receipt task queue to be 0, got %d", q.receiptTaskQueue.Size())
+ }
+ //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size())
+ {
+ peer := dummyPeer("peer-3")
+ fetchReq, _, _ := q.ReserveReceipts(peer, 50)
+
+ // there should be nothing to fetch, blocks are empty
+ if fetchReq != nil {
+ t.Fatal("there should be no body fetch tasks remaining")
+ }
+ }
+ if got, exp := q.resultCache.countCompleted(), 10; got != exp {
+ t.Errorf("wrong processable count, got %d, exp %d", got, exp)
+ }
+}
+
+// XTestDelivery does some more extensive testing of events that happen,
+// blocks that become known and peers that make reservations and deliveries.
+// disabled since it's not really a unit-test, but can be executed to test
+// some more advanced scenarios
+func XTestDelivery(t *testing.T) {
+ // the outside network, holding blocks
+ blo, rec := makeChain(128, 0, genesis, false)
+ world := newNetwork()
+ world.receipts = rec
+ world.chain = blo
+ world.progress(10)
+ if false {
+ log.Root().SetHandler(log.StdoutHandler)
+
+ }
+ q := newQueue(10)
+ var wg sync.WaitGroup
+ q.Prepare(1, FastSync)
+ wg.Add(1)
+ go func() {
+ // deliver headers
+ defer wg.Done()
+ c := 1
+ for {
+ //fmt.Printf("getting headers from %d\n", c)
+ hdrs := world.headers(c)
+ l := len(hdrs)
+ //fmt.Printf("scheduling %d headers, first %d last %d\n",
+ // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
+ q.Schedule(hdrs, uint64(c))
+ c += l
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ // collect results
+ defer wg.Done()
+ tot := 0
+ for {
+ res := q.Results(true)
+ tot += len(res)
+ fmt.Printf("got %d results, %d tot\n", len(res), tot)
+ // Now we can forget about these
+ world.forget(res[len(res)-1].Header.Number.Uint64())
+
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // reserve body fetch
+ i := 4
+ for {
+ peer := dummyPeer(fmt.Sprintf("peer-%d", i))
+ f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
+ if f != nil {
+ var emptyList []*types.Header
+ var txs [][]*types.Transaction
+ var uncles [][]*types.Header
+ numToSkip := rand.Intn(len(f.Headers))
+ for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
+ txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
+ uncles = append(uncles, emptyList)
+ }
+ time.Sleep(100 * time.Millisecond)
+ _, err := q.DeliverBodies(peer.id, txs, uncles)
+ if err != nil {
+ fmt.Printf("delivered %d bodies %v\n", len(txs), err)
+ }
+ } else {
+ i++
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ // reserve receiptfetch
+ peer := dummyPeer("peer-3")
+ for {
+ f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
+ if f != nil {
+ var rcs [][]*types.Receipt
+ for _, hdr := range f.Headers {
+ rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
+ }
+ _, err := q.DeliverReceipts(peer.id, rcs)
+ if err != nil {
+ fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
+ }
+ time.Sleep(100 * time.Millisecond)
+ } else {
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 50; i++ {
+ time.Sleep(300 * time.Millisecond)
+ //world.tick()
+ //fmt.Printf("trying to progress\n")
+ world.progress(rand.Intn(100))
+ }
+ for i := 0; i < 50; i++ {
+ time.Sleep(2990 * time.Millisecond)
+
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ time.Sleep(990 * time.Millisecond)
+ fmt.Printf("world block tip is %d\n",
+ world.chain[len(world.chain)-1].Header().Number.Uint64())
+ fmt.Println(q.Stats())
+ }
+ }()
+ wg.Wait()
+}
+
+func newNetwork() *network {
+ var l sync.RWMutex
+ return &network{
+ cond: sync.NewCond(&l),
+ offset: 1, // block 1 is at blocks[0]
+ }
+}
+
+// represents the network
+type network struct {
+ offset int
+ chain []*types.Block
+ receipts []types.Receipts
+ lock sync.RWMutex
+ cond *sync.Cond
+}
+
+func (n *network) getTransactions(blocknum uint64) types.Transactions {
+ index := blocknum - uint64(n.offset)
+ return n.chain[index].Transactions()
+}
+func (n *network) getReceipts(blocknum uint64) types.Receipts {
+ index := blocknum - uint64(n.offset)
+ if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
+ fmt.Printf("Err, got %d exp %d\n", got, blocknum)
+ panic("sd")
+ }
+ return n.receipts[index]
+}
+
+func (n *network) forget(blocknum uint64) {
+ index := blocknum - uint64(n.offset)
+ n.chain = n.chain[index:]
+ n.receipts = n.receipts[index:]
+ n.offset = int(blocknum)
+
+}
+func (n *network) progress(numBlocks int) {
+
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ //fmt.Printf("progressing...\n")
+ newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
+ n.chain = append(n.chain, newBlocks...)
+ n.receipts = append(n.receipts, newR...)
+ n.cond.Broadcast()
+
+}
+
+func (n *network) headers(from int) []*types.Header {
+ numHeaders := 128
+ var hdrs []*types.Header
+ index := from - n.offset
+
+ for index >= len(n.chain) {
+ // wait for progress
+ n.cond.L.Lock()
+ //fmt.Printf("header going into wait\n")
+ n.cond.Wait()
+ index = from - n.offset
+ n.cond.L.Unlock()
+ }
+ n.lock.RLock()
+ defer n.lock.RUnlock()
+ for i, b := range n.chain[index:] {
+ hdrs = append(hdrs, b.Header())
+ if i >= numHeaders {
+ break
+ }
+ }
+ return hdrs
+}
diff --git a/xcb/downloader/resultstore.go b/xcb/downloader/resultstore.go
new file mode 100644
index 000000000..f507a7123
--- /dev/null
+++ b/xcb/downloader/resultstore.go
@@ -0,0 +1,193 @@
+// Copyright 2016 by the Authors
+// This file is part of the go-core library.
+//
+// The go-core library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "github.com/core-coin/go-core/core/types"
+ "sync"
+ "sync/atomic"
+)
+
+// resultStore implements a structure for maintaining fetchResults, tracking their
+// download-progress and delivering (finished) results.
+type resultStore struct {
+ items []*fetchResult // Downloaded but not yet delivered fetch results
+ resultOffset uint64 // Offset of the first cached fetch result in the block chain
+
+ // Internal index of first non-completed entry, updated atomically when needed.
+ // If all items are complete, this will equal length(items), so
+ // *important* : is not safe to use for indexing without checking against length
+ indexIncomplete int32 // atomic access
+
+ // throttleThreshold is the limit up to which we _want_ to fill the
+ // results. If blocks are large, we want to limit the results to less
+ // than the number of available slots, and maybe only fill 1024 out of
+ // 8192 possible places. The queue will, at certain times, recalibrate
+ // this index.
+ throttleThreshold uint64
+
+ lock sync.RWMutex
+}
+
+func newResultStore(size int) *resultStore {
+ return &resultStore{
+ resultOffset: 0,
+ items: make([]*fetchResult, size),
+ throttleThreshold: uint64(size),
+ }
+}
+
+// SetThrottleThreshold updates the throttling threshold based on the requested
+// limit and the total queue capacity. It returns the (possibly capped) threshold
+func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ limit := uint64(len(r.items))
+ if threshold >= limit {
+ threshold = limit
+ }
+ r.throttleThreshold = threshold
+ return r.throttleThreshold
+}
+
+// AddFetch adds a header for body/receipt fetching. This is used when the queue
+// wants to reserve headers for fetching.
+//
+// It returns the following:
+// stale - if true, this item is already passed, and should not be requested again
+// throttled - if true, the store is at capacity, this particular header is not prio now
+// item - the result to store data into
+// err - any error that occurred
+func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ var index int
+ item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
+ if err != nil || stale || throttled {
+ return stale, throttled, item, err
+ }
+ if item == nil {
+ item = newFetchResult(header, fastSync)
+ r.items[index] = item
+ }
+ return stale, throttled, item, err
+}
+
+// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
+// is true, that means the header has already been delivered 'upstream'. This method
+// does not bubble up the 'throttle' flag, since it's moot at the point in time when
+// the item is downloaded and ready for delivery
+func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ res, _, stale, _, err := r.getFetchResult(headerNumber)
+ return res, stale, err
+}
+
+// getFetchResult returns the fetchResult corresponding to the given item, and
+// the index where the result is stored.
+func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
+ index = int(int64(headerNumber) - int64(r.resultOffset))
+ throttle = index >= int(r.throttleThreshold)
+ stale = index < 0
+
+ if index >= len(r.items) {
+ err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
+ "(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain,
+ index, headerNumber, r.resultOffset, len(r.items))
+ return nil, index, stale, throttle, err
+ }
+ if stale {
+ return nil, index, stale, throttle, nil
+ }
+ item = r.items[index]
+ return item, index, stale, throttle, nil
+}
+
+// HasCompletedItems returns true if there are processable items available;
+// this method is cheaper than countCompleted
+func (r *resultStore) HasCompletedItems() bool {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if len(r.items) == 0 {
+ return false
+ }
+ if item := r.items[0]; item != nil && item.AllDone() {
+ return true
+ }
+ return false
+}
+
+// countCompleted returns the number of items ready for delivery, stopping at
+// the first non-complete item.
+//
+// The method assumes (at least) rlock is held.
+func (r *resultStore) countCompleted() int {
+ // We iterate from the already known complete point, and see
+ // if any more has completed since last count
+ index := atomic.LoadInt32(&r.indexIncomplete)
+ for ; ; index++ {
+ if index >= int32(len(r.items)) {
+ break
+ }
+ result := r.items[index]
+ if result == nil || !result.AllDone() {
+ break
+ }
+ }
+ atomic.StoreInt32(&r.indexIncomplete, index)
+ return int(index)
+}
+
+// GetCompleted returns the next batch of completed fetchResults
+func (r *resultStore) GetCompleted(limit int) []*fetchResult {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ completed := r.countCompleted()
+ if limit > completed {
+ limit = completed
+ }
+ results := make([]*fetchResult, limit)
+ copy(results, r.items[:limit])
+
+ // Delete the results from the cache and clear the tail.
+ copy(r.items, r.items[limit:])
+ for i := len(r.items) - limit; i < len(r.items); i++ {
+ r.items[i] = nil
+ }
+ // Advance the expected block number of the first cache entry
+ r.resultOffset += uint64(limit)
+ atomic.AddInt32(&r.indexIncomplete, int32(-limit))
+
+ return results
+}
+
+// Prepare initialises the offset with the given block number
+func (r *resultStore) Prepare(offset uint64) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if r.resultOffset < offset {
+ r.resultOffset = offset
+ }
+}
diff --git a/xcb/downloader/statesync.go b/xcb/downloader/statesync.go
index 51cea55c0..f85740f5e 100644
--- a/xcb/downloader/statesync.go
+++ b/xcb/downloader/statesync.go
@@ -34,7 +34,7 @@ import (
// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
- items []common.Hash // Hashes of the state items to download
+ nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
tasks map[common.Hash]*stateTask // Download tasks to track previous attempts
timeout time.Duration // Maximum round trip time for this to complete
timer *time.Timer // Timer to fire when the RTT timeout expires
@@ -235,16 +235,16 @@ func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*
if req == nil {
continue
}
- req.peer.log.Trace("State peer marked idle (spindown)", "req.items", len(req.items), "reason", reason)
+ req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
req.timer.Stop()
delete(active, req.peer.id)
- req.peer.SetNodeDataIdle(len(req.items))
+ req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
}
// The 'finished' set contains deliveries that we were going to pass to processing.
// Those are now moot, but we still need to set those peers as idle, which would
// otherwise have been done after processing
for _, req := range finished {
- req.peer.SetNodeDataIdle(len(req.items))
+ req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
}
}
@@ -350,9 +350,10 @@ func (s *stateSync) loop() (err error) {
return errCanceled
case req := <-s.deliver:
+ deliveryTime := time.Now()
// Response, disconnect or timeout triggered, drop the peer if stalling
log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
- if len(req.items) <= 2 && !req.dropped && req.timedOut() {
+ if req.nItems <= 2 && !req.dropped && req.timedOut() {
// 2 items are the minimum requested, if even that times out, we've no use of
// this peer at the moment.
log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
@@ -376,7 +377,7 @@ func (s *stateSync) loop() (err error) {
}
// Process all the received blobs and check for stale delivery
delivered, err := s.process(req)
- req.peer.SetNodeDataIdle(delivered)
+ req.peer.SetNodeDataIdle(delivered, deliveryTime)
if err != nil {
log.Warn("Node data write error", "err", err)
return err
@@ -413,14 +414,14 @@ func (s *stateSync) assignTasks() {
// Assign a batch of fetches proportional to the estimated latency/bandwidth
cap := p.NodeDataCapacity(s.d.requestRTT())
req := &stateReq{peer: p, timeout: s.d.requestTTL()}
- s.fillTasks(cap, req)
+ items := s.fillTasks(cap, req)
// If the peer was assigned tasks to fetch, send the network request
- if len(req.items) > 0 {
- req.peer.log.Trace("Requesting new batch of data", "type", "state", "count", len(req.items), "root", s.root)
+ if len(items) > 0 {
+ req.peer.log.Trace("Requesting new batch of data", "type", "state", "count", len(items), "root", s.root)
select {
case s.d.trackStateReq <- req:
- req.peer.FetchNodeData(req.items)
+ req.peer.FetchNodeData(items)
case <-s.cancel:
case <-s.d.cancelCh:
}
@@ -430,7 +431,7 @@ func (s *stateSync) assignTasks() {
// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
-func (s *stateSync) fillTasks(n int, req *stateReq) {
+func (s *stateSync) fillTasks(n int, req *stateReq) []common.Hash {
// Refill available tasks from the scheduler.
if len(s.tasks) < n {
new := s.sched.Missing(n - len(s.tasks))
@@ -439,11 +440,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
}
}
// Find tasks that haven't been tried with the request's peer.
- req.items = make([]common.Hash, 0, n)
+ items := make([]common.Hash, 0, n)
req.tasks = make(map[common.Hash]*stateTask, n)
for hash, t := range s.tasks {
// Stop when we've gathered enough requests
- if len(req.items) == n {
+ if len(items) == n {
break
}
// Skip any requests we've already tried from this peer
@@ -452,10 +453,12 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
}
// Assign the request to this peer
t.attempts[req.peer.id] = struct{}{}
- req.items = append(req.items, hash)
+ items = append(items, hash)
req.tasks[hash] = t
delete(s.tasks, hash)
}
+ req.nItems = uint16(len(items))
+ return items
}
// process iterates over a batch of delivered state data, injecting each item
diff --git a/xcb/energyprice/energyprice.go b/xcb/energyprice/energyprice.go
index 456a3f5c5..6f1dda43f 100644
--- a/xcb/energyprice/energyprice.go
+++ b/xcb/energyprice/energyprice.go
@@ -98,7 +98,7 @@ func (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) {
head, _ := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
headHash := head.Hash()
- // If the latest gasprice is still available, return it.
+ // If the latest energyprice is still available, return it.
gpo.cacheLock.RLock()
lastHead, lastPrice := gpo.lastHead, gpo.lastPrice
gpo.cacheLock.RUnlock()
@@ -185,7 +185,7 @@ func (t transactionsByEnergyPrice) Less(i, j int) bool {
// getBlockPrices calculates the lowest transaction energy price in a given block
// and sends it to the result channel. If the block is empty or all transactions
// are sent by the miner itself(it doesn't make any sense to include this kind of
-// transaction prices for sampling), nil gasprice is returned.
+// transaction prices for sampling), nil energyprice is returned.
func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, limit int, result chan getBlockPricesResult, quit chan struct{}) {
block, err := gpo.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum))
if block == nil {
diff --git a/xcb/energyprice/energyprice_test.go b/xcb/energyprice/energyprice_test.go
index d37e751dc..2fc6cc6eb 100644
--- a/xcb/energyprice/energyprice_test.go
+++ b/xcb/energyprice/energyprice_test.go
@@ -232,7 +232,7 @@ func newTestBackend(t *testing.T) *testBackend {
// Construct testing chain
diskdb := rawdb.NewMemoryDatabase()
gspec.Commit(diskdb)
- chain, err := core.NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create local chain, %v", err)
}
diff --git a/xcb/fetcher/block_fetcher.go b/xcb/fetcher/block_fetcher.go
index 1d7ba79d3..d71935591 100644
--- a/xcb/fetcher/block_fetcher.go
+++ b/xcb/fetcher/block_fetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see .
-// Package fetcher contains the announcement based blocks or transaction synchronisation.
+// Package fetcher contains the announcement based header, blocks or transaction synchronisation.
package fetcher
import (
@@ -32,6 +32,7 @@ import (
)
const (
+ lightTimeout = time.Millisecond // Time allowance before an announced header is explicitly requested
arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
@@ -40,7 +41,7 @@ const (
const (
maxUncleDist = 7 // Maximum allowed backward distance from the chain head
maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
- hashLimit = 256 // Maximum number of unique blocks a peer may have announced
+ hashLimit = 256 // Maximum number of unique blocks or headers a peer may have announced
blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
)
@@ -64,9 +65,10 @@ var (
bodyFilterOutMeter = metrics.NewRegisteredMeter("xcb/fetcher/block/filter/bodies/out", nil)
)
-var (
- errTerminated = errors.New("terminated")
-)
+var errTerminated = errors.New("terminated")
+
+// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
+type HeaderRetrievalFn func(common.Hash) *types.Header
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block
@@ -86,6 +88,9 @@ type blockBroadcasterFn func(block *types.Block, propagate bool)
// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64
+// headersInsertFn is a callback type to insert a batch of headers into the local chain.
+type headersInsertFn func(headers []*types.Header) (int, error)
+
// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)
@@ -122,18 +127,38 @@ type bodyFilterTask struct {
time time.Time // Arrival time of the blocks' contents
}
-// blockInject represents a schedules import operation.
-type blockInject struct {
+// blockOrHeaderInject represents a scheduled import operation.
+type blockOrHeaderInject struct {
origin string
- block *types.Block
+
+ header *types.Header // Used for light mode fetcher which only cares about header.
+ block *types.Block // Used for normal mode fetcher which imports full block.
+}
+
+// number returns the block number of the injected object.
+func (inject *blockOrHeaderInject) number() uint64 {
+ if inject.header != nil {
+ return inject.header.Number.Uint64()
+ }
+ return inject.block.NumberU64()
+}
+
+// hash returns the block hash of the injected object.
+func (inject *blockOrHeaderInject) hash() common.Hash {
+ if inject.header != nil {
+ return inject.header.Hash()
+ }
+ return inject.block.Hash()
}
// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type BlockFetcher struct {
+ light bool // The indicator whether it's a light fetcher or normal one.
+
// Various event channels
notify chan *blockAnnounce
- inject chan *blockInject
+ inject chan *blockOrHeaderInject
headerFilter chan chan *headerFilterTask
bodyFilter chan chan *bodyFilterTask
@@ -149,31 +174,34 @@ type BlockFetcher struct {
completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing
// Block cache
- queue *prque.Prque // Queue containing the import operations (block number sorted)
- queues map[string]int // Per peer block counts to prevent memory exhaustion
- queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queues map[string]int // Per peer block counts to prevent memory exhaustion
+ queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
// Callbacks
+ getHeader HeaderRetrievalFn // Retrieves a header from the local chain
getBlock blockRetrievalFn // Retrieves a block from the local chain
verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work
broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
chainHeight chainHeightFn // Retrieves the current chain's height
+ insertHeaders headersInsertFn // Injects a batch of headers into the chain
insertChain chainInsertFn // Injects a batch of blocks into the chain
dropPeer peerDropFn // Drops a peer for misbehaving
// Testing hooks
- announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
- queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
- fetchingHook func([]common.Hash) // Method to call upon starting a block (xcb/61) or header (xcb/62) fetch
- completingHook func([]common.Hash) // Method to call upon starting a block body fetch (xcb/62)
- importedHook func(*types.Block) // Method to call upon successful block import (both xcb/61 and xcb/62)
+ announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
+ queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
+ fetchingHook func([]common.Hash) // Method to call upon starting a block (xcb/61) or header (xcb/62) fetch
+ completingHook func([]common.Hash) // Method to call upon starting a block body fetch (xcb/62)
+ importedHook func(*types.Header, *types.Block) // Method to call upon successful header or block import (both xcb/61 and xcb/62)
}
// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
-func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
+func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
return &BlockFetcher{
+ light: light,
notify: make(chan *blockAnnounce),
- inject: make(chan *blockInject),
+ inject: make(chan *blockOrHeaderInject),
headerFilter: make(chan chan *headerFilterTask),
bodyFilter: make(chan chan *bodyFilterTask),
done: make(chan common.Hash),
@@ -185,11 +213,13 @@ func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, b
completing: make(map[common.Hash]*blockAnnounce),
queue: prque.New(nil),
queues: make(map[string]int),
- queued: make(map[common.Hash]*blockInject),
+ queued: make(map[common.Hash]*blockOrHeaderInject),
+ getHeader: getHeader,
getBlock: getBlock,
verifyHeader: verifyHeader,
broadcastBlock: broadcastBlock,
chainHeight: chainHeight,
+ insertHeaders: insertHeaders,
insertChain: insertChain,
dropPeer: dropPeer,
}
@@ -229,7 +259,7 @@ func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time
// Enqueue tries to fill gaps the fetcher's future import queue.
func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
- op := &blockInject{
+ op := &blockOrHeaderInject{
origin: peer,
block: block,
}
@@ -316,13 +346,13 @@ func (f *BlockFetcher) loop() {
// Import any queued blocks that could potentially fit
height := f.chainHeight()
for !f.queue.Empty() {
- op := f.queue.PopItem().(*blockInject)
- hash := op.block.Hash()
+ op := f.queue.PopItem().(*blockOrHeaderInject)
+ hash := op.hash()
if f.queueChangeHook != nil {
f.queueChangeHook(hash, false)
}
// If too high up the chain or phase, continue later
- number := op.block.NumberU64()
+ number := op.number()
if number > height+1 {
f.queue.Push(op, -int64(number))
if f.queueChangeHook != nil {
@@ -331,11 +361,15 @@ func (f *BlockFetcher) loop() {
break
}
// Otherwise if fresh and still unknown, try and import
- if number+maxUncleDist < height || f.getBlock(hash) != nil {
+ if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
f.forgetBlock(hash)
continue
}
- f.insert(op.origin, op.block)
+ if f.light {
+ f.importHeaders(op.origin, op.header)
+ } else {
+ f.importBlocks(op.origin, op.block)
+ }
}
// Wait for an outside event to occur
select {
@@ -380,7 +414,13 @@ func (f *BlockFetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
blockBroadcastInMeter.Mark(1)
- f.enqueue(op.origin, op.block)
+
+ // Only direct block injection is allowed at the moment; silently drop
+ // any header injection that arrives here.
+ if f.light {
+ continue
+ }
+ f.enqueue(op.origin, nil, op.block)
case hash := <-f.done:
// A pending import finished, remove all traces of the notification
@@ -392,13 +432,19 @@ func (f *BlockFetcher) loop() {
request := make(map[string][]common.Hash)
for hash, announces := range f.announced {
- if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
+ // In the current LES protocol (les2/les3), only header announcements
+ // are available, so there is no need to wait long for a header broadcast.
+ timeout := arriveTimeout - gatherSlack
+ if f.light {
+ timeout = 0
+ }
+ if time.Since(announces[0].time) > timeout {
// Pick a random peer to retrieve from, reset all others
announce := announces[rand.Intn(len(announces))]
f.forgetHash(hash)
// If the block still didn't arrive, queue for fetching
- if f.getBlock(hash) == nil {
+ if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
request[announce.origin] = append(request[announce.origin], hash)
f.fetching[hash] = announce
}
@@ -466,7 +512,7 @@ func (f *BlockFetcher) loop() {
// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
- unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
+ unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
for _, header := range task.headers {
hash := header.Hash()
@@ -479,6 +525,16 @@ func (f *BlockFetcher) loop() {
f.forgetHash(hash)
continue
}
+ // Collect all headers only if we are running in light
+ // mode and the headers are not imported by other means.
+ if f.light {
+ if f.getHeader(hash) == nil {
+ announce.header = header
+ lightHeaders = append(lightHeaders, announce)
+ }
+ f.forgetHash(hash)
+ continue
+ }
// Only keep if not imported by other means
if f.getBlock(hash) == nil {
announce.header = header
@@ -523,10 +579,15 @@ func (f *BlockFetcher) loop() {
f.rescheduleComplete(completeTimer)
}
}
+ // Schedule the header for light fetcher import
+ for _, announce := range lightHeaders {
+ f.enqueue(announce.origin, announce.header, nil)
+ }
+
// Schedule the header-only blocks for import
for _, block := range complete {
if announce := f.completing[block.Hash()]; announce != nil {
- f.enqueue(announce.origin, block)
+ f.enqueue(announce.origin, nil, block)
}
}
@@ -541,38 +602,50 @@ func (f *BlockFetcher) loop() {
bodyFilterInMeter.Mark(int64(len(task.transactions)))
blocks := []*types.Block{}
- for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
- // Match up a body to any possible completion request
- matched := false
-
- for hash, announce := range f.completing {
- if f.queued[hash] == nil {
- txnHash := types.DeriveSha(types.Transactions(task.transactions[i]), new(trie.Trie))
- uncleHash := types.CalcUncleHash(task.uncles[i])
-
- if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
- // Mark the body matched, reassemble if still unknown
- matched = true
-
- if f.getBlock(hash) == nil {
- block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
- block.ReceivedAt = task.time
-
- blocks = append(blocks, block)
- } else {
- f.forgetHash(hash)
- }
+ // abort early if there's nothing explicitly requested
+ if len(f.completing) > 0 {
+ for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
+ // Match up a body to any possible completion request
+ var (
+ matched = false
+ uncleHash common.Hash // calculated lazily and reused
+ txnHash common.Hash // calculated lazily and reused
+ )
+ for hash, announce := range f.completing {
+ if f.queued[hash] != nil || announce.origin != task.peer {
+ continue
+ }
+ if uncleHash == (common.Hash{}) {
+ uncleHash = types.CalcUncleHash(task.uncles[i])
+ }
+ if uncleHash != announce.header.UncleHash {
+ continue
}
+ if txnHash == (common.Hash{}) {
+ txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), new(trie.Trie))
+ }
+ if txnHash != announce.header.TxHash {
+ continue
+ }
+ // Mark the body matched, reassemble if still unknown
+ matched = true
+ if f.getBlock(hash) == nil {
+ block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
+ block.ReceivedAt = task.time
+ blocks = append(blocks, block)
+ } else {
+ f.forgetHash(hash)
+ }
+
+ }
+ if matched {
+ task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
+ task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
+ i--
+ continue
}
- }
- if matched {
- task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
- task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
- i--
- continue
}
}
-
bodyFilterOutMeter.Mark(int64(len(task.transactions)))
select {
case filter <- task:
@@ -582,7 +655,7 @@ func (f *BlockFetcher) loop() {
// Schedule the retrieved blocks for ordered import
for _, block := range blocks {
if announce := f.completing[block.Hash()]; announce != nil {
- f.enqueue(announce.origin, block)
+ f.enqueue(announce.origin, nil, block)
}
}
}
@@ -595,6 +668,12 @@ func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
if len(f.announced) == 0 {
return
}
+ // Schedule announcement retrieval quickly for light mode
+ // since server won't send any headers to client.
+ if f.light {
+ fetch.Reset(lightTimeout)
+ return
+ }
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announces := range f.announced {
@@ -611,6 +690,7 @@ func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
if len(f.fetched) == 0 {
return
}
+
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announces := range f.fetched {
@@ -621,46 +701,89 @@ func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
complete.Reset(gatherSlack - time.Since(earliest))
}
-// enqueue schedules a new future import operation, if the block to be imported
-// has not yet been seen.
-func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
- hash := block.Hash()
+// enqueue schedules a new header or block import operation, if the component
+// to be imported has not yet been seen.
+func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
+ var (
+ hash common.Hash
+ number uint64
+ )
+ if header != nil {
+ hash, number = header.Hash(), header.Number.Uint64()
+ } else {
+ hash, number = block.Hash(), block.NumberU64()
+ }
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
- log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
+ log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
blockBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
- if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
- log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
+ if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
+ log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
blockBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
}
// Schedule the block for future importing
if _, ok := f.queued[hash]; !ok {
- op := &blockInject{
- origin: peer,
- block: block,
+ op := &blockOrHeaderInject{origin: peer}
+ if header != nil {
+ op.header = header
+ } else {
+ op.block = block
}
f.queues[peer] = count
f.queued[hash] = op
- f.queue.Push(op, -int64(block.NumberU64()))
+ f.queue.Push(op, -int64(number))
if f.queueChangeHook != nil {
- f.queueChangeHook(op.block.Hash(), true)
+ f.queueChangeHook(hash, true)
}
- log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
+ log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
}
}
-// insert spawns a new goroutine to run a block insertion into the chain. If the
+// importHeaders spawns a new goroutine to run a header insertion into the chain.
+// If the header's number is at the same height as the current import phase, it
+// updates the phase states accordingly.
+func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
+ hash := header.Hash()
+ log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)
+
+ go func() {
+ defer func() { f.done <- hash }()
+ // If the parent's unknown, abort insertion
+ parent := f.getHeader(header.ParentHash)
+ if parent == nil {
+ log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
+ return
+ }
+ // Validate the header and if something went wrong, drop the peer
+ if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
+ log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
+ f.dropPeer(peer)
+ return
+ }
+ // Run the actual import and log any issues
+ if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
+ log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
+ return
+ }
+ // Invoke the testing hook if needed
+ if f.importedHook != nil {
+ f.importedHook(header, nil)
+ }
+ }()
+}
+
+// importBlocks spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
-func (f *BlockFetcher) insert(peer string, block *types.Block) {
+func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
hash := block.Hash()
// Run the import on a new thread
@@ -701,7 +824,7 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) {
// Invoke the testing hook if needed
if f.importedHook != nil {
- f.importedHook(block)
+ f.importedHook(nil, block)
}
}()
}
diff --git a/xcb/fetcher/block_fetcher_test.go b/xcb/fetcher/block_fetcher_test.go
index 6e3f4df7c..c4e315f16 100644
--- a/xcb/fetcher/block_fetcher_test.go
+++ b/xcb/fetcher/block_fetcher_test.go
@@ -81,26 +81,36 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
type fetcherTester struct {
fetcher *BlockFetcher
- hashes []common.Hash // Hash chain belonging to the tester
- blocks map[common.Hash]*types.Block // Blocks belonging to the tester
- drops map[string]bool // Map of peers dropped by the fetcher
+ hashes []common.Hash // Hash chain belonging to the tester
+ headers map[common.Hash]*types.Header // Headers belonging to the tester
+ blocks map[common.Hash]*types.Block // Blocks belonging to the tester
+ drops map[string]bool // Map of peers dropped by the fetcher
lock sync.RWMutex
}
// newTester creates a new fetcher test mocker.
-func newTester() *fetcherTester {
+func newTester(light bool) *fetcherTester {
tester := &fetcherTester{
- hashes: []common.Hash{genesis.Hash()},
- blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
- drops: make(map[string]bool),
+ hashes: []common.Hash{genesis.Hash()},
+ headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
+ blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
+ drops: make(map[string]bool),
}
- tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+ tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
return tester
}
+// getHeader retrieves a header from the tester's block chain.
+func (f *fetcherTester) getHeader(hash common.Hash) *types.Header {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.headers[hash]
+}
+
// getBlock retrieves a block from the tester's block chain.
func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
f.lock.RLock()
@@ -123,9 +133,33 @@ func (f *fetcherTester) chainHeight() uint64 {
f.lock.RLock()
defer f.lock.RUnlock()
+ if f.fetcher.light {
+ return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()
+ }
return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
}
+// insertHeaders injects a batch of headers into the simulated chain.
+func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for i, header := range headers {
+ // Make sure the parent is known
+ if _, ok := f.headers[header.ParentHash]; !ok {
+ return i, errors.New("unknown parent")
+ }
+ // Discard any new headers if the same height already exists
+ if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() {
+ return i, nil
+ }
+ // Otherwise build our current chain
+ f.hashes = append(f.hashes, header.Hash())
+ f.headers[header.Hash()] = header
+ }
+ return 0, nil
+}
+
// insertChain injects a new blocks into the simulated chain.
func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
f.lock.Lock()
@@ -236,7 +270,7 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b
}
// verifyImportEvent verifies that one single event arrive on an import channel.
-func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
+func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
if arrive {
select {
case <-imported:
@@ -254,7 +288,7 @@ func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
// verifyImportCount verifies that exactly count number of events arrive on an
// import hook channel.
-func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
+func verifyImportCount(t *testing.T, imported chan interface{}, count int) {
for i := 0; i < count; i++ {
select {
case <-imported:
@@ -266,7 +300,7 @@ func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
}
// verifyImportDone verifies that no more events are arriving on an import channel.
-func verifyImportDone(t *testing.T, imported chan *types.Block) {
+func verifyImportDone(t *testing.T, imported chan interface{}) {
select {
case <-imported:
t.Fatalf("extra block imported")
@@ -274,45 +308,63 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
}
}
-// Tests that a fetcher accepts block announcements and initiates retrievals for
-// them, successfully importing into the local chain.
-func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
-func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
-func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
+// verifyChainHeight verifies the chain height is as expected.
+func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
+ if fetcher.chainHeight() != height {
+ t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height)
+ }
+}
+
+// Tests that a fetcher accepts block/header announcements and initiates retrievals
+// for them, successfully importing into the local chain.
+func TestFullSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, false) }
+func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) }
-func testSequentialAnnouncements(t *testing.T, protocol int) {
+func testSequentialAnnouncements(t *testing.T, light bool) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
- tester := newTester()
+ tester := newTester(light)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks until all are imported
- imported := make(chan *types.Block)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
verifyImportEvent(t, imported, true)
}
verifyImportDone(t, imported)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
-func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
-func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
-func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
+func TestFullConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, false) }
+func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) }
-func testConcurrentAnnouncements(t *testing.T, protocol int) {
+func testConcurrentAnnouncements(t *testing.T, light bool) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Assemble a tester with a built in counter for the requests
- tester := newTester()
+ tester := newTester(light)
firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack)
firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0)
secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack)
@@ -328,8 +380,20 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
return secondHeaderFetcher(hash)
}
// Iteratively announce blocks until all are imported
- imported := make(chan *types.Block)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher)
@@ -343,30 +407,42 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
if int(counter) != targetBlocks {
t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks)
}
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
// Tests that announcements arriving while a previous is being fetched still
// results in a valid import.
-func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
-func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
-func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
+func TestFullOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, false) }
+func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) }
-func testOverlappingAnnouncements(t *testing.T, protocol int) {
+func testOverlappingAnnouncements(t *testing.T, light bool) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
- tester := newTester()
+ tester := newTester(light)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, but overlap them continuously
overlap := 16
- imported := make(chan *types.Block, len(hashes)-1)
+ imported := make(chan interface{}, len(hashes)-1)
for i := 0; i < overlap; i++ {
imported <- nil
}
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -378,19 +454,19 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
}
// Wait for all the imports to complete and check count
verifyImportCount(t, imported, overlap)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
// Tests that announces already being retrieved will not be duplicated.
-func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
-func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
-func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
+func TestFullPendingDeduplication(t *testing.T) { testPendingDeduplication(t, false) }
+func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) }
-func testPendingDeduplication(t *testing.T, protocol int) {
+func testPendingDeduplication(t *testing.T, light bool) {
// Create a hash and corresponding block
hashes, blocks := makeChain(1, 0, genesis)
// Assemble a tester with a built in counter and delayed fetcher
- tester := newTester()
+ tester := newTester(light)
headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0)
@@ -406,41 +482,58 @@ func testPendingDeduplication(t *testing.T, protocol int) {
}()
return nil
}
+ checkNonExist := func() bool {
+ return tester.getBlock(hashes[0]) == nil
+ }
+ if light {
+ checkNonExist = func() bool {
+ return tester.getHeader(hashes[0]) == nil
+ }
+ }
// Announce the same block many times until it's fetched (wait for any pending ops)
- for tester.getBlock(hashes[0]) == nil {
+ for checkNonExist() {
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
time.Sleep(time.Millisecond)
}
time.Sleep(delay)
// Check that all blocks were imported and none fetched twice
- if imported := len(tester.blocks); imported != 2 {
- t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2)
- }
if int(counter) != 1 {
t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1)
}
+ verifyChainHeight(t, tester, 1)
}
// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
-func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
-func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
-func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
+func TestFullRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, false) }
+func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) }
-func testRandomArrivalImport(t *testing.T, protocol int) {
+func testRandomArrivalImport(t *testing.T, light bool) {
// Create a chain of blocks to import, and choose one to delay
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
- tester := newTester()
+ tester := newTester(light)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, skipping one entry
- imported := make(chan *types.Block, len(hashes)-1)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{}, len(hashes)-1)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
@@ -451,27 +544,24 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
// Finally announce the skipped entry and check full import
tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
verifyImportCount(t, imported, len(hashes)-1)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly schedule, filling and import queue gaps.
-func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
-func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
-func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }
-
-func testQueueGapFill(t *testing.T, protocol int) {
+func TestQueueGapFill(t *testing.T) {
// Create a chain of blocks to import, and choose one to not announce at all
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
- tester := newTester()
+ tester := newTester(false)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
// Iteratively announce blocks, skipping one entry
- imported := make(chan *types.Block, len(hashes)-1)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{}, len(hashes)-1)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
@@ -482,20 +572,17 @@ func testQueueGapFill(t *testing.T, protocol int) {
// Fill the missing block directly as if propagated
tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
verifyImportCount(t, imported, len(hashes)-1)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
}
// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
-func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
-func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
-func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }
-
-func testImportDeduplication(t *testing.T, protocol int) {
+func TestImportDeduplication(t *testing.T) {
// Create two blocks to import (one for duplication, the other for stalling)
hashes, blocks := makeChain(2, 0, genesis)
// Create the tester and wrap the importer with a counter
- tester := newTester()
+ tester := newTester(false)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -506,9 +593,9 @@ func testImportDeduplication(t *testing.T, protocol int) {
}
// Instrument the fetching and imported events
fetching := make(chan []common.Hash)
- imported := make(chan *types.Block, len(hashes)-1)
+ imported := make(chan interface{}, len(hashes)-1)
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
// Announce the duplicating block, wait for retrieval, and also propagate directly
tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -537,7 +624,7 @@ func TestDistantPropagationDiscarding(t *testing.T) {
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
- tester := newTester()
+ tester := newTester(false)
tester.lock.Lock()
tester.hashes = []common.Hash{head}
@@ -561,11 +648,10 @@ func TestDistantPropagationDiscarding(t *testing.T) {
// Tests that announcements with numbers much lower or higher than out current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
-func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
-func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
-func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }
+func TestFullDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, false) }
+func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) }
-func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
+func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
// Create a long chain to import and define the discard boundaries
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]
@@ -573,10 +659,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
- tester := newTester()
+ tester := newTester(light)
tester.lock.Lock()
tester.hashes = []common.Hash{head}
+ tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()}
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
tester.lock.Unlock()
@@ -604,20 +691,31 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
-func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
-func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
-func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }
+func TestFullInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, false) }
+func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) }
-func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
+func testInvalidNumberAnnouncement(t *testing.T, light bool) {
// Create a single block to import and check numbers against
hashes, blocks := makeChain(1, 0, genesis)
- tester := newTester()
+ tester := newTester(light)
badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack)
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
- imported := make(chan *types.Block)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
// Announce a block with a bad number, check for immediate drop
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)
@@ -649,15 +747,11 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
// Tests that if a block is empty (i.e. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
-func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
-func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }
-
-func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
+func TestEmptyBlockShortCircuit(t *testing.T) {
// Create a chain of blocks to import
hashes, blocks := makeChain(32, 0, genesis)
- tester := newTester()
+ tester := newTester(false)
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -668,8 +762,13 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
completing := make(chan []common.Hash)
tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }
- imported := make(chan *types.Block)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
// Iteratively announce blocks until all are imported
for i := len(hashes) - 2; i >= 0; i-- {
@@ -690,16 +789,12 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Tests that a peer is unable to use unbounded memory with sending infinite
// block announcements to a node, but that even in the face of such an attack,
// the fetcher remains operational.
-func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
-func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
-func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
-
-func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
+func TestHashMemoryExhaustionAttack(t *testing.T) {
// Create a tester with instrumented import hooks
- tester := newTester()
+ tester := newTester(false)
- imported, announces := make(chan *types.Block), int32(0)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported, announces := make(chan interface{}), int32(0)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {
if added {
atomic.AddInt32(&announces, 1)
@@ -743,10 +838,10 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// system memory.
func TestBlockMemoryExhaustionAttack(t *testing.T) {
// Create a tester with instrumented import hooks
- tester := newTester()
+ tester := newTester(false)
- imported, enqueued := make(chan *types.Block), int32(0)
- tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+ imported, enqueued := make(chan interface{}), int32(0)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
if added {
atomic.AddInt32(&enqueued, 1)
diff --git a/xcb/fetcher/tx_fetcher.go b/xcb/fetcher/tx_fetcher.go
index 7339f338a..aff266d7e 100644
--- a/xcb/fetcher/tx_fetcher.go
+++ b/xcb/fetcher/tx_fetcher.go
@@ -387,7 +387,7 @@ func (f *TxFetcher) loop() {
if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{}
} else {
- f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
}
continue
}
@@ -400,7 +400,7 @@ func (f *TxFetcher) loop() {
if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{}
} else {
- f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
}
continue
}
@@ -413,18 +413,18 @@ func (f *TxFetcher) loop() {
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{}
} else {
- f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
}
continue
}
// Transaction unknown to the fetcher, insert it into the waiting list
- f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
+ f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
f.waittime[hash] = f.clock.Now()
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{}
} else {
- f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
}
}
// If a new item was added to the waitlist, schedule it into the fetcher
@@ -452,7 +452,7 @@ func (f *TxFetcher) loop() {
if announces := f.announces[peer]; announces != nil {
announces[hash] = struct{}{}
} else {
- f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
+ f.announces[peer] = map[common.Hash]struct{}{hash: {}}
}
delete(f.waitslots[peer], hash)
if len(f.waitslots[peer]) == 0 {
diff --git a/xcb/filters/bench_test.go b/xcb/filters/bench_test.go
index 3fb20a8af..0656474f4 100644
--- a/xcb/filters/bench_test.go
+++ b/xcb/filters/bench_test.go
@@ -147,7 +147,7 @@ var bloomBitsPrefix = []byte("bloomBits-")
func clearBloomBits(db xcbdb.Database) {
fmt.Println("Clearing bloombits data...")
- it := db.NewIteratorWithPrefix(bloomBitsPrefix)
+ it := db.NewIterator(bloomBitsPrefix, nil)
for it.Next() {
db.Delete(it.Key())
}
diff --git a/xcb/gen_config.go b/xcb/gen_config.go
index 732aecccc..fc07f5f68 100644
--- a/xcb/gen_config.go
+++ b/xcb/gen_config.go
@@ -18,43 +18,46 @@ import (
// MarshalTOML marshals as TOML.
func (c Config) MarshalTOML() (interface{}, error) {
type Config struct {
- Genesis *core.Genesis `toml:",omitempty"`
- NetworkId uint64
- SyncMode downloader.SyncMode
- DiscoveryURLs []string
- UseDNSDiscovery bool
- NoPruning bool
- NoPrefetch bool
- Whitelist map[uint64]common.Hash `toml:"-"`
- LightServ int `toml:",omitempty"`
- LightIngress int `toml:",omitempty"`
- LightEgress int `toml:",omitempty"`
- LightPeers int `toml:",omitempty"`
- LightNoPrune bool `toml:",omitempty"`
- UltraLightServers []string `toml:",omitempty"`
- UltraLightFraction int `toml:",omitempty"`
- UltraLightOnlyAnnounce bool `toml:",omitempty"`
- SkipBcVersionCheck bool `toml:"-"`
- DatabaseHandles int `toml:"-"`
- DatabaseCache int
- DatabaseFreezer string
- TrieCleanCache int
- TrieCleanCacheJournal string `toml:",omitempty"`
- TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
- TrieDirtyCache int
- TrieTimeout time.Duration
- SnapshotCache int
- Miner miner.Config
- Cryptore cryptore.Config
- TxPool core.TxPoolConfig
- GPO energyprice.Config
- EnablePreimageRecording bool
- DocRoot string `toml:"-"`
- EWASMInterpreter string
- CVMInterpreter string
- RPCEnergyCap *big.Int `toml:",omitempty"`
- Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
- CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+ Genesis *core.Genesis `toml:",omitempty"`
+ NetworkId uint64
+ SyncMode downloader.SyncMode
+ DiscoveryURLs []string
+ UseDNSDiscovery bool
+ NoPruning bool
+ NoPrefetch bool
+ TxLookupLimit uint64 `toml:",omitempty"`
+ Whitelist map[uint64]common.Hash `toml:"-"`
+ LightServ int `toml:",omitempty"`
+ LightIngress int `toml:",omitempty"`
+ LightEgress int `toml:",omitempty"`
+ LightPeers int `toml:",omitempty"`
+ LightNoPrune bool `toml:",omitempty"`
+ UltraLightServers []string `toml:",omitempty"`
+ UltraLightFraction int `toml:",omitempty"`
+ UltraLightOnlyAnnounce bool `toml:",omitempty"`
+ SkipBcVersionCheck bool `toml:"-"`
+ DatabaseHandles int `toml:"-"`
+ DatabaseCache int
+ DatabaseFreezer string
+ TrieCleanCache int
+ TrieCleanCacheJournal string `toml:",omitempty"`
+ TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
+ TrieDirtyCache int
+ TrieTimeout time.Duration
+ SnapshotCache int
+ Miner miner.Config
+ Cryptore cryptore.Config
+ TxPool core.TxPoolConfig
+ GPO energyprice.Config
+ EnablePreimageRecording bool
+ DocRoot string `toml:"-"`
+ EWASMInterpreter string
+ CVMInterpreter string
+ TrustedPeersBroadcasting bool
+ RPCEnergyCap *big.Int `toml:",omitempty"`
+ RPCTxFeeCap float64 `toml:",omitempty"`
+ Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
+ CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
}
var enc Config
enc.Genesis = c.Genesis
@@ -64,6 +67,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.UseDNSDiscovery = c.UseDNSDiscovery
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
+ enc.TxLookupLimit = c.TxLookupLimit
enc.Whitelist = c.Whitelist
enc.LightServ = c.LightServ
enc.LightIngress = c.LightIngress
@@ -91,7 +95,9 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.DocRoot = c.DocRoot
enc.EWASMInterpreter = c.EWASMInterpreter
enc.CVMInterpreter = c.CVMInterpreter
+ enc.TrustedPeersBroadcasting = c.TrustedPeersBroadcasting
enc.RPCEnergyCap = c.RPCEnergyCap
+ enc.RPCTxFeeCap = c.RPCTxFeeCap
enc.Checkpoint = c.Checkpoint
enc.CheckpointOracle = c.CheckpointOracle
return &enc, nil
@@ -100,43 +106,46 @@ func (c Config) MarshalTOML() (interface{}, error) {
// UnmarshalTOML unmarshals from TOML.
func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
type Config struct {
- Genesis *core.Genesis `toml:",omitempty"`
- NetworkId *uint64
- SyncMode *downloader.SyncMode
- DiscoveryURLs []string
- UseDNSDiscovery *bool
- NoPruning *bool
- NoPrefetch *bool
- Whitelist map[uint64]common.Hash `toml:"-"`
- LightServ *int `toml:",omitempty"`
- LightIngress *int `toml:",omitempty"`
- LightEgress *int `toml:",omitempty"`
- LightPeers *int `toml:",omitempty"`
- LightNoPrune *bool `toml:",omitempty"`
- UltraLightServers []string `toml:",omitempty"`
- UltraLightFraction *int `toml:",omitempty"`
- UltraLightOnlyAnnounce *bool `toml:",omitempty"`
- SkipBcVersionCheck *bool `toml:"-"`
- DatabaseHandles *int `toml:"-"`
- DatabaseCache *int
- DatabaseFreezer *string
- TrieCleanCache *int
- TrieCleanCacheJournal *string `toml:",omitempty"`
- TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
- TrieDirtyCache *int
- TrieTimeout *time.Duration
- SnapshotCache *int
- Miner *miner.Config
- Cryptore *cryptore.Config
- TxPool *core.TxPoolConfig
- GPO *energyprice.Config
- EnablePreimageRecording *bool
- DocRoot *string `toml:"-"`
- EWASMInterpreter *string
- CVMInterpreter *string
- RPCEnergyCap *big.Int `toml:",omitempty"`
- Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
- CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+ Genesis *core.Genesis `toml:",omitempty"`
+ NetworkId *uint64
+ SyncMode *downloader.SyncMode
+ DiscoveryURLs []string
+ UseDNSDiscovery *bool
+ NoPruning *bool
+ NoPrefetch *bool
+ TxLookupLimit *uint64 `toml:",omitempty"`
+ Whitelist map[uint64]common.Hash `toml:"-"`
+ LightServ *int `toml:",omitempty"`
+ LightIngress *int `toml:",omitempty"`
+ LightEgress *int `toml:",omitempty"`
+ LightPeers *int `toml:",omitempty"`
+ LightNoPrune *bool `toml:",omitempty"`
+ UltraLightServers []string `toml:",omitempty"`
+ UltraLightFraction *int `toml:",omitempty"`
+ UltraLightOnlyAnnounce *bool `toml:",omitempty"`
+ SkipBcVersionCheck *bool `toml:"-"`
+ DatabaseHandles *int `toml:"-"`
+ DatabaseCache *int
+ DatabaseFreezer *string
+ TrieCleanCache *int
+ TrieCleanCacheJournal *string `toml:",omitempty"`
+ TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
+ TrieDirtyCache *int
+ TrieTimeout *time.Duration
+ SnapshotCache *int
+ Miner *miner.Config
+ Cryptore *cryptore.Config
+ TxPool *core.TxPoolConfig
+ GPO *energyprice.Config
+ EnablePreimageRecording *bool
+ DocRoot *string `toml:"-"`
+ EWASMInterpreter *string
+ CVMInterpreter *string
+ TrustedPeersBroadcasting *bool
+ RPCEnergyCap *big.Int `toml:",omitempty"`
+ RPCTxFeeCap *float64 `toml:",omitempty"`
+ Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
+ CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -163,6 +172,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.NoPrefetch != nil {
c.NoPrefetch = *dec.NoPrefetch
}
+ if dec.TxLookupLimit != nil {
+ c.TxLookupLimit = *dec.TxLookupLimit
+ }
if dec.Whitelist != nil {
c.Whitelist = dec.Whitelist
}
@@ -244,9 +256,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.CVMInterpreter != nil {
c.CVMInterpreter = *dec.CVMInterpreter
}
+ if dec.TrustedPeersBroadcasting != nil {
+ c.TrustedPeersBroadcasting = *dec.TrustedPeersBroadcasting
+ }
if dec.RPCEnergyCap != nil {
c.RPCEnergyCap = dec.RPCEnergyCap
}
+ if dec.RPCTxFeeCap != nil {
+ c.RPCTxFeeCap = *dec.RPCTxFeeCap
+ }
if dec.Checkpoint != nil {
c.Checkpoint = dec.Checkpoint
}
diff --git a/xcb/handler.go b/xcb/handler.go
index 8d8e66239..19e7242a8 100644
--- a/xcb/handler.go
+++ b/xcb/handler.go
@@ -72,6 +72,7 @@ type ProtocolManager struct {
txpool txPool
blockchain *core.BlockChain
+ chaindb xcbdb.Database
maxPeers int
downloader *downloader.Downloader
@@ -110,6 +111,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
+ chaindb: chaindb,
peers: newPeerSet(),
whitelist: whitelist,
txsyncCh: make(chan *txsync),
@@ -189,7 +191,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
}
return n, err
}
- manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+ manager.blockFetcher = fetcher.NewBlockFetcher(false, nil, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, nil, inserter, manager.removePeer)
fetchTx := func(peer string, hashes []common.Hash) error {
p := manager.peers.Peer(peer)
diff --git a/xcb/handler_test.go b/xcb/handler_test.go
index 4d4a99b10..3876d340b 100644
--- a/xcb/handler_test.go
+++ b/xcb/handler_test.go
@@ -320,7 +320,7 @@ func testGetNodeData(t *testing.T, protocol int) { // TODO: TEST
// Fetch for now the entire chain db
hashes := []common.Hash{}
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
for it.Next() {
if key := it.Key(); len(key) == common.HashLength {
hashes = append(hashes, common.BytesToHash(key))
@@ -496,7 +496,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
}
}
// Create a checkpoint aware protocol manager
- blockchain, err := core.NewBlockChain(db, nil, config, cryptore.NewFaker(), vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, cryptore.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
@@ -604,7 +604,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int, bttp bo
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
)
- blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
@@ -664,7 +664,7 @@ func TestBroadcastMalformedBlock(t *testing.T) {
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
)
- blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
diff --git a/xcb/helper_test.go b/xcb/helper_test.go
index 33fbb7ded..7017139e1 100644
--- a/xcb/helper_test.go
+++ b/xcb/helper_test.go
@@ -64,7 +64,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
+ blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
)
chain, _ := core.GenerateChain(gspec.Config, genesis, cryptore.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
diff --git a/xcb/protocol_test.go b/xcb/protocol_test.go
index a8cd1ed41..ee211e804 100644
--- a/xcb/protocol_test.go
+++ b/xcb/protocol_test.go
@@ -169,8 +169,8 @@ func TestForkIDSplit(t *testing.T) {
genesisNoFork = gspecNoFork.MustCommit(dbNoFork)
genesisProFork = gspecProFork.MustCommit(dbProFork)
- chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil)
- chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil)
+ chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
+ chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)
blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)
diff --git a/xcb/sync.go b/xcb/sync.go
index 98971b713..649a82dc8 100644
--- a/xcb/sync.go
+++ b/xcb/sync.go
@@ -17,6 +17,7 @@
package xcb
import (
+ "github.com/core-coin/go-core/core/rawdb"
"math/big"
"math/rand"
"sync/atomic"
@@ -265,15 +266,25 @@ func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp {
}
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
+ // If we're in fast sync mode, return that directly
if atomic.LoadUint32(&cs.pm.fastSync) == 1 {
block := cs.pm.blockchain.CurrentFastBlock()
td := cs.pm.blockchain.GetTdByHash(block.Hash())
return downloader.FastSync, td
- } else {
- head := cs.pm.blockchain.CurrentHeader()
- td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
- return downloader.FullSync, td
}
+ // We are probably in full sync, but we might have rewound to before the
+ // fast sync pivot, check if we should reenable
+ if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil {
+ if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot {
+ block := cs.pm.blockchain.CurrentFastBlock()
+ td := cs.pm.blockchain.GetTdByHash(block.Hash())
+ return downloader.FastSync, td
+ }
+ }
+ // Nope, we're really full syncing
+ head := cs.pm.blockchain.CurrentHeader()
+ td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
+ return downloader.FullSync, td
}
// startSync launches doSync in a new goroutine.
@@ -284,6 +295,25 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {
// doSync synchronizes the local blockchain with a remote peer.
func (pm *ProtocolManager) doSync(op *chainSyncOp) error {
+ if op.mode == downloader.FastSync {
+ // Before launching the fast sync, we have to ensure the user uses the same
+ // txlookup limit.
+ // The main concern here is: during the fast sync Gocore won't index the
+ // block(generate tx indices) before the HEAD-limit. But if user changes
+ // the limit in the next fast sync(e.g. the user kills Gocore manually and
+ // restart) then it will be hard for Gocore to figure out the oldest block
+ // has been indexed. So here for the user-experience wise, it's non-optimal
+ // that user can't change limit during the fast sync. If changed, Gocore
+ // will just blindly use the original one.
+ limit := pm.blockchain.TxLookupLimit()
+ if stored := rawdb.ReadFastTxLookupLimit(pm.chaindb); stored == nil {
+ rawdb.WriteFastTxLookupLimit(pm.chaindb, limit)
+ } else if *stored != limit {
+ pm.blockchain.SetTxLookupLimit(*stored)
+ log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
+ }
+ }
+
// Run the sync cycle, and disable fast sync if we're past the pivot block
err := pm.downloader.Synchronise(op.peer.id, op.head, op.td, op.mode)
if err != nil {
diff --git a/xcb/tracers/internal/tracers/assets.go b/xcb/tracers/internal/tracers/assets.go
index 9f8142bf5..f27d76141 100644
--- a/xcb/tracers/internal/tracers/assets.go
+++ b/xcb/tracers/internal/tracers/assets.go
@@ -92,7 +92,7 @@ func _4byte_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "4byte_tracer.js", size: 2914, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "4byte_tracer.js", size: 2914, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x48, 0x89, 0x5d, 0x5e, 0xca, 0x4c, 0xe2, 0xfb, 0x8, 0x3d, 0x13, 0x2a, 0xdd, 0x80, 0xce, 0x46, 0x60, 0x3c, 0x38, 0xef, 0x17, 0x1b, 0x97, 0x29, 0x2a, 0x3f, 0x6b, 0x87, 0x54, 0x3b, 0xf}}
return a, nil
}
@@ -112,7 +112,7 @@ func bigram_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "bigram_tracer.js", size: 1687, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "bigram_tracer.js", size: 1687, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0xbf, 0x87, 0xa9, 0x3e, 0xb6, 0xd7, 0xed, 0xa6, 0xd7, 0xdb, 0x19, 0x9e, 0x5e, 0xb2, 0x64, 0xba, 0x46, 0x10, 0xde, 0x24, 0x93, 0xa6, 0xf1, 0x58, 0x19, 0x98, 0x60, 0x9e, 0x3f, 0x35, 0x84}}
return a, nil
}
@@ -132,7 +132,7 @@ func call_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "call_tracer.js", size: 9072, mode: os.FileMode(0664), modTime: time.Unix(1633331980, 0)}
+ info := bindataFileInfo{name: "call_tracer.js", size: 9072, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xda, 0xf3, 0xf2, 0x30, 0x63, 0x10, 0x9d, 0x6c, 0xa7, 0xa4, 0xe7, 0xe2, 0x88, 0xb3, 0xe0, 0xca, 0x23, 0x13, 0x5c, 0x3b, 0xcc, 0x76, 0x9e, 0x9f, 0xa0, 0x5f, 0x89, 0xba, 0x7f, 0xf6, 0xdf, 0x7c}}
return a, nil
}
@@ -152,7 +152,7 @@ func cvmdis_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "cvmdis_tracer.js", size: 4176, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "cvmdis_tracer.js", size: 4176, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x94, 0x3a, 0x41, 0xf3, 0xcc, 0xc7, 0x6b, 0xc9, 0x57, 0x20, 0x4f, 0x7d, 0x7b, 0xd1, 0x7b, 0x6e, 0xa9, 0x4d, 0xf, 0x22, 0x9b, 0x44, 0x5d, 0xc9, 0x2d, 0x3, 0x92, 0xd6, 0x5, 0xcc, 0x6e, 0xaf}}
return a, nil
}
@@ -172,7 +172,7 @@ func noop_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "noop_tracer.js", size: 1246, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "noop_tracer.js", size: 1246, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x53, 0x86, 0xe2, 0x1d, 0xe3, 0x12, 0xe0, 0x64, 0xe8, 0xde, 0xc5, 0x3, 0xef, 0x72, 0x42, 0xfc, 0x36, 0x63, 0x0, 0xd, 0x97, 0x87, 0xe5, 0x70, 0xf2, 0xf1, 0x8d, 0xc3, 0xd2, 0x38, 0x61, 0xdd}}
return a, nil
}
@@ -192,7 +192,7 @@ func opcount_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "opcount_tracer.js", size: 1347, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "opcount_tracer.js", size: 1347, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x64, 0xc8, 0x8e, 0xca, 0x9e, 0xb6, 0x7c, 0x80, 0xb1, 0xdb, 0xc5, 0x71, 0x73, 0x39, 0x9f, 0x4a, 0x49, 0x3, 0x41, 0xb9, 0x8f, 0x8b, 0xce, 0x16, 0x8d, 0xb, 0xea, 0x42, 0x1d, 0x89, 0xcd, 0x57}}
return a, nil
}
@@ -212,7 +212,7 @@ func prestate_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "prestate_tracer.js", size: 4209, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "prestate_tracer.js", size: 4209, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0x9d, 0xc7, 0x88, 0x23, 0xf5, 0xb4, 0xa0, 0xb1, 0x96, 0xc4, 0x22, 0x3b, 0xd0, 0xb9, 0xe9, 0xf3, 0x6c, 0xdc, 0xb0, 0x3e, 0x7c, 0x48, 0xf7, 0x2f, 0xf3, 0x7d, 0xfb, 0x46, 0xb2, 0xd5, 0xc6}}
return a, nil
}
@@ -232,7 +232,7 @@ func trigram_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "trigram_tracer.js", size: 1763, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "trigram_tracer.js", size: 1763, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x62, 0x83, 0xd9, 0xa6, 0x51, 0x46, 0x23, 0x4a, 0x33, 0x2, 0xd6, 0x16, 0x56, 0x16, 0xe, 0x26, 0x7a, 0xd1, 0x7c, 0xfb, 0x31, 0xd5, 0x39, 0x44, 0x44, 0x16, 0xcb, 0x6c, 0x9a, 0x6e, 0xdd, 0x9a}}
return a, nil
}
@@ -252,7 +252,7 @@ func unigram_tracerJs() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "unigram_tracer.js", size: 1485, mode: os.FileMode(0664), modTime: time.Unix(1614794073, 0)}
+ info := bindataFileInfo{name: "unigram_tracer.js", size: 1485, mode: os.FileMode(0664), modTime: time.Unix(1653306979, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0xef, 0x2a, 0x9c, 0xcc, 0xab, 0x31, 0x35, 0x57, 0xc7, 0x9c, 0x44, 0xd8, 0x55, 0xef, 0x89, 0x62, 0x2f, 0x6, 0x19, 0xc6, 0x50, 0x8a, 0xf1, 0x13, 0x62, 0xfb, 0xde, 0x12, 0x9d, 0x96, 0x56}}
return a, nil
}
diff --git a/xcb/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json b/xcb/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
index fa4237c03..816907d96 100644
--- a/xcb/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
+++ b/xcb/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
@@ -59,7 +59,7 @@
"value": "0x0"
}
],
- "error": "cvm: invalid jump destination",
+ "error": "invalid jump destination",
"from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
"energy": "0x435c8",
"energyUsed": "0x435c8",
diff --git a/xcb/tracers/testdata/call_tracer_inner_instafail.json b/xcb/tracers/testdata/call_tracer_inner_instafail.json
index ab358c2a5..2b0544b56 100644
--- a/xcb/tracers/testdata/call_tracer_inner_instafail.json
+++ b/xcb/tracers/testdata/call_tracer_inner_instafail.json
@@ -2,7 +2,7 @@
"genesis": {
"difficulty": "117067574",
"extraData": "0xd783010502846765746887676f312e372e33856c696e7578",
- "gasLimit": "4712380",
+ "energyLimit": "4712380",
"hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486",
"miner": "0x0c062b329265c965deef1eede55183b3acb8f611",
"mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d",
@@ -45,7 +45,7 @@
"number": "24974",
"difficulty": "117067574",
"timestamp": "1479891162",
- "gasLimit": "4712388",
+ "energyLimit": "4712388",
"miner": "0xc822ef32e6d26e170b70cf761e204c1806265914"
},
"input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745",
@@ -54,8 +54,8 @@
"from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
"to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"value": "0x0",
- "gas": "0x1a466",
- "gasUsed": "0x1dc6",
+ "energy": "0x1a466",
+ "energyUsed": "0x1dc6",
"input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
"output": "0x",
"calls": [
diff --git a/xcb/tracers/testdata/call_tracer_inner_throw_outer_revert.json b/xcb/tracers/testdata/call_tracer_inner_throw_outer_revert.json
index 2fa493574..d636f8e11 100644
--- a/xcb/tracers/testdata/call_tracer_inner_throw_outer_revert.json
+++ b/xcb/tracers/testdata/call_tracer_inner_throw_outer_revert.json
@@ -53,7 +53,7 @@
"result": {
"calls": [
{
- "error": "invalid opcode 0xfe",
+ "error": "invalid opcode: opcode 0xfe not defined",
"from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
"energy": "0x75fe3",
"energyUsed": "0x75fe3",
diff --git a/xcb/tracers/testdata/call_tracer_revert_reason.json b/xcb/tracers/testdata/call_tracer_revert_reason.json
index 30d84649c..fc9e78950 100644
--- a/xcb/tracers/testdata/call_tracer_revert_reason.json
+++ b/xcb/tracers/testdata/call_tracer_revert_reason.json
@@ -1,7 +1,7 @@
{
"context": {
"difficulty": "2",
- "gasLimit": "8000000",
+ "energyLimit": "8000000",
"miner": "0x0000000000000000000000000000000000000000",
"number": "3212651",
"timestamp": "1597246515"
@@ -39,7 +39,7 @@
},
"difficulty": "3509749784",
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
- "gasLimit": "4727564",
+ "energyLimit": "4727564",
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
@@ -53,8 +53,8 @@
"result": {
"error": "execution reverted",
"from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
- "gas": "0x2d7308",
- "gasUsed": "0x588",
+ "energy": "0x2d7308",
+ "energyUsed": "0x588",
"input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
"to": "0xf58833cf0c791881b494eb79d461e08a1f043f52",
"type": "CALL",
diff --git a/xcb/tracers/testdata/call_tracer_throw.json b/xcb/tracers/testdata/call_tracer_throw.json
index 60ac238a0..134c71607 100644
--- a/xcb/tracers/testdata/call_tracer_throw.json
+++ b/xcb/tracers/testdata/call_tracer_throw.json
@@ -44,7 +44,7 @@
},
"input": "0xf8a08206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d597509470c9217d814985faef62b124420f8dfbddd96433",
"result": {
- "error": "cvm: invalid jump destination",
+ "error": "invalid jump destination",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
"energy": "0x37b38",
"energyUsed": "0x37b38",
diff --git a/xcb/tracers/tracer.go b/xcb/tracers/tracer.go
index 669fa1b53..bf09a5001 100644
--- a/xcb/tracers/tracer.go
+++ b/xcb/tracers/tracer.go
@@ -163,7 +163,7 @@ func (sw *stackWrapper) peek(idx int) *big.Int {
log.Warn("Tracer accessed out of bound stack", "size", len(sw.stack.Data()), "index", idx)
return new(big.Int)
}
- return sw.stack.Data()[len(sw.stack.Data())-idx-1]
+ return sw.stack.Back(idx).ToBig()
}
// pushObject assembles a JSVM object wrapping a swappable stack and pushes it
@@ -554,7 +554,7 @@ func (jst *Tracer) CaptureStart(from common.Address, to common.Address, create b
}
// CaptureState implements the Tracer interface to trace a single step of VM execution.
-func (jst *Tracer) CaptureState(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (jst *Tracer) CaptureState(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, rStack *vm.ReturnStack, rdata []byte, contract *vm.Contract, depth int, err error) error {
if jst.err == nil {
// Initialize the context if it wasn't done yet
if !jst.inited {
@@ -593,7 +593,7 @@ func (jst *Tracer) CaptureState(env *vm.CVM, pc uint64, op vm.OpCode, energy, co
// CaptureFault implements the Tracer interface to trace an execution fault
// while running an opcode.
-func (jst *Tracer) CaptureFault(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (jst *Tracer) CaptureFault(env *vm.CVM, pc uint64, op vm.OpCode, energy, cost uint64, memory *vm.Memory, stack *vm.Stack, rStack *vm.ReturnStack, contract *vm.Contract, depth int, err error) error {
if jst.err == nil {
// Apart from the error, everything matches the previous invocation
jst.errorValue = new(string)
diff --git a/xcb/tracers/tracer_test.go b/xcb/tracers/tracer_test.go
index 8f132dd7d..ad7c66bd6 100644
--- a/xcb/tracers/tracer_test.go
+++ b/xcb/tracers/tracer_test.go
@@ -169,10 +169,10 @@ func TestHaltBetweenSteps(t *testing.T) {
env := vm.NewCVM(vm.Context{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
contract := vm.NewContract(&account{}, &account{}, big.NewInt(0), 0)
- tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, contract, 0, nil)
+ tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, nil, contract, 0, nil)
timeout := errors.New("stahp")
tracer.Stop(timeout)
- tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, contract, 0, nil)
+ tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, nil, contract, 0, nil)
if _, err := tracer.GetResult(); err.Error() != timeout.Error() {
t.Errorf("Expected timeout error, got %v", err)
diff --git a/xcb/tracers/tracers_test.go b/xcb/tracers/tracers_test.go
index 0abf42f4c..e69ef0ed8 100644
--- a/xcb/tracers/tracers_test.go
+++ b/xcb/tracers/tracers_test.go
@@ -185,7 +185,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(cvm, msg, new(core.EnergyPool).AddEnergy(tx.Energy()))
- if _, _, _, err = st.TransitionDb(); err != nil {
+ if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
@@ -205,7 +205,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracer(t *testing.T) {
- t.Skip("Need to recompile contracts using energyPrice and energyAmount instead of gasPrice and gasAmount")
+ t.Skip("Need to recompile contracts using energyPrice and energyAmount instead of gasPrice and gasAmount")
files, err := ioutil.ReadDir("testdata")
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
@@ -260,7 +260,7 @@ func TestCallTracer(t *testing.T) {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(cvm, msg, new(core.EnergyPool).AddEnergy(tx.Energy()))
- if _, _, _, err = st.TransitionDb(); err != nil {
+ if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
diff --git a/xcbclient/xcbclient_test.go b/xcbclient/xcbclient_test.go
index 9d6dde8a8..021a5eac3 100644
--- a/xcbclient/xcbclient_test.go
+++ b/xcbclient/xcbclient_test.go
@@ -177,16 +177,19 @@ var (
func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
// Generate test chain.
genesis, blocks := generateTestChain()
+ // Create node
- // Start Core service.
- var xcbservice *xcb.Core
n, err := node.New(&node.Config{})
- n.Register(func(ctx *node.ServiceContext) (node.Service, error) {
- config := &xcb.Config{Genesis: genesis}
- config.Cryptore.PowMode = cryptore.ModeFake
- xcbservice, err = xcb.New(ctx, config)
- return xcbservice, err
- })
+ if err != nil {
+ t.Fatalf("can't create new node: %v", err)
+ }
+ // Create Core Service
+ config := &xcb.Config{Genesis: genesis}
+ config.Cryptore.PowMode = cryptore.ModeFake
+ xcbservice, err := xcb.New(n, config)
+ if err != nil {
+ t.Fatalf("can't create new core service: %v", err)
+ }
// Import the test chain.
if err := n.Start(); err != nil {
@@ -223,7 +226,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
func TestHeader(t *testing.T) {
backend, chain := newTestBackend(t)
client, _ := backend.Attach()
- defer backend.Stop()
+ defer backend.Close()
defer client.Close()
tests := map[string]struct {
@@ -267,7 +270,7 @@ func TestHeader(t *testing.T) {
func TestBalanceAt(t *testing.T) {
backend, _ := newTestBackend(t)
client, _ := backend.Attach()
- defer backend.Stop()
+ defer backend.Close()
defer client.Close()
tests := map[string]struct {
@@ -313,7 +316,7 @@ func TestBalanceAt(t *testing.T) {
func TestTransactionInBlockInterrupted(t *testing.T) {
backend, _ := newTestBackend(t)
client, _ := backend.Attach()
- defer backend.Stop()
+ defer backend.Close()
defer client.Close()
ec := NewClient(client)
@@ -331,7 +334,7 @@ func TestTransactionInBlockInterrupted(t *testing.T) {
func TestNetworkID(t *testing.T) {
backend, _ := newTestBackend(t)
client, _ := backend.Attach()
- defer backend.Stop()
+ defer backend.Close()
defer client.Close()
ec := NewClient(client)
diff --git a/xcbdb/dbtest/testsuite.go b/xcbdb/dbtest/testsuite.go
index 3739aca67..22ec355e6 100644
--- a/xcbdb/dbtest/testsuite.go
+++ b/xcbdb/dbtest/testsuite.go
@@ -32,31 +32,32 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
tests := []struct {
content map[string]string
prefix string
+ start string
order []string
}{
// Empty databases should be iterable
- {map[string]string{}, "", nil},
- {map[string]string{}, "non-existent-prefix", nil},
+ {map[string]string{}, "", "", nil},
+ {map[string]string{}, "non-existent-prefix", "", nil},
// Single-item databases should be iterable
- {map[string]string{"key": "val"}, "", []string{"key"}},
- {map[string]string{"key": "val"}, "k", []string{"key"}},
- {map[string]string{"key": "val"}, "l", nil},
+ {map[string]string{"key": "val"}, "", "", []string{"key"}},
+ {map[string]string{"key": "val"}, "k", "", []string{"key"}},
+ {map[string]string{"key": "val"}, "l", "", nil},
// Multi-item databases should be fully iterable
{
map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"},
- "",
+ "", "",
[]string{"k1", "k2", "k3", "k4", "k5"},
},
{
map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"},
- "k",
+ "k", "",
[]string{"k1", "k2", "k3", "k4", "k5"},
},
{
map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"},
- "l",
+ "l", "",
nil,
},
// Multi-item databases should be prefix-iterable
@@ -65,7 +66,7 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
"ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3",
"kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3",
},
- "ka",
+ "ka", "",
[]string{"ka1", "ka2", "ka3", "ka4", "ka5"},
},
{
@@ -73,7 +74,24 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
"ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3",
"kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3",
},
- "kc",
+ "kc", "",
+ nil,
+ },
+ // Multi-item databases should be prefix-iterable with start position
+ {
+ map[string]string{
+ "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3",
+ "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3",
+ },
+ "ka", "3",
+ []string{"ka3", "ka4", "ka5"},
+ },
+ {
+ map[string]string{
+ "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3",
+ "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3",
+ },
+ "ka", "8",
nil,
},
}
@@ -86,7 +104,7 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
}
}
// Iterate over the database with the given configs and verify the results
- it, idx := db.NewIteratorWithPrefix([]byte(tt.prefix)), 0
+ it, idx := db.NewIterator([]byte(tt.prefix), []byte(tt.start)), 0
for it.Next() {
if len(tt.order) <= idx {
t.Errorf("test %d: prefix=%q more items than expected: checking idx=%d (key %q), expecting len=%d", i, tt.prefix, idx, it.Key(), len(tt.order))
@@ -124,62 +142,57 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
}
{
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
got, want := iterateKeys(it), keys
if err := it.Error(); err != nil {
t.Fatal(err)
}
- it.Release()
if !reflect.DeepEqual(got, want) {
t.Errorf("Iterator: got: %s; want: %s", got, want)
}
}
{
- it := db.NewIteratorWithPrefix([]byte("1"))
+ it := db.NewIterator([]byte("1"), nil)
got, want := iterateKeys(it), []string{"1", "10", "11", "12"}
if err := it.Error(); err != nil {
t.Fatal(err)
}
- it.Release()
if !reflect.DeepEqual(got, want) {
- t.Errorf("IteratorWithPrefix(1): got: %s; want: %s", got, want)
+ t.Errorf("IteratorWith(1,nil): got: %s; want: %s", got, want)
}
}
{
- it := db.NewIteratorWithPrefix([]byte("5"))
+ it := db.NewIterator([]byte("5"), nil)
got, want := iterateKeys(it), []string{}
if err := it.Error(); err != nil {
t.Fatal(err)
}
- it.Release()
if !reflect.DeepEqual(got, want) {
- t.Errorf("IteratorWithPrefix(1): got: %s; want: %s", got, want)
+ t.Errorf("IteratorWith(5,nil): got: %s; want: %s", got, want)
}
}
{
- it := db.NewIteratorWithStart([]byte("2"))
+ it := db.NewIterator(nil, []byte("2"))
got, want := iterateKeys(it), []string{"2", "20", "21", "22", "3", "4", "6"}
if err := it.Error(); err != nil {
t.Fatal(err)
}
- it.Release()
if !reflect.DeepEqual(got, want) {
- t.Errorf("IteratorWithStart(2): got: %s; want: %s", got, want)
+ t.Errorf("IteratorWith(nil,2): got: %s; want: %s", got, want)
}
}
{
- it := db.NewIteratorWithStart([]byte("5"))
+ it := db.NewIterator(nil, []byte("5"))
got, want := iterateKeys(it), []string{"6"}
if err := it.Error(); err != nil {
t.Fatal(err)
}
- it.Release()
if !reflect.DeepEqual(got, want) {
- t.Errorf("IteratorWithStart(2): got: %s; want: %s", got, want)
+ t.Errorf("IteratorWith(nil,5): got: %s; want: %s", got, want)
}
}
})
@@ -246,11 +259,10 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
}
{
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
if got, want := iterateKeys(it), []string{"1", "2", "3", "4"}; !reflect.DeepEqual(got, want) {
t.Errorf("got: %s; want: %s", got, want)
}
- it.Release()
}
b.Reset()
@@ -267,11 +279,10 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
}
{
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
if got, want := iterateKeys(it), []string{"2", "3", "4", "5", "6"}; !reflect.DeepEqual(got, want) {
t.Errorf("got: %s; want: %s", got, want)
}
- it.Release()
}
})
@@ -296,11 +307,10 @@ func TestDatabaseSuite(t *testing.T, New func() xcbdb.KeyValueStore) {
t.Fatal(err)
}
- it := db.NewIterator()
+ it := db.NewIterator(nil, nil)
if got := iterateKeys(it); !reflect.DeepEqual(got, want) {
t.Errorf("got: %s; want: %s", got, want)
}
- it.Release()
})
}
@@ -311,5 +321,6 @@ func iterateKeys(it xcbdb.Iterator) []string {
keys = append(keys, string(it.Key()))
}
sort.Strings(keys)
+ it.Release()
return keys
}
diff --git a/xcbdb/iterator.go b/xcbdb/iterator.go
index 49ba6de41..460244a83 100644
--- a/xcbdb/iterator.go
+++ b/xcbdb/iterator.go
@@ -51,16 +51,11 @@ type Iterator interface {
// Iteratee wraps the NewIterator methods of a backing data store.
type Iteratee interface {
- // NewIterator creates a binary-alphabetical iterator over the entire keyspace
- // contained within the key-value database.
- NewIterator() Iterator
-
- // NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
- // database content starting at a particular initial key (or after, if it does
- // not exist).
- NewIteratorWithStart(start []byte) Iterator
-
- // NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
- // of database content with a particular key prefix.
- NewIteratorWithPrefix(prefix []byte) Iterator
+ // NewIterator creates a binary-alphabetical iterator over a subset
+ // of database content with a particular key prefix, starting at a particular
+ // initial key (or after, if it does not exist).
+ //
+ // Note: This method assumes that the prefix is NOT part of the start, so there's
+ // no need for the caller to prepend the prefix to the start
+ NewIterator(prefix []byte, start []byte) Iterator
}
diff --git a/xcbdb/leveldb/leveldb.go b/xcbdb/leveldb/leveldb.go
index be3cd3cce..79b1bf67c 100644
--- a/xcbdb/leveldb/leveldb.go
+++ b/xcbdb/leveldb/leveldb.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see .
+//go:build !js
// +build !js
// Package leveldb implements the key-value database layer based on LevelDB.
@@ -183,23 +184,11 @@ func (db *Database) NewBatch() xcbdb.Batch {
}
}
-// NewIterator creates a binary-alphabetical iterator over the entire keyspace
-// contained within the leveldb database.
-func (db *Database) NewIterator() xcbdb.Iterator {
- return db.db.NewIterator(new(util.Range), nil)
-}
-
-// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
-// database content starting at a particular initial key (or after, if it does
-// not exist).
-func (db *Database) NewIteratorWithStart(start []byte) xcbdb.Iterator {
- return db.db.NewIterator(&util.Range{Start: start}, nil)
-}
-
-// NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset
-// of database content with a particular key prefix.
-func (db *Database) NewIteratorWithPrefix(prefix []byte) xcbdb.Iterator {
- return db.db.NewIterator(util.BytesPrefix(prefix), nil)
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (db *Database) NewIterator(prefix []byte, start []byte) xcbdb.Iterator {
+ return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
}
// Stat returns a particular internal stat of the database.
@@ -488,3 +477,12 @@ func (r *replayer) Delete(key []byte) {
}
r.failure = r.writer.Delete(key)
}
+
+// bytesPrefixRange returns key range that satisfy
+// - the given prefix, and
+// - the given seek position
+func bytesPrefixRange(prefix, start []byte) *util.Range {
+ r := util.BytesPrefix(prefix)
+ r.Start = append(r.Start, start...)
+ return r
+}
diff --git a/xcbdb/memorydb/memorydb.go b/xcbdb/memorydb/memorydb.go
index f7f57704b..81bafaac9 100644
--- a/xcbdb/memorydb/memorydb.go
+++ b/xcbdb/memorydb/memorydb.go
@@ -129,30 +129,30 @@ func (db *Database) NewBatch() xcbdb.Batch {
}
}
-// NewIterator creates a binary-alphabetical iterator over the entire keyspace
-// contained within the memory database.
-func (db *Database) NewIterator() xcbdb.Iterator {
- return db.NewIteratorWithStart(nil)
-}
-
-// NewIteratorWithStart creates a binary-alphabetical iterator over a subset of
-// database content starting at a particular initial key (or after, if it does
-// not exist).
-func (db *Database) NewIteratorWithStart(start []byte) xcbdb.Iterator {
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (db *Database) NewIterator(prefix []byte, start []byte) xcbdb.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
var (
- st = string(start)
+ pr = string(prefix)
+ st = string(append(prefix, start...))
keys = make([]string, 0, len(db.db))
values = make([][]byte, 0, len(db.db))
)
- // Collect the keys from the memory database corresponding to the given start
+ // Collect the keys from the memory database corresponding to the given prefix
+ // and start
for key := range db.db {
+ if !strings.HasPrefix(key, pr) {
+ continue
+ }
if key >= st {
keys = append(keys, key)
}
}
+
// Sort the items and retrieve the associated values
sort.Strings(keys)
for _, key := range keys {
diff --git a/xcbstats/xcbstats.go b/xcbstats/xcbstats.go
index 949bc75ba..dd0ed144f 100644
--- a/xcbstats/xcbstats.go
+++ b/xcbstats/xcbstats.go
@@ -22,6 +22,9 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/core-coin/go-core/miner"
+ "github.com/core-coin/go-core/node"
+ "github.com/core-coin/go-core/xcb/downloader"
"math/big"
"net/http"
"regexp"
@@ -57,23 +60,33 @@ const (
chainHeadChanSize = 10
)
-type txPool interface {
- // SubscribeNewTxsEvent should return an event subscription of
- // NewTxsEvent and send events to the given channel.
- SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
+// backend encompasses the bare-minimum functionality needed for xcbstats reporting
+type backend interface {
+ SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
+ SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription
+ CurrentHeader() *types.Header
+ HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
+ GetTd(ctx context.Context, hash common.Hash) *big.Int
+ Stats() (pending int, queued int)
+ Downloader() *downloader.Downloader
}
-type blockChain interface {
- SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
+// fullNodeBackend encompasses the functionality necessary for a full node
+// reporting to xcbstats
+type fullNodeBackend interface {
+ backend
+ Miner() *miner.Miner
+ BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
+ CurrentBlock() *types.Block
+ SuggestPrice(ctx context.Context) (*big.Int, error)
}
// Service implements an Core netstats reporting daemon that pushes local
// chain statistics up to a monitoring server.
type Service struct {
- server *p2p.Server // Peer-to-peer server to retrieve networking infos
- xcb *xcb.Core // Full Core service if monitoring a full node
- les *les.LightCore // Light Core service if monitoring a light node
- engine consensus.Engine // Consensus engine to retrieve variadic block fields
+ server *p2p.Server // Peer-to-peer server to retrieve networking infos
+ backend backend
+ engine consensus.Engine // Consensus engine to retrieve variadic block fields
node string // Name of the node to display on the monitoring page
pass string // Password to authorize access to the monitoring page
@@ -129,50 +142,37 @@ func (w *connWrapper) Close() error {
}
// New returns a monitoring service ready for stats reporting.
-func New(url string, xcbServ *xcb.Core, lesServ *les.LightCore) (*Service, error) {
+func New(node *node.Node, backend backend, engine consensus.Engine, url string) error {
// Parse the netstats connection url
re := regexp.MustCompile("([^:@]*)(:([^@]*))?@(.+)")
parts := re.FindStringSubmatch(url)
if len(parts) != 5 {
- return nil, fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url)
+ return fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url)
+ }
+ ethstats := &Service{
+ backend: backend,
+ engine: engine,
+ server: node.Server(),
+ node: parts[1],
+ pass: parts[3],
+ host: parts[4],
+ pongCh: make(chan struct{}),
+ histCh: make(chan []uint64, 1),
}
- // Assemble and return the stats service
- var engine consensus.Engine
- if xcbServ != nil {
- engine = xcbServ.Engine()
- } else {
- engine = lesServ.Engine()
- }
- return &Service{
- xcb: xcbServ,
- les: lesServ,
- engine: engine,
- node: parts[1],
- pass: parts[3],
- host: parts[4],
- pongCh: make(chan struct{}),
- histCh: make(chan []uint64, 1),
- }, nil
-}
-
-// Protocols implements node.Service, returning the P2P network protocols used
-// by the stats service (nil as it doesn't use the devp2p overlay network).
-func (s *Service) Protocols() []p2p.Protocol { return nil }
-// APIs implements node.Service, returning the RPC API endpoints provided by the
-// stats service (nil as it doesn't provide any user callable APIs).
-func (s *Service) APIs() []rpc.API { return nil }
+ node.RegisterLifecycle(ethstats)
+ return nil
+}
-// Start implements node.Service, starting up the monitoring and reporting daemon.
-func (s *Service) Start(server *p2p.Server) error {
- s.server = server
+// Start implements node.Lifecycle, starting up the monitoring and reporting daemon.
+func (s *Service) Start() error {
go s.loop()
log.Info("Stats daemon started")
return nil
}
-// Stop implements node.Service, terminating the monitoring and reporting daemon.
+// Stop implements node.Lifecycle, terminating the monitoring and reporting daemon.
func (s *Service) Stop() error {
log.Info("Stats daemon stopped")
return nil
@@ -182,22 +182,13 @@ func (s *Service) Stop() error {
// until termination.
func (s *Service) loop() {
// Subscribe to chain events to execute updates on
- var blockchain blockChain
- var txpool txPool
- if s.xcb != nil {
- blockchain = s.xcb.BlockChain()
- txpool = s.xcb.TxPool()
- } else {
- blockchain = s.les.BlockChain()
- txpool = s.les.TxPool()
- }
chainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)
- headSub := blockchain.SubscribeChainHeadEvent(chainHeadCh)
+ headSub := s.backend.SubscribeChainHeadEvent(chainHeadCh)
defer headSub.Unsubscribe()
txEventCh := make(chan core.NewTxsEvent, txChanSize)
- txSub := txpool.SubscribeNewTxsEvent(txEventCh)
+ txSub := s.backend.SubscribeNewTxsEvent(txEventCh)
defer txSub.Unsubscribe()
// Start a goroutine that exhausts the subscriptions to avoid events piling up
@@ -605,13 +596,14 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
txs []txStats
uncles []*types.Header
)
- if s.xcb != nil {
- // Full nodes have all needed information available
+ // Check whether the backend is a full node.
+ fullBackend, ok := s.backend.(fullNodeBackend)
+ if ok {
if block == nil {
- block = s.xcb.BlockChain().CurrentBlock()
+ block = fullBackend.CurrentBlock()
}
header = block.Header()
- td = s.xcb.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+ td = fullBackend.GetTd(context.Background(), header.Hash())
txs = make([]txStats, len(block.Transactions()))
for i, tx := range block.Transactions() {
@@ -623,9 +615,9 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
if block != nil {
header = block.Header()
} else {
- header = s.les.BlockChain().CurrentHeader()
+ header = s.backend.CurrentHeader()
}
- td = s.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+ td = s.backend.GetTd(context.Background(), header.Hash())
txs = []txStats{}
}
// Assemble and return the block stats
@@ -658,12 +650,7 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
indexes = append(indexes, list...)
} else {
// No indexes requested, send back the top ones
- var head int64
- if s.xcb != nil {
- head = s.xcb.BlockChain().CurrentHeader().Number.Int64()
- } else {
- head = s.les.BlockChain().CurrentHeader().Number.Int64()
- }
+ head := s.backend.CurrentHeader().Number.Int64()
start := head - historyUpdateRange + 1
if start < 0 {
start = 0
@@ -675,12 +662,13 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
// Gather the batch of blocks to report
history := make([]*blockStats, len(indexes))
for i, number := range indexes {
+ fullBackend, ok := s.backend.(fullNodeBackend)
// Retrieve the next block if it's known to us
var block *types.Block
- if s.xcb != nil {
- block = s.xcb.BlockChain().GetBlockByNumber(number)
+ if ok {
+ block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?
} else {
- if header := s.les.BlockChain().GetHeaderByNumber(number); header != nil {
+ if header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {
block = types.NewBlockWithHeader(header)
}
}
@@ -718,12 +706,7 @@ type pendStats struct {
// it to the stats server.
func (s *Service) reportPending(conn *connWrapper) error {
// Retrieve the pending count from the local blockchain
- var pending int
- if s.xcb != nil {
- pending, _ = s.xcb.TxPool().Stats()
- } else {
- pending = s.les.TxPool().Stats()
- }
+ pending, _ := s.backend.Stats()
// Assemble the transaction stats and send it to the server
log.Trace("Sending pending transactions to xcbstats", "count", pending)
@@ -750,7 +733,7 @@ type nodeStats struct {
Uptime int `json:"uptime"`
}
-// reportPending retrieves various stats about the node at the networking and
+// reportStats retrieves various stats about the node at the networking and
// mining layer and reports it to the stats server.
func (s *Service) reportStats(conn *connWrapper) error {
// Gather the syncing and mining infos from the local miner instance
@@ -760,18 +743,20 @@ func (s *Service) reportStats(conn *connWrapper) error {
syncing bool
energyprice int
)
- if s.xcb != nil {
- mining = s.xcb.Miner().Mining()
- hashrate = int(s.xcb.Miner().HashRate())
+ // Check whether the backend is a full node.
+ fullBackend, ok := s.backend.(fullNodeBackend)
+ if ok {
+ mining = fullBackend.Miner().Mining()
+ hashrate = int(fullBackend.Miner().HashRate())
- sync := s.xcb.Downloader().Progress()
- syncing = s.xcb.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock
+ sync := fullBackend.Downloader().Progress()
+ syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
- price, _ := s.xcb.APIBackend.SuggestPrice(context.Background())
+ price, _ := fullBackend.SuggestPrice(context.Background())
energyprice = int(price.Uint64())
} else {
- sync := s.les.Downloader().Progress()
- syncing = s.les.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock
+ sync := s.backend.Downloader().Progress()
+ syncing = s.backend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
}
// Assemble the node stats and send it to the server
log.Trace("Sending node details to xcbstats")