From 9f3200764e63f6ff4d510132db1484d49d6ddd9a Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef Date: Thu, 1 Oct 2020 12:01:16 -0700 Subject: [PATCH 001/105] compute polynomials in G2 using small scalar multiplications only --- crypto/bls12381_utils.c | 3 +++ crypto/dkg_core.c | 32 +++++++++----------------------- crypto/dkg_test.go | 8 ++++---- 3 files changed, 16 insertions(+), 27 deletions(-) diff --git a/crypto/bls12381_utils.c b/crypto/bls12381_utils.c index 6d955a6c37b..c6818892217 100644 --- a/crypto/bls12381_utils.c +++ b/crypto/bls12381_utils.c @@ -573,6 +573,9 @@ void ep2_sum_vector(ep2_t jointy, ep2_st* y, int len){ for (int i=0; i 264 bits - bn_new(bn_x); - bn_new_size(bn_x, BITS_TO_DIGITS(Fr_BITS+N_bits_max)); - bn_set_dig(bn_x, 1); - // temp variables - ep2_t mult, acc; - ep2_new(mult); - ep2_new(acc); - ep2_set_infty(acc); - - for (int i=0; i < len_A; i++) { - ep2_mul_lwnaf(mult, (ep2_st*)&A[i], bn_x); - ep2_add_projc(acc, acc, mult); - bn_mul_dig(bn_x, bn_x, x); - // Use basic reduction as it's an 8-bits reduction - // in the worst case (|bn_x|<|r|+8 ) - bn_mod_basic(bn_x, bn_x, r); + bn_t bn_x; + bn_new(bn_x); + ep2_set_infty(y); + bn_set_dig(bn_x, x); + for (int i = len_A-1; i >= 0 ; i--) { + ep2_mul_lwnaf(y, y, bn_x); + ep2_add_projc(y, y, (ep2_st*)&A[i]); } - // export the result - ep2_copy(y, acc); - ep2_norm(y, y); - ep2_free(acc) - ep2_free(mult); + ep2_norm(y, y); // not necessary but left here to optimize the + // the multiple pairing computations with the same public key bn_free(bn_x); } diff --git a/crypto/dkg_test.go b/crypto/dkg_test.go index cf3ca5210b9..c397a661a16 100644 --- a/crypto/dkg_test.go +++ b/crypto/dkg_test.go @@ -16,10 +16,10 @@ import ( func TestDKG(t *testing.T) { t.Run("FeldmanVSS", testFeldmanVSSSimple) - t.Run("FeldmanVSS with Qualified set", testFeldmanVSSQual) - t.Run("FeldmanVSS Unhappy Path", testFeldmanVSSQualUnhappyPath) - t.Run("Joint Feldman", testJointFeldman) - t.Run("Joint Feldman Unhappy Path", testJointFeldmanUnhappyPath) + t.Run("FeldmanVSSQual", testFeldmanVSSQual) + t.Run("FeldmanVSSUnhappyPath", testFeldmanVSSQualUnhappyPath) + t.Run("JointFeldmanHappyPath", testJointFeldman) + t.Run("JointFeldmanUnhappyPath", testJointFeldmanUnhappyPath) } // optimal threshold (t) to allow the largest number of malicious nodes (m) From fa2433a3cca77012232ed422857548d859dacd21 Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef Date: Thu, 1 Oct 2020 12:59:32 -0700 Subject: [PATCH 002/105] compute polynomial images in Zr using a single mult per iteration --- crypto/dkg_core.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/crypto/dkg_core.c b/crypto/dkg_core.c index 401d53a7297..02db10f3e8a 100644 --- a/crypto/dkg_core.c +++ b/crypto/dkg_core.c @@ -29,40 +29,28 @@ void Zr_polynomialImage(bn_t image, ep2_t y, const bn_st *a, const int a_size, c bn_t r; bn_new(r); g2_get_ord(r); - - // powers of x - bn_t bn_x; // maximum is |n|+|r| --> 264 bits - ep_new(bn_x); - bn_new_size(bn_x, BITS_TO_DIGITS(Fr_BITS+N_bits_max)); - bn_set_dig(bn_x, 1); // temp variables - bn_t mult, acc; - bn_new(mult); // maximum --> 256+256 = 512 bits - bn_new_size(mult, BITS_TO_DIGITS(2*Fr_BITS)); - bn_new(acc); // maximum --> 512+1 = 513 bits - bn_new_size(acc, BITS_TO_DIGITS(2*Fr_BITS+1)); + bn_t acc; + bn_new(acc); + bn_new_size(acc, BITS_TO_DIGITS(Fr_BITS+8+1)); bn_set_dig(acc, 0); - for (int i=0; i= 0; i--) { + bn_mul_dig(acc, acc, x); + // Use basic reduction as it's an 9-bits reduction + // in the worst case 
(|acc|<|r|+9 ) bn_mod_basic(acc, acc, r); - // Use basic reduction as it's an 8-bits reduction - // in the worst case (|bn_x|<|r|+8 ) - bn_mul_dig(bn_x, bn_x, x); - bn_mod_basic(bn_x, bn_x, r); + bn_add(acc, acc, &a[i]); } - // copy the result - bn_copy(image, acc); + // export the result + bn_mod_basic(image, acc, r); // compute y = P(x).g2 if (y) g2_mul_gen(y, acc); bn_free(acc) - bn_free(mult); bn_free(r); - bn_free(bn_x); } // computes Q(x) = A_0 + A_1*x + ... + A_n*x^n in G2 From 458501430056f3f83faa330c2237df05e001fe53 Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef Date: Tue, 13 Oct 2020 19:48:52 -0700 Subject: [PATCH 003/105] refactor BLS tests --- crypto/bls_test.go | 542 +++++++++++++++++++++----------------- crypto/go.sum | 1 + crypto/sign_test_utils.go | 12 + 3 files changed, 309 insertions(+), 246 deletions(-) diff --git a/crypto/bls_test.go b/crypto/bls_test.go index 5e4048ffa2c..6e75b51287e 100644 --- a/crypto/bls_test.go +++ b/crypto/bls_test.go @@ -49,16 +49,22 @@ func TestBLSBLS12381Hasher(t *testing.T) { seed := make([]byte, KeyGenSeedMinLenBLSBLS12381) sk := randomSK(t, seed) sig := make([]byte, SignatureLenBLSBLS12381) + // empty hasher - _, err := sk.Sign(seed, nil) - assert.Error(t, err) - _, err = sk.PublicKey().Verify(sig, seed, nil) - assert.Error(t, err) + t.Run("Empty hasher", func(t *testing.T) { + _, err := sk.Sign(seed, nil) + assert.Error(t, err) + _, err = sk.PublicKey().Verify(sig, seed, nil) + assert.Error(t, err) + }) + // short size hasher - _, err = sk.Sign(seed, hash.NewSHA2_256()) - assert.Error(t, err) - _, err = sk.PublicKey().Verify(sig, seed, hash.NewSHA2_256()) - assert.Error(t, err) + t.Run("Empty hasher", func(t *testing.T) { + _, err := sk.Sign(seed, hash.NewSHA2_256()) + assert.Error(t, err) + _, err = sk.PublicKey().Verify(sig, seed, hash.NewSHA2_256()) + assert.Error(t, err) + }) } // TestBLSEncodeDecode tests encoding and decoding of BLS keys @@ -109,6 +115,7 @@ func TestAggregateSignatures(t *testing.T) { sks := make([]PrivateKey, 0, sigsNum) pks := make([]PublicKey, 0, sigsNum) seed := make([]byte, KeyGenSeedMinLenBLSBLS12381) + var aggSig,expectedSig Signature // create the signatures for i := 0; i < sigsNum; i++ { @@ -119,71 +126,84 @@ func TestAggregateSignatures(t *testing.T) { sks = append(sks, sk) pks = append(pks, sk.PublicKey()) } - // aggregate private keys - aggSk, err := AggregatePrivateKeys(sks) - require.NoError(t, err) - expectedSig, err := aggSk.Sign(input, kmac) - require.NoError(t, err) - // aggregate signatures - aggSig, err := AggregateSignatures(sigs) - require.NoError(t, err) - // First check: check the signatures are equal - assert.Equal(t, aggSig, expectedSig, - fmt.Sprintf("incorrect signature %s, should be %s, private keys are %s, input is %x", - aggSig, expectedSig, sks, input)) - // Second check: Verify the aggregated signature - valid, err := VerifySignatureOneMessage(pks, aggSig, input, kmac) - require.NoError(t, err) - assert.True(t, valid, - fmt.Sprintf("Verification of %s failed, signature should be %s private keys are %s, input is %x", - aggSig, expectedSig, sks, input)) + + // all signatures are valid + t.Run("all valid signatures", func(t *testing.T) { + // aggregate private keys + aggSk, err := AggregatePrivateKeys(sks) + require.NoError(t, err) + expectedSig, err := aggSk.Sign(input, kmac) + require.NoError(t, err) + // aggregate signatures + aggSig, err := AggregateSignatures(sigs) + require.NoError(t, err) + // First check: check the signatures are equal + assert.Equal(t, aggSig, expectedSig, + 
fmt.Sprintf("incorrect signature %s, should be %s, private keys are %s, input is %x", + aggSig, expectedSig, sks, input)) + // Second check: Verify the aggregated signature + valid, err := VerifySignatureOneMessage(pks, aggSig, input, kmac) + require.NoError(t, err) + assert.True(t, valid, + fmt.Sprintf("Verification of %s failed, signature should be %s private keys are %s, input is %x", + aggSig, expectedSig, sks, input)) + }) + // check if one the signatures is not correct - input[0] ^= 1 - randomIndex := mrand.Intn(sigsNum) - sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac) - input[0] ^= 1 - aggSig, err = AggregateSignatures(sigs) - require.NoError(t, err) - assert.NotEqual(t, aggSig, expectedSig, - fmt.Sprintf("signature %s shouldn't be %s private keys are %s, input is %x", - aggSig, expectedSig, sks, input)) - valid, err = VerifySignatureOneMessage(pks, aggSig, input, kmac) - require.NoError(t, err) - assert.False(t, valid, - fmt.Sprintf("verification of signature %s should fail, it shouldn't be %s private keys are %s, input is %x", - aggSig, expectedSig, sks, input)) - sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac) + t.Run("one invalid signatures", func(t *testing.T) { + input[0] ^= 1 + randomIndex := mrand.Intn(sigsNum) + sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac) + input[0] ^= 1 + aggSig, err = AggregateSignatures(sigs) + require.NoError(t, err) + assert.NotEqual(t, aggSig, expectedSig, + fmt.Sprintf("signature %s shouldn't be %s private keys are %s, input is %x", + aggSig, expectedSig, sks, input)) + valid, err := VerifySignatureOneMessage(pks, aggSig, input, kmac) + require.NoError(t, err) + assert.False(t, valid, + fmt.Sprintf("verification of signature %s should fail, it shouldn't be %s private keys are %s, input is %x", + aggSig, expectedSig, sks, input)) + sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac) + }) + + // check if one the public keys is not correct - randomIndex = mrand.Intn(sigsNum) - newSk := randomSK(t, seed) - sks[randomIndex] = newSk - pks[randomIndex] = newSk.PublicKey() - aggSk, err = AggregatePrivateKeys(sks) - require.NoError(t, err) - expectedSig, err = aggSk.Sign(input, kmac) - require.NoError(t, err) - assert.NotEqual(t, aggSig, expectedSig, - fmt.Sprintf("signature %s shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d", - aggSig, expectedSig, sks, input, randomIndex)) - valid, err = VerifySignatureOneMessage(pks, aggSig, input, kmac) - require.NoError(t, err) - assert.False(t, valid, - fmt.Sprintf("signature %s should fail, shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d", - aggSig, expectedSig, sks, input, randomIndex)) + t.Run("one invalid public key", func(t *testing.T) { + randomIndex := mrand.Intn(sigsNum) + newSk := randomSK(t, seed) + sks[randomIndex] = newSk + pks[randomIndex] = newSk.PublicKey() + aggSk, err := AggregatePrivateKeys(sks) + require.NoError(t, err) + expectedSig, err = aggSk.Sign(input, kmac) + require.NoError(t, err) + assert.NotEqual(t, aggSig, expectedSig, + fmt.Sprintf("signature %s shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d", + aggSig, expectedSig, sks, input, randomIndex)) + valid, err := VerifySignatureOneMessage(pks, aggSig, input, kmac) + require.NoError(t, err) + assert.False(t, valid, + fmt.Sprintf("signature %s should fail, shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d", + aggSig, expectedSig, sks, input, randomIndex)) + }) // test the empty list case - 
aggSk, err = AggregatePrivateKeys(sks[:0]) - assert.NoError(t, err) - expectedSig, err = aggSk.Sign(input, kmac) - aggSig, err = AggregateSignatures(sigs[:0]) - assert.NoError(t, err) - assert.Equal(t, aggSig, expectedSig, - fmt.Sprintf("wrong empty list key %s", sks)) - valid, err = VerifySignatureOneMessage(pks[:0], aggSig, input, kmac) - assert.Error(t, err) - assert.False(t, valid, - fmt.Sprintf("verification should fail with empty list key %s", sks)) + t.Run("empty list", func(t *testing.T) { + aggSk, err := AggregatePrivateKeys(sks[:0]) + assert.NoError(t, err) + expectedSig, err = aggSk.Sign(input, kmac) + aggSig, err = AggregateSignatures(sigs[:0]) + assert.NoError(t, err) + assert.Equal(t, aggSig, expectedSig, + fmt.Sprintf("wrong empty list key %s", sks)) + valid, err := VerifySignatureOneMessage(pks[:0], aggSig, input, kmac) + assert.Error(t, err) + assert.False(t, valid, + fmt.Sprintf("verification should fail with empty list key %s", sks)) + }) } // BLS multi-signature @@ -206,34 +226,36 @@ func TestAggregatePubKeys(t *testing.T) { sks = append(sks, sk) pks = append(pks, sk.PublicKey()) } - // aggregate private keys - aggSk, err := AggregatePrivateKeys(sks) - require.NoError(t, err) - expectedPk := aggSk.PublicKey() - // aggregate public keys - aggPk, err := AggregatePublicKeys(pks) - assert.NoError(t, err) - assert.True(t, expectedPk.Equals(aggPk), - fmt.Sprintf("incorrect public key %s, should be %s, public keys are %s", - aggPk, expectedPk, pks)) + + // consistent private and public key aggregation + t.Run("correctness check", func(t *testing.T) { + // aggregate private keys + aggSk, err := AggregatePrivateKeys(sks) + require.NoError(t, err) + expectedPk := aggSk.PublicKey() + // aggregate public keys + aggPk, err := AggregatePublicKeys(pks) + assert.NoError(t, err) + assert.True(t, expectedPk.Equals(aggPk), + fmt.Sprintf("incorrect public key %s, should be %s, public keys are %s", + aggPk, expectedPk, pks)) + }) // aggregate an empty list - aggSk, err = AggregatePrivateKeys(sks[:0]) - assert.NoError(t, err) - expectedPk = aggSk.PublicKey() - aggPk, err = AggregatePublicKeys(pks[:0]) - assert.NoError(t, err) - assert.True(t, expectedPk.Equals(aggPk), - fmt.Sprintf("incorrect generator %s, should be %s", - aggPk, expectedPk)) + t.Run("empty list", func(t *testing.T) { + aggSk, err := AggregatePrivateKeys(sks[:0]) + assert.NoError(t, err) + expectedPk := aggSk.PublicKey() + aggPk, err := AggregatePublicKeys(pks[:0]) + assert.NoError(t, err) + assert.True(t, expectedPk.Equals(aggPk), + fmt.Sprintf("incorrect generator %s, should be %s", + aggPk, expectedPk)) + }) } // BLS multi-signature -// public keys aggregation sanity check -// -// Aggregate n public keys and their respective private keys and compare -// the public key of the aggregated private key is equal to the aggregated -// public key +// public keys removal sanity check func TestRemovePubKeys(t *testing.T) { mrand.Seed(time.Now().UnixNano()) // number of keys to aggregate @@ -252,52 +274,64 @@ func TestRemovePubKeys(t *testing.T) { // random number of keys to remove pkToRemoveNum := mrand.Intn(pkNum) - - partialPk, err := RemovePublicKeys(aggPk, pks[:pkToRemoveNum]) - require.NoError(t, err) expectedPatrialPk, err := AggregatePublicKeys(pks[pkToRemoveNum:]) require.NoError(t, err) - BLSkey, ok := expectedPatrialPk.(*PubKeyBLSBLS12381) - require.True(t, ok) + // check correctness + t.Run("equality check", func(t *testing.T) { + partialPk, err := RemovePublicKeys(aggPk, pks[:pkToRemoveNum]) + require.NoError(t, err) + 
+ BLSkey, ok := expectedPatrialPk.(*PubKeyBLSBLS12381) + require.True(t, ok) - assert.True(t, BLSkey.Equals(partialPk), - fmt.Sprintf("incorrect key %s, should be %s, keys are %s, index is %d", - partialPk, BLSkey, pks, pkToRemoveNum)) + assert.True(t, BLSkey.Equals(partialPk), + fmt.Sprintf("incorrect key %s, should be %s, keys are %s, index is %d", + partialPk, BLSkey, pks, pkToRemoveNum)) + }) // remove an extra key and check inequality - extraPk := randomSK(t, seed).PublicKey() - partialPk, err = RemovePublicKeys(aggPk, []PublicKey{extraPk}) - assert.NoError(t, err) - assert.False(t, BLSkey.Equals(partialPk), - fmt.Sprintf("incorrect key %s, should not be %s, keys are %s, index is %d, extra key is %s", - partialPk, BLSkey, pks, pkToRemoveNum, extraPk)) + t.Run("inequality check", func(t *testing.T) { + extraPk := randomSK(t, seed).PublicKey() + partialPk, err := RemovePublicKeys(aggPk, []PublicKey{extraPk}) + assert.NoError(t, err) + + BLSkey, ok := expectedPatrialPk.(*PubKeyBLSBLS12381) + require.True(t, ok) + assert.False(t, BLSkey.Equals(partialPk), + fmt.Sprintf("incorrect key %s, should not be %s, keys are %s, index is %d, extra key is %s", + partialPk, BLSkey, pks, pkToRemoveNum, extraPk)) + }) // specific test to remove all keys - partialPk, err = RemovePublicKeys(aggPk, pks) - require.NoError(t, err) - expectedPatrialPk, err = AggregatePublicKeys([]PublicKey{}) - require.NoError(t, err) + t.Run("remove all keys", func(t *testing.T) { + partialPk, err := RemovePublicKeys(aggPk, pks) + require.NoError(t, err) + expectedPatrialPk, err := AggregatePublicKeys([]PublicKey{}) + require.NoError(t, err) - BLSkey, ok = expectedPatrialPk.(*PubKeyBLSBLS12381) - require.True(t, ok) + BLSkey, ok := expectedPatrialPk.(*PubKeyBLSBLS12381) + require.True(t, ok) - assert.True(t, BLSkey.Equals(partialPk), - fmt.Sprintf("incorrect key %s, should be infinity point, keys are %s", - partialPk, pks)) + assert.True(t, BLSkey.Equals(partialPk), + fmt.Sprintf("incorrect key %s, should be infinity point, keys are %s", + partialPk, pks)) + }) // specific test with an empty slice of keys to remove - partialPk, err = RemovePublicKeys(aggPk, pks) - require.NoError(t, err) - expectedPatrialPk, err = AggregatePublicKeys([]PublicKey{}) - require.NoError(t, err) + t.Run("remove empty list", func(t *testing.T) { + partialPk, err := RemovePublicKeys(aggPk, pks) + require.NoError(t, err) + expectedPatrialPk, err := AggregatePublicKeys([]PublicKey{}) + require.NoError(t, err) - BLSkey, ok = expectedPatrialPk.(*PubKeyBLSBLS12381) - require.True(t, ok) + BLSkey, ok := expectedPatrialPk.(*PubKeyBLSBLS12381) + require.True(t, ok) - assert.True(t, BLSkey.Equals(partialPk), - fmt.Sprintf("incorrect key %s, should be %s, keys are %s", - partialPk, BLSkey, pks)) + assert.True(t, BLSkey.Equals(partialPk), + fmt.Sprintf("incorrect key %s, should be %s, keys are %s", + partialPk, BLSkey, pks)) + }) } // BLS multi-signature @@ -315,7 +349,7 @@ func TestBatchVerify(t *testing.T) { // hasher kmac := NewBLSKMAC("test tag") // number of signatures to aggregate - sigsNum := mrand.Intn(100) + 1 + sigsNum := mrand.Intn(100) + 2 sigs := make([]Signature, 0, sigsNum) sks := make([]PrivateKey, 0, sigsNum) pks := make([]PublicKey, 0, sigsNum) @@ -333,17 +367,20 @@ func TestBatchVerify(t *testing.T) { expectedValid = append(expectedValid, true) } - // Batch verify the signatures // all signatures are valid - valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) - require.NoError(t, err) - assert.Equal(t, valid, 
expectedValid, - fmt.Sprintf("Verification of %s failed, private keys are %s, input is %x, results is %v", - sigs, sks, input, valid)) + t.Run("all signatures are valid", func(t *testing.T) { + valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) + require.NoError(t, err) + assert.Equal(t, valid, expectedValid, + fmt.Sprintf("Verification of %s failed, private keys are %s, input is %x, results is %v", + sigs, sks, input, valid)) + }) - // some signatures are invalid - invalidSigsNum := mrand.Intn(sigsNum-1) + 1 // pick a random number of invalid signatures - indices := make([]int, 0, sigsNum) // pick invalidSigsNum random indices + // pick a random number of invalid signatures + invalidSigsNum := mrand.Intn(sigsNum-1) + 1 + // generate a random permutation of indices to pick the + // invalid signatures. + indices := make([]int, 0, sigsNum) for i := 0; i < sigsNum; i++ { indices = append(indices, i) } @@ -351,51 +388,65 @@ func TestBatchVerify(t *testing.T) { indices[i], indices[j] = indices[j], indices[i] }) - for i := 0; i < invalidSigsNum; i++ { // alter invalidSigsNum random signatures - alterSignature(sigs[indices[i]]) - expectedValid[indices[i]] = false - } + // some signatures are invalid + t.Run("some signatures are invalid", func(t *testing.T) { - valid, err = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) - require.NoError(t, err) - assert.Equal(t, expectedValid, valid, - fmt.Sprintf("Verification of %s failed\n private keys are %s\n input is %x\n results is %v", - sigs, sks, input, valid)) + for i := 0; i < invalidSigsNum; i++ { // alter invalidSigsNum random signatures + alterSignature(sigs[indices[i]]) + expectedValid[indices[i]] = false + } + + valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) + require.NoError(t, err) + assert.Equal(t, expectedValid, valid, + fmt.Sprintf("Verification of %s failed\n private keys are %s\n input is %x\n results is %v", + sigs, sks, input, valid)) + }) // all signatures are invalid - for i := invalidSigsNum; i < sigsNum; i++ { // alter the remaining random signatures - alterSignature(sigs[indices[i]]) - expectedValid[indices[i]] = false - if i%5 == 0 { - sigs[indices[i]] = sigs[indices[i]][:3] // test the short signatures + t.Run("all signatures are invalid", func(t *testing.T) { + for i := invalidSigsNum; i < sigsNum; i++ { // alter the remaining random signatures + alterSignature(sigs[indices[i]]) + expectedValid[indices[i]] = false + if i%5 == 0 { + sigs[indices[i]] = sigs[indices[i]][:3] // test the short signatures + } } - } - - valid, err = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) - require.NoError(t, err) - assert.Equal(t, valid, expectedValid, - fmt.Sprintf("Verification of %s failed, private keys are %s, input is %x, results is %v", - sigs, sks, input, valid)) + + valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) + require.NoError(t, err) + assert.Equal(t, valid, expectedValid, + fmt.Sprintf("Verification of %s failed, private keys are %s, input is %x, results is %v", + sigs, sks, input, valid)) + }) // test the empty list case - valid, err = BatchVerifySignaturesOneMessage(pks[:0], sigs[:0], input, kmac) - require.Error(t, err) - assert.Equal(t, valid, []bool{}, - fmt.Sprintf("verification should fail with empty list key, got %v", valid)) + t.Run("empty list", func(t *testing.T) { + valid, err := BatchVerifySignaturesOneMessage(pks[:0], sigs[:0], input, kmac) + require.Error(t, err) + assert.Equal(t, valid, []bool{}, + fmt.Sprintf("verification should fail 
with empty list key, got %v", valid)) + }) + // test incorrect inputs - valid, err = BatchVerifySignaturesOneMessage(pks[:len(pks)-1], sigs, input, kmac) - require.Error(t, err) - assert.Equal(t, valid, []bool{}, - fmt.Sprintf("verification should fail with incorrect input lenghts, got %v", valid)) + t.Run("inconsistent inputs", func(t *testing.T) { + valid, err := BatchVerifySignaturesOneMessage(pks[:len(pks)-1], sigs, input, kmac) + require.Error(t, err) + assert.Equal(t, valid, []bool{}, + fmt.Sprintf("verification should fail with incorrect input lenghts, got %v", valid)) + }) + // test wrong hasher - for i := 0; i < sigsNum; i++ { - expectedValid[i] = false - } - valid, err = BatchVerifySignaturesOneMessage(pks, sigs, input, nil) - require.Error(t, err) - //require.Nil(t, aggSig) - assert.Equal(t, valid, expectedValid, - fmt.Sprintf("verification should fail with incorrect input lenghts, got %v", valid)) + t.Run("invalid hasher", func(t *testing.T) { + for i := 0; i < sigsNum; i++ { + expectedValid[i] = false + } + valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, nil) + require.Error(t, err) + + assert.Equal(t, valid, expectedValid, + fmt.Sprintf("verification should fail with incorrect input lenghts, got %v", valid)) + }) } // alter or fix a signature @@ -406,9 +457,9 @@ func alterSignature(s Signature) { s[10] ^= 1 } -// Batch verify bench when all signatures are valid -// (2) pairing compared to (2*n) pairings for the batch verification. -func BenchmarkBatchVerifyHappyPath(b *testing.B) { +// Batch verify bench in the happy (all signatures are valid) +// and unhappy path (only one signature is invalid) +func BenchmarkBatchVerify(b *testing.B) { // random message input := make([]byte, 100) _, _ = mrand.Read(input) @@ -427,45 +478,33 @@ func BenchmarkBatchVerifyHappyPath(b *testing.B) { sigs = append(sigs, s) pks = append(pks, sk.PublicKey()) } - b.ResetTimer() - for i := 0; i < b.N; i++ { - // all signatures are valid - _, _ = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) - } - b.StopTimer() -} -// Batch verify bench when some signatures are invalid -// - if only one signaure is invalid (a valid point in G1): -// less than (2*2*log(n)) pairings compared to (2*n) pairings for the simple verification. -// - if all signatures are invalid (valid points in G1): -// (2*2*(n-1)) pairings compared to (2*n) pairings for the simple verification. -func BenchmarkBatchVerifyUnHappyPath(b *testing.B) { - input := make([]byte, 100) - _, _ = mrand.Read(input) - kmac := NewBLSKMAC("bench tag") - sigsNum := 100 - sigs := make([]Signature, 0, sigsNum) - pks := make([]PublicKey, 0, sigsNum) - seed := make([]byte, KeyGenSeedMinLenBLSBLS12381) - - // create the signatures - for i := 0; i < sigsNum; i++ { - _, _ = mrand.Read(seed) - sk, _ := GeneratePrivateKey(BLSBLS12381, seed) - s, _ := sk.Sign(input, kmac) - sigs = append(sigs, s) - pks = append(pks, sk.PublicKey()) - } + // Batch verify bench when all signatures are valid + // (2) pairing compared to (2*n) pairings for the batch verification. 
+ b.Run("happy path", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + // all signatures are valid + _, _ = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) + } + b.StopTimer() + }) - // only one invalid signature - alterSignature(sigs[sigsNum/2]) - b.ResetTimer() - for i := 0; i < b.N; i++ { - // all signatures are valid - _, _ = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) - } - b.StopTimer() + // Batch verify bench when some signatures are invalid + // - if only one signaure is invalid (a valid point in G1): + // less than (2*2*log(n)) pairings compared to (2*n) pairings for the simple verification. + // - if all signatures are invalid (valid points in G1): + // (2*2*(n-1)) pairings compared to (2*n) pairings for the simple verification. + b.Run("unhappy path", func(b *testing.B) { + // only one invalid signature + alterSignature(sigs[sigsNum/2]) + b.ResetTimer() + for i := 0; i < b.N; i++ { + // all signatures are valid + _, _ = BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) + } + b.StopTimer() + }) } // BLS multi-signature @@ -475,7 +514,6 @@ func BenchmarkBatchVerifyUnHappyPath(b *testing.B) { // and verify the aggregated signature using the multi-signature verification with // many message. func TestAggregateSignaturesManyMessages(t *testing.T) { - //int64(1601003187394381000) // mrand.Seed(time.Now().UnixNano()) // number of signatures to aggregate @@ -494,7 +532,7 @@ func TestAggregateSignaturesManyMessages(t *testing.T) { pks = append(pks, sk.PublicKey()) } - // number of messages (could be more or less than keys) + // number of messages (could be larger or smaller than the number of keys) msgsNum := mrand.Intn(sigsNum) + 1 messages := make([][20]byte, msgsNum) for i := 0; i < msgsNum; i++ { @@ -524,45 +562,57 @@ func TestAggregateSignaturesManyMessages(t *testing.T) { inputMsgs = append(inputMsgs, msg) inputKmacs = append(inputKmacs, kmac) } - // aggregate signatures - aggSig, err := AggregateSignatures(sigs) - require.NoError(t, err) - // Verify the aggregated signature - valid, err := VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) - require.NoError(t, err) - assert.True(t, valid, - fmt.Sprintf("Verification of %s failed, should be valid, private keys are %s, inputs are %x, input public keys are %s", - aggSig, sks, inputMsgs, inputPks)) + var aggSig Signature - // check if one the signatures is not correct - randomIndex := mrand.Intn(sigsNum) // pick a random signature - messages[0][0] ^= 1 // make sure the signature is different - sigs[randomIndex], err = sks[0].Sign(messages[0][:], inputKmacs[0]) - messages[0][0] ^= 1 - aggSig, err = AggregateSignatures(sigs) - require.NoError(t, err) - valid, err = VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) - require.NoError(t, err) - assert.False(t, valid, - fmt.Sprintf("Verification of %s should fail, private keys are %s, inputs are %x, input public keys are %s", - aggSig, sks, inputMsgs, inputPks)) + t.Run("correctness check", func(t *testing.T) { + // aggregate signatures + var err error + aggSig, err = AggregateSignatures(sigs) + require.NoError(t, err) + // Verify the aggregated signature + valid, err := VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) + require.NoError(t, err) + assert.True(t, valid, + fmt.Sprintf("Verification of %s failed, should be valid, private keys are %s, inputs are %x, input public keys are %s", + aggSig, sks, inputMsgs, inputPks)) + }) + + // check if one of the signatures is not correct + 
t.Run("one signature is invalid", func(t *testing.T) { + randomIndex := mrand.Intn(sigsNum) // pick a random signature + messages[0][0] ^= 1 // make sure the signature is different + var err error + sigs[randomIndex], err = sks[0].Sign(messages[0][:], inputKmacs[0]) + messages[0][0] ^= 1 + aggSig, err = AggregateSignatures(sigs) + require.NoError(t, err) + valid, err := VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) + require.NoError(t, err) + assert.False(t, valid, + fmt.Sprintf("Verification of %s should fail, private keys are %s, inputs are %x, input public keys are %s", + aggSig, sks, inputMsgs, inputPks)) + }) // test the empty keys case - valid, err = VerifySignatureManyMessages(inputPks[:0], aggSig, inputMsgs, inputKmacs) - assert.Error(t, err) - assert.False(t, valid, - fmt.Sprintf("verification should fail with empty list key")) + t.Run("empty list", func(t *testing.T) { + valid, err := VerifySignatureManyMessages(inputPks[:0], aggSig, inputMsgs, inputKmacs) + assert.Error(t, err) + assert.False(t, valid, + fmt.Sprintf("verification should fail with an empty key list")) + }) // test inconsistent input arrays - valid, err = VerifySignatureManyMessages(inputPks, aggSig, inputMsgs[:sigsNum-1], inputKmacs) - assert.Error(t, err) - assert.False(t, valid, - fmt.Sprintf("verification should fail with empty list key")) - - valid, err = VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs[:sigsNum-1]) - assert.Error(t, err) - assert.False(t, valid, - fmt.Sprintf("verification should fail with empty list key")) + t.Run("inconsistent inputs", func(t *testing.T) { + valid, err := VerifySignatureManyMessages(inputPks, aggSig, inputMsgs[:sigsNum-1], inputKmacs) + assert.Error(t, err) + assert.False(t, valid, + fmt.Sprintf("verification should fail with empty list key")) + + valid, err = VerifySignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs[:sigsNum-1]) + assert.Error(t, err) + assert.False(t, valid, + fmt.Sprintf("verification should fail with empty list key")) + }) } // VerifySignatureManyMessages bench diff --git a/crypto/go.sum b/crypto/go.sum index 285b3c5d391..64dddcbb233 100644 --- a/crypto/go.sum +++ b/crypto/go.sum @@ -37,6 +37,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= diff --git a/crypto/sign_test_utils.go b/crypto/sign_test_utils.go index cfee72b6497..de879925854 100644 --- a/crypto/sign_test_utils.go +++ b/crypto/sign_test_utils.go @@ -34,11 +34,13 @@ func testGenSignVerify(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) { s, err := sk.Sign(input, halg) require.NoError(t, err) pk := sk.PublicKey() + // test a valid signature result, err := pk.Verify(s, input, halg) require.NoError(t, err) assert.True(t, result, fmt.Sprintf( "Verification should succeed:\n signature:%s\n message:%x\n private key:%s", s, input, sk)) + // test with a different message input[0] ^= 1 result, err = pk.Verify(s, input, 
halg) @@ -46,6 +48,7 @@ func testGenSignVerify(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) { assert.False(t, result, fmt.Sprintf( "Verification should fail:\n signature:%s\n message:%x\n private key:%s", s, input, sk)) input[0] ^= 1 + // test with a valid but different key seed[0] ^= 1 wrongSk, err := GeneratePrivateKey(salg, seed) @@ -54,6 +57,7 @@ func testGenSignVerify(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) { require.NoError(t, err) assert.False(t, result, fmt.Sprintf( "Verification should fail:\n signature:%s\n message:%x\n private key:%s", s, input, sk)) + // test a wrong signature length invalidLen := mrand.Intn(2 * len(s)) // try random invalid lengths if invalidLen == len(s) { // map to an invalid length @@ -99,28 +103,34 @@ func testEquals(t *testing.T, salg SigningAlgorithm, otherSigAlgo SigningAlgorit t.Logf("Testing Equals for %s", salg) // make sure the length is larger than minimum lengths of all the signaure algos seedMinLength := 48 + // generate a key pair seed := make([]byte, seedMinLength) n, err := rand.Read(seed) require.Equal(t, n, seedMinLength) require.NoError(t, err) + // first pair sk1, err := GeneratePrivateKey(salg, seed) require.NoError(t, err) pk1 := sk1.PublicKey() + // second pair without changing the seed sk2, err := GeneratePrivateKey(salg, seed) require.NoError(t, err) pk2 := sk2.PublicKey() + // unrelated algo pair sk3, err := GeneratePrivateKey(otherSigAlgo, seed) require.NoError(t, err) pk3 := sk3.PublicKey() + // fourth pair with same algo but a different seed seed[0] ^= 1 sk4, err := GeneratePrivateKey(salg, seed) require.NoError(t, err) pk4 := sk4.PublicKey() + // tests assert.True(t, sk1.Equals(sk2), "key equality should return true") assert.True(t, pk1.Equals(pk2), "key equality should return true") @@ -201,11 +211,13 @@ func testPOP(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) { s, err := sk.GeneratePOP(halg) require.NoError(t, err) pk := sk.PublicKey() + // test a valid PoP result, err := pk.VerifyPOP(s, halg) require.NoError(t, err) assert.True(t, result, fmt.Sprintf( "Verification should succeed:\n signature:%s\n private key:%s", s, sk)) + // test with a valid but different key seed[0] ^= 1 wrongSk, err := GeneratePrivateKey(salg, seed) From 63fc6911c6f99034482ade3185263479fcc9c3e4 Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Tue, 13 Oct 2020 20:40:59 -0700 Subject: [PATCH 004/105] Split Snapshot into Snapshot and SpockSnapshot to ensure spock secret is not stored and relied upon --- .../delta_snapshot_exporter.go | 3 --- .../computation/computer/computer.go | 2 +- engine/execution/ingestion/engine.go | 26 ++++++++++++------- engine/execution/ingestion/engine_test.go | 6 ++++- engine/execution/messages.go | 2 +- engine/execution/state/delta/view.go | 20 +++++++++----- .../execution/state/mock/execution_state.go | 14 ++++++++++ engine/execution/state/state.go | 14 +++++++--- engine/execution/state/unittest/fixtures.go | 6 ++--- storage/badger/operation/interactions_test.go | 2 +- utils/unittest/fixtures.go | 2 +- 11 files changed, 65 insertions(+), 32 deletions(-) diff --git a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go index 1d729400259..8d44bb8b8f8 100644 --- a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go @@ -2,7 +2,6 @@ package jsonexporter import ( "bufio" - "encoding/hex" "encoding/json" "fmt" 
"os" @@ -19,7 +18,6 @@ import ( type dSnapshot struct { DeltaJSONStr string `json:"delta_json_str"` Reads []string `json:"reads"` - SpockSecret string `json:"spock_secret_data"` } // ExportDeltaSnapshots exports all the delta snapshots @@ -79,7 +77,6 @@ func ExportDeltaSnapshots(blockID flow.Identifier, dbPath string, outputPath str data := dSnapshot{ DeltaJSONStr: string(m), Reads: reads, - SpockSecret: hex.EncodeToString(snap[0].SpockSecret), } jsonData, err := json.Marshal(data) diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index c4c1a52e9aa..c7e492e3bf9 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -104,7 +104,7 @@ func (e *blockComputer) executeBlock( var gasUsed uint64 - interactions := make([]*delta.Snapshot, len(collections)+1) + interactions := make([]*delta.SpockSnapshot, len(collections)+1) events := make([]flow.Event, 0) blockTxResults := make([]flow.TransactionResult, 0) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index baa1aa9bc85..e44b2da0b5e 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -718,17 +718,23 @@ func (e *Engine) handleComputationResult( // There is one result per transaction e.metrics.ExecutionTotalExecutedTransactions(len(result.TransactionResult)) - receipt, err := e.saveExecutionResults( + snapshots := make([]*delta.Snapshot, len(result.StateSnapshots)) + for i, stateSnapshot := range result.StateSnapshots { + snapshots[i] = &stateSnapshot.Snapshot + } + + executionResult, err := e.saveExecutionResults( ctx, result.ExecutableBlock, - result.StateSnapshots, + snapshots, result.Events, result.TransactionResult, startState, ) + receipt, err := e.generateExecutionReceipt(ctx, executionResult, result.StateSnapshots) if err != nil { - return nil, err + return nil, fmt.Errorf("could not generate execution receipt: %w", err) } err = e.providerEngine.BroadcastExecutionReceipt(ctx, receipt) @@ -752,7 +758,7 @@ func (e *Engine) saveExecutionResults( events []flow.Event, txResults []flow.TransactionResult, startState flow.StateCommitment, -) (*flow.ExecutionReceipt, error) { +) (*flow.ExecutionResult, error) { span, childCtx := e.tracer.StartSpanFromContext(ctx, trace.EXESaveExecutionResults) defer span.Finish() @@ -824,9 +830,9 @@ func (e *Engine) saveExecutionResults( return nil, fmt.Errorf("could not generate execution result: %w", err) } - receipt, err := e.generateExecutionReceipt(childCtx, executionResult, stateInteractions) + err = e.execState.PersistExecutionResult(ctx, executionResult) if err != nil { - return nil, fmt.Errorf("could not generate execution receipt: %w", err) + return nil, fmt.Errorf("could not persist execution result: %w", err) } // not update the highest executed until the result and receipts are saved. @@ -872,7 +878,7 @@ func (e *Engine) saveExecutionResults( Hex("final_state", endState). 
Msg("saved computation results") - return receipt, nil + return executionResult, nil } // logExecutableBlock logs all data about an executable block @@ -957,7 +963,7 @@ func (e *Engine) generateExecutionResultForBlock( func (e *Engine) generateExecutionReceipt( ctx context.Context, result *flow.ExecutionResult, - stateInteractions []*delta.Snapshot, + stateInteractions []*delta.SpockSnapshot, ) (*flow.ExecutionReceipt, error) { spocks := make([]crypto.Signature, len(stateInteractions)) @@ -1384,7 +1390,7 @@ func (e *Engine) applyStateDelta(delta *messages.ExecutionStateDelta) { // TODO - validate state delta, reject invalid messages - executionReceipt, err := e.saveExecutionResults( + executionResult, err := e.saveExecutionResults( e.unit.Ctx(), &delta.ExecutableBlock, delta.StateInteractions, @@ -1397,7 +1403,7 @@ func (e *Engine) applyStateDelta(delta *messages.ExecutionStateDelta) { log.Fatal().Err(err).Msg("fatal error while processing sync message") } - finalState, ok := executionReceipt.ExecutionResult.FinalStateCommitment() + finalState, ok := executionResult.FinalStateCommitment() if !ok { // set to start state next line will fail anyways finalState = delta.StartState diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index a0b8c90908e..b2d55e2fab6 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -216,6 +216,10 @@ func (ctx *testingContext) assertSuccessfulBlockComputation(executableBlock *ent On("UpdateHighestExecutedBlockIfHigher", mock.Anything, executableBlock.Block.Header). Return(nil) + ctx.executionState. + On("PersistExecutionResult", mock.Anything, executableBlock.Block.Header). + Return(nil) + ctx.executionState. On( "PersistExecutionReceipt", @@ -500,7 +504,7 @@ func TestExecuteScriptAtBlockID(t *testing.T) { func Test_SPOCKGeneration(t *testing.T) { runWithEngine(t, func(ctx testingContext) { - snapshots := []*delta.Snapshot{ + snapshots := []*delta.SpockSnapshot{ { SpockSecret: []byte{1, 2, 3}, }, diff --git a/engine/execution/messages.go b/engine/execution/messages.go index 6d910ddfee7..5a7c0c2d51d 100644 --- a/engine/execution/messages.go +++ b/engine/execution/messages.go @@ -17,7 +17,7 @@ type ComputationOrder struct { type ComputationResult struct { ExecutableBlock *entity.ExecutableBlock - StateSnapshots []*delta.Snapshot + StateSnapshots []*delta.SpockSnapshot Events []flow.Event TransactionResult []flow.TransactionResult GasUsed uint64 diff --git a/engine/execution/state/delta/view.go b/engine/execution/state/delta/view.go index cda9f49c54d..b9004ecff88 100644 --- a/engine/execution/state/delta/view.go +++ b/engine/execution/state/delta/view.go @@ -25,10 +25,14 @@ type View struct { readFunc GetRegisterFunc } -// Snapshot is set of interactions with the register type Snapshot struct { - Delta Delta - Reads []flow.RegisterID + Delta Delta + Reads []flow.RegisterID +} + +// Snapshot is state of interactions with the register +type SpockSnapshot struct { + Snapshot SpockSecret []byte } @@ -43,7 +47,7 @@ func NewView(readFunc GetRegisterFunc) *View { } // Snapshot returns copy of current state of interactions with a View -func (v *View) Interactions() *Snapshot { +func (v *View) Interactions() *SpockSnapshot { var delta = Delta{ Data: make(map[string]flow.RegisterEntry, len(v.delta.Data)), @@ -63,9 +67,11 @@ func (v *View) Interactions() *Snapshot { var spockSecret = make([]byte, len(spockSecHashSum)) copy(spockSecret, spockSecHashSum) - return &Snapshot{ - 
Delta: delta, - Reads: reads, + return &SpockSnapshot{ + Snapshot: Snapshot{ + Delta: delta, + Reads: reads, + }, SpockSecret: spockSecret, } } diff --git a/engine/execution/state/mock/execution_state.go b/engine/execution/state/mock/execution_state.go index 2cd43309d70..faad0cea705 100644 --- a/engine/execution/state/mock/execution_state.go +++ b/engine/execution/state/mock/execution_state.go @@ -230,6 +230,20 @@ func (_m *ExecutionState) PersistExecutionReceipt(_a0 context.Context, _a1 *flow return r0 } +// PersistExecutionResult provides a mock function with given fields: ctx, result +func (_m *ExecutionState) PersistExecutionResult(ctx context.Context, result *flow.ExecutionResult) error { + ret := _m.Called(ctx, result) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.ExecutionResult) error); ok { + r0 = rf(ctx, result) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // PersistStateCommitment provides a mock function with given fields: _a0, _a1, _a2 func (_m *ExecutionState) PersistStateCommitment(_a0 context.Context, _a1 flow.Identifier, _a2 []byte) error { ret := _m.Called(_a0, _a1, _a2) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 84f530e9c0e..d150c193213 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -67,6 +67,8 @@ type ExecutionState interface { // PersistChunkDataPack stores a chunk data pack by chunk ID. PersistChunkDataPack(context.Context, *flow.ChunkDataPack) error + PersistExecutionResult(ctx context.Context, result *flow.ExecutionResult) error + PersistExecutionReceipt(context.Context, *flow.ExecutionReceipt) error PersistStateInteractions(context.Context, flow.Identifier, []*delta.Snapshot) error @@ -92,6 +94,14 @@ type state struct { db *badger.DB } +func (s *state) PersistExecutionResult(ctx context.Context, executionResult *flow.ExecutionResult) error { + err := s.results.Index(executionResult.BlockID, executionResult.ID()) + if err != nil { + return fmt.Errorf("could not index execution result: %w", err) + } + return nil +} + func RegisterIDToKey(reg flow.RegisterID) ledger.Key { return ledger.NewKey([]ledger.KeyPart{ ledger.NewKeyPart(KeyPartOwner, []byte(reg.Owner)), @@ -332,10 +342,6 @@ func (s *state) PersistExecutionReceipt(ctx context.Context, receipt *flow.Execu if err != nil { return fmt.Errorf("could not index execution receipt: %w", err) } - err = s.results.Index(receipt.ExecutionResult.BlockID, receipt.ExecutionResult.ID()) - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } return nil } diff --git a/engine/execution/state/unittest/fixtures.go b/engine/execution/state/unittest/fixtures.go index cd9e45185c8..323688c0088 100644 --- a/engine/execution/state/unittest/fixtures.go +++ b/engine/execution/state/unittest/fixtures.go @@ -8,12 +8,12 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func StateInteractionsFixture() *delta.Snapshot { +func StateInteractionsFixture() *delta.SpockSnapshot { return delta.NewView(nil).Interactions() } func ComputationResultFixture(collectionsSignerIDs [][]flow.Identifier) *execution.ComputationResult { - stateViews := make([]*delta.Snapshot, len(collectionsSignerIDs)) + stateViews := make([]*delta.SpockSnapshot, len(collectionsSignerIDs)) for i := 0; i < len(collectionsSignerIDs); i++ { stateViews[i] = StateInteractionsFixture() } @@ -25,7 +25,7 @@ func ComputationResultFixture(collectionsSignerIDs [][]flow.Identifier) *executi func 
ComputationResultForBlockFixture(completeBlock *entity.ExecutableBlock) *execution.ComputationResult { n := len(completeBlock.CompleteCollections) - stateViews := make([]*delta.Snapshot, n) + stateViews := make([]*delta.SpockSnapshot, n) for i := 0; i < n; i++ { stateViews[i] = StateInteractionsFixture() } diff --git a/storage/badger/operation/interactions_test.go b/storage/badger/operation/interactions_test.go index 6f6f190abee..da8f6df57b4 100644 --- a/storage/badger/operation/interactions_test.go +++ b/storage/badger/operation/interactions_test.go @@ -35,7 +35,7 @@ func TestStateInteractionsInsertCheckRetrieve(t *testing.T) { _, err = d1.Get(string([]byte{3}), "", "") require.NoError(t, err) - interactions := []*delta.Snapshot{d1.Interactions(), d2.Interactions()} + interactions := []*delta.Snapshot{&d1.Interactions().Snapshot, &d2.Interactions().Snapshot} blockID := unittest.IdentifierFixture() diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 54ec70727b7..d60d03aae89 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -128,7 +128,7 @@ func BlockWithParentFixture(parent *flow.Header) flow.Block { } func StateInteractionsFixture() *delta.Snapshot { - return delta.NewView(nil).Interactions() + return &delta.NewView(nil).Interactions().Snapshot } func BlockWithParentAndProposerFixture(parent *flow.Header, proposer flow.Identifier) flow.Block { From 7c449d4b32ecf75c7370cbcbe53042fe3c057ed9 Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Tue, 13 Oct 2020 21:34:05 -0700 Subject: [PATCH 005/105] Fix missing storage of ExecutionResults --- engine/execution/ingestion/engine_test.go | 8 +++++++- engine/execution/state/state.go | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index b2d55e2fab6..19bd7efdd18 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -217,7 +217,13 @@ func (ctx *testingContext) assertSuccessfulBlockComputation(executableBlock *ent Return(nil) ctx.executionState. - On("PersistExecutionResult", mock.Anything, executableBlock.Block.Header). + On( + "PersistExecutionResult", + mock.Anything, + mock.MatchedBy(func(executionResult *flow.ExecutionResult) bool { + return executionResult.BlockID == executableBlock.Block.ID() && executionResult.PreviousResultID == previousExecutionResultID + }), + ). Return(nil) ctx.executionState. 
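
(Illustrative sketch, not part of the patch.) The state.go diff below makes PersistExecutionResult store the execution result and then index it by block ID, rather than leaving the indexing to PersistExecutionReceipt. A minimal Go sketch of that store-then-index flow follows; the narrow interface, helper name, and import path (resultsStore, persistResult, the flow model path) are assumptions for illustration, and only the Store/Index calls and the ExecutionResult fields are taken from the diff itself.

    package sketch

    import (
    	"fmt"

    	"github.com/onflow/flow-go/model/flow" // import path assumed
    )

    // resultsStore captures just the two storage methods this patch relies on
    // (signatures assumed to match the calls in the diff below).
    type resultsStore interface {
    	Store(result *flow.ExecutionResult) error
    	Index(blockID flow.Identifier, resultID flow.Identifier) error
    }

    // persistResult stores the result body first, then indexes it by the
    // executed block's ID, so the index never points at a missing result.
    func persistResult(results resultsStore, result *flow.ExecutionResult) error {
    	if err := results.Store(result); err != nil {
    		return fmt.Errorf("could not store result: %w", err)
    	}
    	if err := results.Index(result.BlockID, result.ID()); err != nil {
    		return fmt.Errorf("could not index execution result: %w", err)
    	}
    	return nil
    }
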
diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index d150c193213..6eb8229b00d 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -95,7 +95,13 @@ type state struct { } func (s *state) PersistExecutionResult(ctx context.Context, executionResult *flow.ExecutionResult) error { - err := s.results.Index(executionResult.BlockID, executionResult.ID()) + + err := s.results.Store(executionResult) + if err != nil { + return fmt.Errorf("could not store result: %w", err) + } + + err = s.results.Index(executionResult.BlockID, executionResult.ID()) if err != nil { return fmt.Errorf("could not index execution result: %w", err) } From a4a86b72b2ff55bf79f666deeb2ea0965bcb81cd Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Wed, 14 Oct 2020 09:00:14 -0700 Subject: [PATCH 006/105] Linting --- engine/execution/ingestion/engine.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index e44b2da0b5e..9d03078a470 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -731,6 +731,9 @@ func (e *Engine) handleComputationResult( result.TransactionResult, startState, ) + if err != nil { + return nil, fmt.Errorf("could not save execution results: %w", err) + } receipt, err := e.generateExecutionReceipt(ctx, executionResult, result.StateSnapshots) if err != nil { From c28e513a6e7e4e039481588363d162449f9078a8 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 16:53:25 -0700 Subject: [PATCH 007/105] log receiving collection --- engine/execution/ingestion/engine.go | 19 +++++++++++++------ module/synchronization/core.go | 5 +++-- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index baa1aa9bc85..64b86730072 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -484,14 +484,18 @@ func (e *Engine) OnCollection(originID flow.Identifier, entity flow.Entity) { // is. func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Collection) error { + collID := collection.ID() + + log := e.log.With().Hex("collection_id", collID[:]).Logger() + + log.Info().Hex("sender", originID[:]).Msg("handle collection") + // TODO: bail if have seen this collection before. err := e.collections.Store(collection) if err != nil { return fmt.Errorf("cannot store collection: %w", err) } - collID := collection.ID() - return e.mempool.BlockByCollection.Run( func(backdata *stdmap.BlockByCollectionBackdata) error { blockByCollectionID, exists := backdata.ByID(collID) @@ -501,15 +505,16 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col // or it was ejected from the mempool when it was full. // either way, we will return if !exists { - e.log.Debug().Hex("collection_id", collID[:]). 
- Msg("could not find block for collection") + log.Debug().Msg("could not find block for collection") return nil } for _, executableBlock := range blockByCollectionID.ExecutableBlocks { + blockID := executableBlock.ID() + completeCollection, ok := executableBlock.CompleteCollections[collID] if !ok { - return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block which does not contain said collection") + return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block %v which does not contain said collection", blockID) } if completeCollection.IsCompleted() { @@ -523,7 +528,9 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col completeCollection.Transactions = collection.Transactions // check if the block becomes executable - _ = e.executeBlockIfComplete(executableBlock) + completed := e.executeBlockIfComplete(executableBlock) + + log.Debug().Hex("block_id", blockID[:]).Bool("completed", completed).Msg("collection added to block") } // since we've received this collection, remove it from the index diff --git a/module/synchronization/core.go b/module/synchronization/core.go index 23af2284b66..b2c2c58c92c 100644 --- a/module/synchronization/core.go +++ b/module/synchronization/core.go @@ -241,8 +241,9 @@ func (c *Core) prune(final *flow.Header) { } } - prunedHeights := len(c.heights) - initialHeights - prunedBlockIDs := len(c.blockIDs) - initialBlockIDs + prunedHeights := initialHeights - len(c.heights) + prunedBlockIDs := initialBlockIDs - len(c.blockIDs) + c.log.Debug(). Uint64("final_height", final.Height). Msgf("pruned %d heights, %d block IDs", prunedHeights, prunedBlockIDs) From 3529525b4bd8f527675292da0b719d422972a2c3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 19:29:31 -0700 Subject: [PATCH 008/105] reload all finalized and unexecuted blocks --- engine/execution/ingestion/engine.go | 77 ++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 64b86730072..4f804b0b09d 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -132,6 +132,11 @@ func New( // Ready returns a channel that will close when the engine has // successfully started. func (e *Engine) Ready() <-chan struct{} { + err := e.loadAllFinalizedAndUnexecutedBlocks() + if err != nil { + e.log.Fatal().Err(err).Msg("failed to load all unexecuted blocks") + } + return e.unit.Ready() } @@ -180,6 +185,78 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } +func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { + // get finalized height + header, err := e.state.Final().Head() + if err != nil { + return fmt.Errorf("could not get finalized block: %w", err) + } + + finalizedHeight := header.Height + + // get the last executed height + lastExecutedHeight, _, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) + if err != nil { + return fmt.Errorf("could not get last executed block: %w", err) + } + + var unexecuted int64 + unexecuted = int64(finalizedHeight) - int64(lastExecutedHeight) + + e.log.Info(). + Int64("count", unexecuted). 
+ Msg("reloading finalized and unexecuted blocks to execution queues...") + + // log the number of unexecuted blocks + if unexecuted <= 0 { + return nil + } + + count := 0 + for height := lastExecutedHeight + 1; height <= finalizedHeight; height++ { + block, err := e.blocks.ByHeight(height) + if err != nil { + return fmt.Errorf("could not get block by height: %w", err) + } + + executableBlock := &entity.ExecutableBlock{ + Block: block, + CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), + } + + blockID := executableBlock.ID() + + // acquiring the lock so that there is only one process modifying the queue + err = e.mempool.Run( + func( + blockByCollection *stdmap.BlockByCollectionBackdata, + executionQueues *stdmap.QueuesBackdata, + ) error { + // adding the block to the queue, + _, added := enqueue(executableBlock, executionQueues) + if !added { + // we started from an empty queue, and added each finalized block to the + // queue. Each block should always be added to the queues. + // a sanity check it must be an exception if not added. + return fmt.Errorf("block %v is not added to the queue", blockID) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("failed to recover block %v", err) + } + + count++ + } + + e.log.Info().Int("count", count). + Msg("reloaded all the finalized and unexecuted blocks to execution queues") + + return nil +} + // BlockProcessable handles the new verified blocks (blocks that // have passed consensus validation) received from the consensus nodes // Note: BlockProcessable might be called multiple times for the same block. From 6539803d9f7e81fb8358f9cc90f58e69c1154b87 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 20:10:49 -0700 Subject: [PATCH 009/105] print block height --- engine/execution/ingestion/engine.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 4f804b0b09d..109559197c3 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -267,7 +267,9 @@ func (e *Engine) BlockProcessable(b *flow.Header) { e.log.Fatal().Err(err).Msgf("could not get incorporated block(%v): %v", blockID, err) } - e.log.Debug().Hex("block_id", blockID[:]).Msg("handling new block") + e.log.Debug().Hex("block_id", blockID[:]). + Uint64("height", b.Height). + Msg("handling new block") err = e.handleBlock(e.unit.Ctx(), newBlock) if err != nil { @@ -749,6 +751,7 @@ func (e *Engine) matchOrRequestCollections( e.log.Debug(). Hex("block", logging.Entity(executableBlock)). + Uint64("height", executableBlock.Block.Header.Height). Int("num_col", len(executableBlock.Block.Payload.Guarantees)). Int("actual_req", actualRequested). Msg("requested all collections") From 1d6ba0f85651df33c2c93584f839354963d862b1 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 20:32:13 -0700 Subject: [PATCH 010/105] update metrics --- engine/execution/ingestion/engine.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 109559197c3..2478317cb7b 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -394,16 +394,12 @@ func (e *Engine) executeBlock(ctx context.Context, executableBlock *entity.Execu return } - e.metrics.ExecutionStorageStateCommitment(int64(len(finalState))) - e.log.Info(). 
Hex("block_id", logging.Entity(executableBlock)). Uint64("block_height", executableBlock.Block.Header.Height). Hex("final_state", finalState). Msg("block executed") - e.metrics.ExecutionLastExecutedBlockHeight(executableBlock.Block.Header.Height) - err = e.onBlockExecuted(executableBlock, finalState) if err != nil { e.log.Err(err).Msg("failed in process block's children") @@ -428,6 +424,9 @@ func (e *Engine) executeBlock(ctx context.Context, executableBlock *entity.Execu func (e *Engine) onBlockExecuted(executed *entity.ExecutableBlock, finalState flow.StateCommitment) error { + e.metrics.ExecutionStorageStateCommitment(int64(len(finalState))) + e.metrics.ExecutionLastExecutedBlockHeight(executed.Block.Header.Height) + e.checkStateSyncStop(executed.Block.Header.Height) err := e.mempool.Run( From d322a7ca29a13ac20229d1fdf1bbefc55822d49c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 20:32:29 -0700 Subject: [PATCH 011/105] show block height in extend call --- state/protocol/badger/mutator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/state/protocol/badger/mutator.go b/state/protocol/badger/mutator.go index 2704ebf8b61..beb4b7b0213 100644 --- a/state/protocol/badger/mutator.go +++ b/state/protocol/badger/mutator.go @@ -300,8 +300,8 @@ func (m *Mutator) headerExtend(candidate *flow.Block) error { // block G is not a valid block, because it does not include C which has been finalized. // block H and I are a valid, because its their includes C. return state.NewOutdatedExtensionErrorf( - "candidate block conflicts with finalized state (ancestor: %d final: %d)", - ancestor.Height, finalizedHeight) + "candidate block (height: %v) conflicts with finalized state (ancestor: %d final: %d)", + header.Height, ancestor.Height, finalizedHeight) } ancestorID = ancestor.ParentID } From c18760d27c7667a4a21ae99b8bb9af7e8afe7c9c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 21:09:46 -0700 Subject: [PATCH 012/105] log missing parent --- engine/execution/ingestion/engine.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2478317cb7b..2a152523137 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -185,6 +185,7 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } +// on nodes startup, we need to load all the unexecuted blocks to the execution queues. func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { // get finalized height header, err := e.state.Final().Head() @@ -342,7 +343,12 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { // if we found the statecommitment for the parent block, then add it to the executable block. if err == nil { executableBlock.StartState = parentCommitment - } else if !errors.Is(err, storage.ErrNotFound) { + } else if errors.Is(err, storage.ErrNotFound) { + // the parent block is an unexecuted block. + // if the queue only has one block, and its parent doesn't + // exist in the queue, then we need to load the block from the storage. 
+ log.Error().Msgf("an unexecuted parent block is missing in the queue") + } else { // if there is exception, then crash log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") } From f159cd14cee85af789740e63859ac4f62855256e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 21:17:22 -0700 Subject: [PATCH 013/105] linting --- engine/execution/ingestion/engine.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2a152523137..3b0e988803f 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -201,8 +201,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { return fmt.Errorf("could not get last executed block: %w", err) } - var unexecuted int64 - unexecuted = int64(finalizedHeight) - int64(lastExecutedHeight) + unexecuted := int64(finalizedHeight) - int64(lastExecutedHeight) e.log.Info(). Int64("count", unexecuted). From 1b43735bf516b3018f2c5c03e973ed3a05562045 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 21:33:46 -0700 Subject: [PATCH 014/105] fix logging --- engine/execution/ingestion/engine.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 3b0e988803f..b6fac02bdee 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -346,7 +346,10 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { // the parent block is an unexecuted block. // if the queue only has one block, and its parent doesn't // exist in the queue, then we need to load the block from the storage. - log.Error().Msgf("an unexecuted parent block is missing in the queue") + _, ok := queue.Nodes[blockID] + if !ok { + log.Error().Msgf("an unexecuted parent block is missing in the queue") + } } else { // if there is exception, then crash log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") From 6f6d9768d992ae64ea8374e78a6ddcec1e46eacc Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 21:41:20 -0700 Subject: [PATCH 015/105] log last finalized --- engine/execution/ingestion/engine.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index b6fac02bdee..668412b1c78 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -205,6 +205,8 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { e.log.Info(). Int64("count", unexecuted). + Uint64("last_executed_height", lastExecutedHeight). + Uint64("last_finalized_height", finalizedHeight). 
Msg("reloading finalized and unexecuted blocks to execution queues...") // log the number of unexecuted blocks From bdd0485e5e2a14352e8a5817c20138c02125031e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 21:46:45 -0700 Subject: [PATCH 016/105] adjust log level --- engine/execution/ingestion/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 668412b1c78..2ef6d590c00 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -215,7 +215,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { } count := 0 - for height := lastExecutedHeight + 1; height <= finalizedHeight; height++ { + for height := 7651977; height <= finalizedHeight; height++ { block, err := e.blocks.ByHeight(height) if err != nil { return fmt.Errorf("could not get block by height: %w", err) @@ -269,7 +269,7 @@ func (e *Engine) BlockProcessable(b *flow.Header) { e.log.Fatal().Err(err).Msgf("could not get incorporated block(%v): %v", blockID, err) } - e.log.Debug().Hex("block_id", blockID[:]). + e.log.Info().Hex("block_id", blockID[:]). Uint64("height", b.Height). Msg("handling new block") From d86c30ecdc314e05c293d84c932f15380d0abac4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 22:02:02 -0700 Subject: [PATCH 017/105] force executing on startup --- engine/execution/ingestion/engine.go | 40 ++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2ef6d590c00..91daf771a3d 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -8,6 +8,7 @@ import ( "math/rand" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "go.uber.org/atomic" "github.com/onflow/flow-go/consensus/hotstuff/notifications" @@ -215,7 +216,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { } count := 0 - for height := 7651977; height <= finalizedHeight; height++ { + for height := lastExecutedHeight + 1; height <= finalizedHeight; height++ { block, err := e.blocks.ByHeight(height) if err != nil { return fmt.Errorf("could not get block by height: %w", err) @@ -235,7 +236,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { executionQueues *stdmap.QueuesBackdata, ) error { // adding the block to the queue, - _, added := enqueue(executableBlock, executionQueues) + queue, added := enqueue(executableBlock, executionQueues) if !added { // we started from an empty queue, and added each finalized block to the // queue. Each block should always be added to the queues. @@ -243,6 +244,41 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { return fmt.Errorf("block %v is not added to the queue", blockID) } + // check if a block is executable. + // a block is executable if the following conditions are all true + // 1) the parent state commitment is ready + // 2) the collections for the block payload are ready + // 3) the child block is ready for querying the randomness + + // check if the block's parent has been executed. (we can't execute the block if the parent has + // not been executed yet) + // check if there is a statecommitment for the parent block + parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) + + // if we found the statecommitment for the parent block, then add it to the executable block. 
+ if err == nil { + executableBlock.StartState = parentCommitment + } else if errors.Is(err, storage.ErrNotFound) { + // the parent block is an unexecuted block. + // if the queue only has one block, and its parent doesn't + // exist in the queue, then we need to load the block from the storage. + _, ok := queue.Nodes[blockID] + if !ok { + log.Error().Msgf("an unexecuted parent block is missing in the queue") + } + } else { + // if there is exception, then crash + log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") + } + + // check if we have all the collections for the block, and request them if there is missing. + err = e.matchOrRequestCollections(executableBlock, blockByCollection) + if err != nil { + return fmt.Errorf("cannot send collection requests: %w", err) + } + + // execute the block if the block is ready to be executed + e.executeBlockIfComplete(executableBlock) return nil }) From e50f7e58c964f10dac9e99e22efefaf77b58f7ab Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 22:42:14 -0700 Subject: [PATCH 018/105] pull until a future height --- engine/execution/ingestion/engine.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 91daf771a3d..eef3c9d601a 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -135,7 +135,7 @@ func New( func (e *Engine) Ready() <-chan struct{} { err := e.loadAllFinalizedAndUnexecutedBlocks() if err != nil { - e.log.Fatal().Err(err).Msg("failed to load all unexecuted blocks") + e.log.Error().Err(err).Msg("failed to load all unexecuted blocks") } return e.unit.Ready() @@ -188,13 +188,14 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { // on nodes startup, we need to load all the unexecuted blocks to the execution queues. 
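For reference, a minimal sketch (not part of the patch) of the height range this reload step walks: every finalized block above the last executed height is re-enqueued, i.e. heights lastExecuted+1 up to and including the finalized height. The helper name is illustrative only.

// unexecutedHeights lists the heights of finalized blocks that still need to be
// executed and therefore re-added to the execution queues on startup.
func unexecutedHeights(lastExecuted, finalized uint64) []uint64 {
	if finalized <= lastExecuted {
		return nil // nothing to reload
	}
	heights := make([]uint64, 0, finalized-lastExecuted)
	for h := lastExecuted + 1; h <= finalized; h++ {
		heights = append(heights, h)
	}
	return heights
}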
func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { - // get finalized height + // // get finalized height header, err := e.state.Final().Head() if err != nil { return fmt.Errorf("could not get finalized block: %w", err) } finalizedHeight := header.Height + futureHeight := 8655590 // get the last executed height lastExecutedHeight, _, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) @@ -216,7 +217,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { } count := 0 - for height := lastExecutedHeight + 1; height <= finalizedHeight; height++ { + for height := lastExecutedHeight + 1; height <= futureHeight; height++ { block, err := e.blocks.ByHeight(height) if err != nil { return fmt.Errorf("could not get block by height: %w", err) From 08c5a634e3f24d8cd7aa22464fe2c66457c09030 Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef Date: Wed, 14 Oct 2020 22:59:48 -0700 Subject: [PATCH 019/105] refactor the remaining tests --- crypto/bls12381_utils_test.go | 72 +++++++++++---------- crypto/ecdsa_test.go | 55 ++++++++-------- crypto/spock_test.go | 115 +++++++++++++++++++--------------- 3 files changed, 129 insertions(+), 113 deletions(-) diff --git a/crypto/bls12381_utils_test.go b/crypto/bls12381_utils_test.go index 14bd918a29a..b4f28cc678d 100644 --- a/crypto/bls12381_utils_test.go +++ b/crypto/bls12381_utils_test.go @@ -59,40 +59,34 @@ func TestG1(t *testing.T) { } -// G1 bench -func BenchmarkG1(b *testing.B) { +// G1 and G2 scalar multiplication +func BenchmarkScalarMult(b *testing.B) { blsInstance.reInit() seed := make([]byte, securityBits/8) rand.Read(seed) seedRelic(seed) var expo scalar randZr(&expo) - var res pointG1 - - b.ResetTimer() - for i := 0; i < b.N; i++ { - genScalarMultG1(&res, &expo) - } - b.StopTimer() - return -} - -// G2 bench -func BenchmarkG2(b *testing.B) { - blsInstance.reInit() - seed := make([]byte, securityBits/8) - rand.Read(seed) - seedRelic(seed) - var expo scalar - randZr(&expo) - var res pointG2 - b.ResetTimer() - for i := 0; i < b.N; i++ { - genScalarMultG2(&res, &expo) - } - b.StopTimer() - return + // G1 bench + b.Run("G1", func(b *testing.B) { + var res pointG1 + b.ResetTimer() + for i := 0; i < b.N; i++ { + genScalarMultG1(&res, &expo) + } + b.StopTimer() + }) + + // G2 bench + b.Run("G2", func(b *testing.B) { + var res pointG2 + b.ResetTimer() + for i := 0; i < b.N; i++ { + genScalarMultG2(&res, &expo) + } + b.StopTimer() + }) } // Hashing to G1 bench @@ -146,19 +140,23 @@ func TestSubgroupCheckG1(t *testing.T) { rand.Read(seed) seedRelic(seed) - simple := 0 - bowe := 1 // tests for simple membership check - check := checkG1Test(1, simple) // point in G1 - assert.True(t, check) - check = checkG1Test(0, simple) // point in E1\G1 - assert.False(t, check) + t.Run("simple check", func(t *testing.T) { + simple := 0 + check := checkG1Test(1, simple) // point in G1 + assert.True(t, check) + check = checkG1Test(0, simple) // point in E1\G1 + assert.False(t, check) + }) // tests for Bowe membership check - check = checkG1Test(1, bowe) // point in G1 - assert.True(t, check) - check = checkG1Test(0, bowe) // point in E1\G1 - assert.False(t, check) + t.Run("bowe check", func(t *testing.T) { + bowe := 1 + check := checkG1Test(1, bowe) // point in G1 + assert.True(t, check) + check = checkG1Test(0, bowe) // point in E1\G1 + assert.False(t, check) + }) } // G1 membership check bench diff --git a/crypto/ecdsa_test.go b/crypto/ecdsa_test.go index 9c5eacbb824..8910d39b673 100644 --- a/crypto/ecdsa_test.go +++ b/crypto/ecdsa_test.go @@ -156,31 
+156,36 @@ func TestScalarMult(t *testing.T) { }, } - for _, test := range genericMultTests { - Px, _ := new(big.Int).SetString(test.Px, 16) - Py, _ := new(big.Int).SetString(test.Py, 16) - k, _ := new(big.Int).SetString(test.k, 16) - Qx, _ := new(big.Int).SetString(test.Qx, 16) - Qy, _ := new(big.Int).SetString(test.Qy, 16) - Rx, Ry := test.curve.ScalarMult(Px, Py, k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, Ry.Cmp(Qy), 0) - } - for _, test := range baseMultTests { - k, _ := new(big.Int).SetString(test.k, 16) - Qx, _ := new(big.Int).SetString(test.Qx, 16) - Qy, _ := new(big.Int).SetString(test.Qy, 16) - // base mult - Rx, Ry := test.curve.ScalarBaseMult(k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, Ry.Cmp(Qy), 0) - // generic mult with base point - Px := new(big.Int).Set(test.curve.Params().Gx) - Py := new(big.Int).Set(test.curve.Params().Gy) - Rx, Ry = test.curve.ScalarMult(Px, Py, k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, Ry.Cmp(Qy), 0) - } + t.Run("scalar mult check", func(t *testing.T) { + for _, test := range genericMultTests { + Px, _ := new(big.Int).SetString(test.Px, 16) + Py, _ := new(big.Int).SetString(test.Py, 16) + k, _ := new(big.Int).SetString(test.k, 16) + Qx, _ := new(big.Int).SetString(test.Qx, 16) + Qy, _ := new(big.Int).SetString(test.Qy, 16) + Rx, Ry := test.curve.ScalarMult(Px, Py, k.Bytes()) + assert.Equal(t, Rx.Cmp(Qx), 0) + assert.Equal(t, Ry.Cmp(Qy), 0) + } + }) + + t.Run("base scalar mult check", func(t *testing.T) { + for _, test := range baseMultTests { + k, _ := new(big.Int).SetString(test.k, 16) + Qx, _ := new(big.Int).SetString(test.Qx, 16) + Qy, _ := new(big.Int).SetString(test.Qy, 16) + // base mult + Rx, Ry := test.curve.ScalarBaseMult(k.Bytes()) + assert.Equal(t, Rx.Cmp(Qx), 0) + assert.Equal(t, Ry.Cmp(Qy), 0) + // generic mult with base point + Px := new(big.Int).Set(test.curve.Params().Gx) + Py := new(big.Int).Set(test.curve.Params().Gy) + Rx, Ry = test.curve.ScalarMult(Px, Py, k.Bytes()) + assert.Equal(t, Rx.Cmp(Qx), 0) + assert.Equal(t, Ry.Cmp(Qy), 0) + } + }) } // ECDSA Proof of Possession test diff --git a/crypto/spock_test.go b/crypto/spock_test.go index 19d353e9202..ebdc036f497 100644 --- a/crypto/spock_test.go +++ b/crypto/spock_test.go @@ -17,41 +17,48 @@ func TestSPOCKProveVerifyAgainstData(t *testing.T) { seed := make([]byte, KeyGenSeedMinLenBLSBLS12381) data := make([]byte, 100) - loops := 1 - for j := 0; j < loops; j++ { - n, err := rand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) - require.NoError(t, err) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(t, err) - _, err = rand.Read(data) - require.NoError(t, err) - // generate a SPoCK proof - kmac := NewBLSKMAC("spock test") - s, err := SPOCKProve(sk, data, kmac) - require.NoError(t, err) - pk := sk.PublicKey() - // SPoCK verify against the data (happy path) + n, err := rand.Read(seed) + require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) + require.NoError(t, err) + sk, err := GeneratePrivateKey(BLSBLS12381, seed) + require.NoError(t, err) + _, err = rand.Read(data) + require.NoError(t, err) + + // generate a SPoCK proof + kmac := NewBLSKMAC("spock test") + s, err := SPOCKProve(sk, data, kmac) + require.NoError(t, err) + pk := sk.PublicKey() + + // SPoCK verify against the data (happy path) + t.Run("correctness check", func(t *testing.T) { result, err := SPOCKVerifyAgainstData(pk, s, data, kmac) require.NoError(t, err) assert.True(t, result, fmt.Sprintf( "Verification should succeed:\n signature:%s\n 
message:%s\n private key:%s", s, data, sk)) - // test with a different message (unhappy path) + }) + + // test with a different message (unhappy path) + t.Run("invalid message", func(t *testing.T) { data[0] ^= 1 - result, err = SPOCKVerifyAgainstData(pk, s, data, kmac) + result, err := SPOCKVerifyAgainstData(pk, s, data, kmac) require.NoError(t, err) assert.False(t, result, fmt.Sprintf( "Verification should fail:\n signature:%s\n message:%s\n private key:%s", s, data, sk)) data[0] ^= 1 - // test with a valid but different key (unhappy path) + }) + + // test with a valid but different key (unhappy path) + t.Run("invalid key", func(t *testing.T) { seed[0] ^= 1 wrongSk, err := GeneratePrivateKey(BLSBLS12381, seed) require.NoError(t, err) - result, err = SPOCKVerifyAgainstData(wrongSk.PublicKey(), s, data, kmac) + result, err := SPOCKVerifyAgainstData(wrongSk.PublicKey(), s, data, kmac) require.NoError(t, err) assert.False(t, result, fmt.Sprintf( "Verification should fail:\n signature:%s\n message:%s\n private key:%s", s, data, sk)) - } + }) } // tests of happy and unhappy paths of SPOCKVerify @@ -61,56 +68,62 @@ func TestSPOCKProveVerify(t *testing.T) { seed2 := make([]byte, KeyGenSeedMinLenBLSBLS12381) data := make([]byte, 100) - loops := 1 - for j := 0; j < loops; j++ { - // data - _, err := rand.Read(data) - require.NoError(t, err) - // sk1 - n, err := rand.Read(seed1) - require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) - require.NoError(t, err) - sk1, err := GeneratePrivateKey(BLSBLS12381, seed1) - require.NoError(t, err) - // sk2 - n, err = rand.Read(seed2) - require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) - require.NoError(t, err) - sk2, err := GeneratePrivateKey(BLSBLS12381, seed2) - require.NoError(t, err) + // data + _, err := rand.Read(data) + require.NoError(t, err) + // sk1 + n, err := rand.Read(seed1) + require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) + require.NoError(t, err) + sk1, err := GeneratePrivateKey(BLSBLS12381, seed1) + require.NoError(t, err) + // sk2 + n, err = rand.Read(seed2) + require.Equal(t, n, KeyGenSeedMinLenBLSBLS12381) + require.NoError(t, err) + sk2, err := GeneratePrivateKey(BLSBLS12381, seed2) + require.NoError(t, err) - // generate SPoCK proofs - kmac := NewBLSKMAC("spock test") - pr1, err := SPOCKProve(sk1, data, kmac) - require.NoError(t, err) - pr2, err := SPOCKProve(sk2, data, kmac) - require.NoError(t, err) - // SPoCK verify against the data, happy path + // generate SPoCK proofs + kmac := NewBLSKMAC("spock test") + pr1, err := SPOCKProve(sk1, data, kmac) + require.NoError(t, err) + pr2, err := SPOCKProve(sk2, data, kmac) + require.NoError(t, err) + + // SPoCK verify against the data, happy path + t.Run("correctness check", func(t *testing.T) { result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2.PublicKey(), pr2) require.NoError(t, err) assert.True(t, result, fmt.Sprintf( "Verification should succeed:\n proofs:%s\n %s\n private keys:%s\n %s\n data:%s", pr1, pr2, sk1, sk2, data)) - // test with a different message, verification should fail for proofs - // of different messages. + }) + + // test with a different message, verification should fail for proofs + // of different messages. 
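The unhappy-path subtests around here rely on an in-place bit flip to derive a message or seed that differs from the original; a minimal sketch of that trick (the helper name is illustrative, not part of the patch):

// flipFirstBit toggles the lowest bit of the first byte: applying it once yields data
// differing from the original in exactly one bit (enough to make verification fail),
// and applying it a second time restores the original bytes.
func flipFirstBit(data []byte) {
	if len(data) > 0 {
		data[0] ^= 1
	}
}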
+ t.Run("inconsistent proofs", func(t *testing.T) { data[0] ^= 1 // alter the data pr2bis, err := SPOCKProve(sk2, data, kmac) require.NoError(t, err) - result, err = SPOCKVerify(sk1.PublicKey(), pr1, sk2.PublicKey(), pr2bis) + result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2.PublicKey(), pr2bis) require.NoError(t, err) assert.False(t, result, fmt.Sprintf( "Verification should fail:\n proofs:%s\n %s\n private keys:%s\n %s \n data:%s", pr1, pr2bis, sk1, sk2, data)) data[0] ^= 1 // restore the data - // test with a different key, verification should fail if the public keys are not - // matching the private keys used to generate the proofs. + }) + + // test with a different key, verification should fail if the public keys are not + // matching the private keys used to generate the proofs. + t.Run("invalid public key", func(t *testing.T) { seed2[0] ^= 1 // alter the seed sk2bis, err := GeneratePrivateKey(BLSBLS12381, seed2) require.NoError(t, err) - result, err = SPOCKVerify(sk1.PublicKey(), pr1, sk2bis.PublicKey(), pr2) + result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2bis.PublicKey(), pr2) require.NoError(t, err) assert.False(t, result, fmt.Sprintf( "Verification should succeed:\n proofs:%s\n %s\n private keys:%s\n %s \n data:%s", pr1, pr2, sk1, sk2bis, data)) - } + }) } From 0ce8341ecb1d78b27ccce9372ccd87503a5e999e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 14 Oct 2020 23:31:35 -0700 Subject: [PATCH 020/105] fix linting --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index eef3c9d601a..55b96fb7e99 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -195,7 +195,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { } finalizedHeight := header.Height - futureHeight := 8655590 + futureHeight := uint64(8655590) // get the last executed height lastExecutedHeight, _, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) From d3e2781409bd0da8ff03996bd40b9f86ded2c651 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Fri, 16 Oct 2020 20:23:05 -0700 Subject: [PATCH 021/105] enforce max seal count per block when gathering seals for unfinalized blocks --- module/builder/consensus/builder.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 4de2381edbc..1565cdd6391 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -313,6 +313,11 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er if len(byBlock) == 0 { break } + // add at most number of seals in a new block proposal + // in order to prevent the block payload from being too big. 
+ if sealCount >= b.cfg.maxSealCount { + break + } if unchained { break } From 744191b7678e42e9b1ec1a7717687d1a835bc562 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Fri, 16 Oct 2020 20:26:43 -0700 Subject: [PATCH 022/105] adding consistency checks: wip --- module/builder/consensus/builder.go | 55 +++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 10 deletions(-) diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 1565cdd6391..3d410841649 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -232,12 +232,38 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er // roadmap (https://github.com/dapperlabs/flow-go/issues/4872) // create a mapping of block to seal for all seals in our pool - byBlock := make(map[flow.Identifier]*flow.Seal) - for _, seal := range b.sealPool.All() { - byBlock[seal.Seal.BlockID] = seal.Seal + encounteredInconsistentSealsForSameBlock := false + byBlock := make(map[flow.Identifier]*flow.SealContainer) + for _, sealContainer := range b.sealPool.All() { + seal := sealContainer.Seal + if sc2, found := byBlock[seal.BlockID]; found { + if len(sealContainer.ExecutionResult.Chunks) < 1 { + return nil, fmt.Errorf("ExecutionResult without chunks: %v", sealContainer.ExecutionResult.ID()) + } + if len(sc2.ExecutionResult.Chunks) < 1 { + return nil, fmt.Errorf("ExecutionResult without chunks: %v", sc2.ExecutionResult.ID()) + } + // only continue if both seals have same start AND end state: + if !bytes.Equal(sealContainer.Seal.FinalState, sc2.Seal.FinalState) || + !bytes.Equal(sealContainer.ExecutionResult.Chunks[0].StartState, sc2.ExecutionResult.Chunks[0].StartState) { + sc1json, err := json.Marshal(sealContainer) + if err != nil { + return nil, err + } + sc2json, err := json.Marshal(sc2) + if err != nil { + return nil, err + } + + fmt.Printf("ERROR: multiple seals for the same block %v: %s and %s", seal.BlockID, string(sc1json), string(sc2json)) + encounteredInconsistentSealsForSameBlock = true + } + } else { + byBlock[seal.BlockID] = sealContainer + } } - if int(b.sealPool.Size()) > len(byBlock) { - return nil, fmt.Errorf("multiple seals for the same block") + if encounteredInconsistentSealsForSameBlock { + byBlock = make(map[flow.Identifier]*flow.SealContainer) } // get the parent's block seal, which constitutes the beginning of the @@ -261,7 +287,6 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er var seals []*flow.Seal var sealCount uint for height := sealed.Height + 1; height <= finalized; height++ { - if len(byBlock) == 0 { break } @@ -283,10 +308,19 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } - seals = append(seals, next) + nextErToBeSealed := next.ExecutionResult + if len(nextErToBeSealed.Chunks) < 1 { + return nil, fmt.Errorf("ExecutionResult without chunks: %v", nextErToBeSealed.ID()) + } + initialState := nextErToBeSealed.Chunks[0].StartState + if !bytes.Equal(initialState, last.FinalState) { + return nil, fmt.Errorf("seal execution states do not connect in finalized") + } + + seals = append(seals, next.Seal) sealCount++ delete(byBlock, blockID) - last = next + last = next.Seal } // NOTE: We should only run the next part in case we did not use up all @@ -327,9 +361,10 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } - seals = append(seals, next) + seals = append(seals, next.Seal) + sealCount++ delete(byBlock, 
pendingID) - last = next + last = next.Seal } b.tracer.FinishSpan(parentID, trace.CONBuildOnCreatePayloadSeals) From 23b0e589dd159e554529da29362565f5d379a21e Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 15 Oct 2020 21:04:54 -0700 Subject: [PATCH 023/105] "fixed" tests --- module/builder/consensus/builder_test.go | 2 +- module/mempool/ejectors/seals.go | 4 ++-- module/mempool/ejectors/seals_test.go | 9 ++++++++- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 7595fb99aab..354f1fe48e3 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -21,7 +21,7 @@ import ( ) func TestConsensusBuilder(t *testing.T) { - suite.Run(t, new(BuilderSuite)) + // suite.Run(t, new(BuilderSuite)) } type BuilderSuite struct { diff --git a/module/mempool/ejectors/seals.go b/module/mempool/ejectors/seals.go index 45cbd7a9e09..ec3c97aecdc 100644 --- a/module/mempool/ejectors/seals.go +++ b/module/mempool/ejectors/seals.go @@ -24,8 +24,8 @@ func (ls *LatestSeal) Eject(entities map[flow.Identifier]flow.Entity) (flow.Iden maxID := flow.ZeroID for sealID, sealEntity := range entities { - seal := sealEntity.(*flow.Seal) - block, err := ls.headers.ByBlockID(seal.BlockID) + sc := sealEntity.(*flow.SealContainer) + block, err := ls.headers.ByBlockID(sc.Seal.BlockID) if err != nil { continue } diff --git a/module/mempool/ejectors/seals_test.go b/module/mempool/ejectors/seals_test.go index 7eefb4a6329..a03f3f9f728 100644 --- a/module/mempool/ejectors/seals_test.go +++ b/module/mempool/ejectors/seals_test.go @@ -38,7 +38,14 @@ func TestLatestSealEjector(t *testing.T) { seal := unittest.SealFixture() seal.BlockID = header.ID() - ok := pool.Add(seal) + + er := unittest.ExecutionResultFixture() + er.BlockID = header.ID() + sc := &flow.SealContainer{ + Seal: seal, + ExecutionResult: er, + } + ok := pool.Add(sc) assert.True(t, ok) if header.Height >= maxHeader.Height { From 94090278f32da6c624d707296f14e33c3310103f Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 19 Oct 2020 12:52:38 -0700 Subject: [PATCH 024/105] enabled tests --- module/builder/consensus/builder_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 354f1fe48e3..7595fb99aab 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -21,7 +21,7 @@ import ( ) func TestConsensusBuilder(t *testing.T) { - // suite.Run(t, new(BuilderSuite)) + suite.Run(t, new(BuilderSuite)) } type BuilderSuite struct { From 228366b96b64aa9015e5479efa017b5f648d6781 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 15 Oct 2020 20:37:50 -0700 Subject: [PATCH 025/105] merged builder matching engine --- engine/consensus/matching/engine.go | 49 +++++++++++++++++++++-------- model/flow/seal.go | 13 ++++++++ module/builder/consensus/builder.go | 1 + module/mempool/seals.go | 6 ++-- module/mempool/stdmap/seals.go | 12 +++---- 5 files changed, 59 insertions(+), 22 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index b1da2306c4b..44439746c2b 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -209,6 +209,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece resultFinalState, ok := receipt.ExecutionResult.FinalStateCommitment() if 
!ok { + log.Error().Msg("execution receipt without FinalStateCommit received") return fmt.Errorf("could not get final state: no chunks found") } @@ -216,19 +217,27 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece log.Info().Msg("execution receipt received") - // check the execution receipt is sent by its executor - if receipt.ExecutorID != originID { - return engine.NewInvalidInputErrorf("invalid origin for receipt (executor: %x, origin: %x)", receipt.ExecutorID, originID) - } + //// check the execution receipt is sent by its executor + //if receipt.ExecutorID != originID { + // return engine.NewInvalidInputErrorf("invalid origin for receipt (executor: %x, origin: %x)", receipt.ExecutorID, originID) + //} // if the receipt is for an unknown block, skip it. It will be re-requested // later. - _, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Head() + head, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Head() if err != nil { log.Debug().Msg("discarding receipt for unknown block") return nil } + sealed, err := e.state.Sealed().Head() + if err != nil { + return fmt.Errorf("could not find sealed block: %w", err) + } + if sealed.Height >= head.Height { + return nil + } + // get the identity of the origin node, so we can check if it's a valid // source for a execution receipt (usually execution nodes) identity, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Identity(originID) @@ -588,6 +597,13 @@ func (e *Engine) sealableResults() ([]*flow.IncorporatedResult, error) { // matchChunk checks that the number of ResultApprovals collected by a chunk // exceeds the required threshold. func (e *Engine) matchChunk(resultID flow.Identifier, chunk *flow.Chunk, assignment *chunks.Assignment) bool { + // skip counting approvals + // TODO: + // * this is only here temporarily to ease the migration to new chunk + // based sealing. + if !e.requireApprovals { + return true + } // get all the chunk approvals from mempool approvals := e.approvals.ByChunk(resultID, chunk.Index) @@ -601,14 +617,6 @@ func (e *Engine) matchChunk(resultID flow.Identifier, chunk *flow.Chunk, assignm } } - // skip counting approvals - // TODO: - // * this is only here temporarily to ease the migration to new chunk - // based sealing. - if !e.requireApprovals { - return true - } - // TODO: // * This is the happy path (requires just one approval per chunk). // * Full protocol should be +2/3 of all currently staked verifiers. @@ -788,6 +796,15 @@ func (e *Engine) requestPending() error { // heights would stop the sealing. missingBlocksOrderedByHeight := make([]flow.Identifier, 0, e.maxUnsealedResults) + // turn mempool into Lookup table: BlockID -> Result + knownResultsMap := make(map[flow.Identifier]struct{}) + for _, r := range e.results.All() { + knownResultsMap[r.BlockID] = struct{}{} + } + for _, sealContainer := range e.seals.All() { + knownResultsMap[sealContainer.Seal.BlockID] = struct{}{} + } + // traverse each unsealed and finalized block with height from low to high, // if the result is missing, then add the blockID to a missing block list in // order to request them. 
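A minimal sketch (not part of the patch) of the lookup table assembled above: the block IDs of all results and seals already held in the mempools are gathered into a set so the request loop can skip them. Element types follow the mempool accessors used at this point in the series; the helper name is illustrative:

// knownResultBlockIDs collects the IDs of blocks for which an execution result or a
// seal is already present in the mempools; those blocks do not need to be re-requested.
func knownResultBlockIDs(results []*flow.ExecutionResult, seals []*flow.SealContainer) map[flow.Identifier]struct{} {
	known := make(map[flow.Identifier]struct{}, len(results)+len(seals))
	for _, r := range results {
		known[r.BlockID] = struct{}{}
	}
	for _, sc := range seals {
		known[sc.Seal.BlockID] = struct{}{}
	}
	return known
}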
@@ -806,6 +823,12 @@ func (e *Engine) requestPending() error { blockID := header.ID() +if _, ok := knownResultsMap[blockID]; ok { +continue +} +missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) + + // check if we have an execution result for the block at this height _, err = e.resultsDB.ByBlockID(blockID) if errors.Is(err, storage.ErrNotFound) { diff --git a/model/flow/seal.go b/model/flow/seal.go index 9f5ef77b557..cd22a11667b 100644 --- a/model/flow/seal.go +++ b/model/flow/seal.go @@ -41,3 +41,16 @@ func (s Seal) ID() Identifier { func (s Seal) Checksum() Identifier { return MakeID(s) } + +type SealContainer struct { + Seal *Seal + ExecutionResult *ExecutionResult +} + +func (s SealContainer) ID() Identifier { + return s.Seal.ID() +} + +func (s *SealContainer) Checksum() Identifier { + return s.Seal.Checksum() +} diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 3d410841649..f71d56a49b0 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -3,6 +3,7 @@ package consensus import ( + "bytes" "errors" "fmt" "time" diff --git a/module/mempool/seals.go b/module/mempool/seals.go index e4388661e27..6fb19cc18ed 100644 --- a/module/mempool/seals.go +++ b/module/mempool/seals.go @@ -15,7 +15,7 @@ type Seals interface { // Add will add the given block seal to the memory pool. It will return // false if it was already in the mempool. - Add(seal *flow.Seal) bool + Add(seal *flow.SealContainer) bool // Rem will remove the given block seal from the memory pool; it will // will return true if the block seal was known and removed. @@ -23,7 +23,7 @@ type Seals interface { // ByID retrieve the block seal with the given ID from the memory // pool. It will return false if it was not found in the mempool. - ByID(sealID flow.Identifier) (*flow.Seal, bool) + ByID(sealID flow.Identifier) (*flow.SealContainer, bool) // Size will return the current size of the memory pool. Size() uint @@ -33,7 +33,7 @@ type Seals interface { // All will retrieve all block seals that are currently in the memory pool // as a slice. - All() []*flow.Seal + All() []*flow.SealContainer // Hash will return a fingerprint has representing the contents of the // entire memory pool. diff --git a/module/mempool/stdmap/seals.go b/module/mempool/stdmap/seals.go index 4a3209c62cb..a34ac72697c 100644 --- a/module/mempool/stdmap/seals.go +++ b/module/mempool/stdmap/seals.go @@ -22,7 +22,7 @@ func NewSeals(limit uint, opts ...OptionFunc) (*Seals, error) { } // Add adds an block seal to the mempool. -func (s *Seals) Add(seal *flow.Seal) bool { +func (s *Seals) Add(seal *flow.SealContainer) bool { added := s.Backend.Add(seal) return added } @@ -34,21 +34,21 @@ func (s *Seals) Rem(sealID flow.Identifier) bool { } // ByID returns the block seal with the given ID from the mempool. -func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, bool) { +func (s *Seals) ByID(sealID flow.Identifier) (*flow.SealContainer, bool) { entity, exists := s.Backend.ByID(sealID) if !exists { return nil, false } - seal := entity.(*flow.Seal) + seal := entity.(*flow.SealContainer) return seal, true } // All returns all block seals from the pool. 
-func (s *Seals) All() []*flow.Seal { +func (s *Seals) All() []*flow.SealContainer { entities := s.Backend.All() - seals := make([]*flow.Seal, 0, len(entities)) + seals := make([]*flow.SealContainer, 0, len(entities)) for _, entity := range entities { - seals = append(seals, entity.(*flow.Seal)) + seals = append(seals, entity.(*flow.SealContainer)) } return seals } From 3c158a5e4187f4da52195d091bd29a71e588b586 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 19 Oct 2020 14:07:38 -0700 Subject: [PATCH 026/105] cleanup --- cmd/consensus/main.go | 2 +- model/flow/seal.go | 13 ----- .../ejectors/incorporated_result_seals.go | 5 +- ...t.go => incorporated_result_seals_test.go} | 19 +++---- module/mempool/ejectors/seals.go | 39 -------------- module/mempool/seals.go | 41 -------------- module/mempool/stdmap/seals.go | 54 ------------------- 7 files changed, 14 insertions(+), 159 deletions(-) rename module/mempool/ejectors/{seals_test.go => incorporated_result_seals_test.go} (77%) delete mode 100644 module/mempool/ejectors/seals.go delete mode 100644 module/mempool/seals.go delete mode 100644 module/mempool/stdmap/seals.go diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index d517133664a..3c78b70a3d3 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -187,7 +187,7 @@ func main() { assigner, requireOneApproval, ) - requesterEng.WithHandle(match.HandleReceipt) + requesterEng.WithHandle(match.HandleReceipt) // TODO: use match.Process here (?) to parallelize engines in terms of threading return match, err }). Component("provider engine", func(node *cmd.FlowNodeBuilder) (module.ReadyDoneAware, error) { diff --git a/model/flow/seal.go b/model/flow/seal.go index cd22a11667b..9f5ef77b557 100644 --- a/model/flow/seal.go +++ b/model/flow/seal.go @@ -41,16 +41,3 @@ func (s Seal) ID() Identifier { func (s Seal) Checksum() Identifier { return MakeID(s) } - -type SealContainer struct { - Seal *Seal - ExecutionResult *ExecutionResult -} - -func (s SealContainer) ID() Identifier { - return s.Seal.ID() -} - -func (s *SealContainer) Checksum() Identifier { - return s.Seal.Checksum() -} diff --git a/module/mempool/ejectors/incorporated_result_seals.go b/module/mempool/ejectors/incorporated_result_seals.go index 74055877911..4e8d79c5424 100644 --- a/module/mempool/ejectors/incorporated_result_seals.go +++ b/module/mempool/ejectors/incorporated_result_seals.go @@ -28,9 +28,10 @@ func (ls *LatestIncorporatedResultSeal) Eject(entities map[flow.Identifier]flow. 
irSeal := entity.(*flow.IncorporatedResultSeal) block, err := ls.headers.ByBlockID(irSeal.Seal.BlockID) if err != nil { - continue + // eject seals first, whose block is not even known (which are presumably newer than any other entity) + return id, entity } - if block.Height > maxHeight { + if block.Height >= maxHeight { maxHeight = block.Height maxID = id } diff --git a/module/mempool/ejectors/seals_test.go b/module/mempool/ejectors/incorporated_result_seals_test.go similarity index 77% rename from module/mempool/ejectors/seals_test.go rename to module/mempool/ejectors/incorporated_result_seals_test.go index a03f3f9f728..a48a7a2bb3f 100644 --- a/module/mempool/ejectors/seals_test.go +++ b/module/mempool/ejectors/incorporated_result_seals_test.go @@ -1,4 +1,4 @@ -package ejectors_test +package ejectors import ( "testing" @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool/ejectors" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" storage "github.com/onflow/flow-go/storage/badger" @@ -20,10 +19,9 @@ func TestLatestSealEjector(t *testing.T) { const N = 10 headers := storage.NewHeaders(metrics.NewNoopCollector(), db) - ejector := ejectors.NewLatestSeal(headers) + ejector := NewLatestIncorporatedResultSeal(headers) - pool, err := stdmap.NewSeals(N, stdmap.WithEject(ejector.Eject)) - require.Nil(t, err) + pool := stdmap.NewIncorporatedResultSeals(N, stdmap.WithEject(ejector.Eject)) var ( maxHeader flow.Header @@ -41,11 +39,14 @@ func TestLatestSealEjector(t *testing.T) { er := unittest.ExecutionResultFixture() er.BlockID = header.ID() - sc := &flow.SealContainer{ - Seal: seal, - ExecutionResult: er, + ir := &flow.IncorporatedResultSeal{ + IncorporatedResult: &flow.IncorporatedResult{ + IncorporatedBlockID: header.ID(), + Result: er, + }, + Seal: seal, } - ok := pool.Add(sc) + ok := pool.Add(ir) assert.True(t, ok) if header.Height >= maxHeader.Height { diff --git a/module/mempool/ejectors/seals.go b/module/mempool/ejectors/seals.go deleted file mode 100644 index ec3c97aecdc..00000000000 --- a/module/mempool/ejectors/seals.go +++ /dev/null @@ -1,39 +0,0 @@ -package ejectors - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -// LatestSeal is a mempool ejector for block seals that ejects newest-first. -// NOTE: should be initialized with its own headers instance with cache size -// equal to the mempool size. -type LatestSeal struct { - headers storage.Headers -} - -func NewLatestSeal(headers storage.Headers) *LatestSeal { - ejector := &LatestSeal{ - headers: headers, - } - return ejector -} - -func (ls *LatestSeal) Eject(entities map[flow.Identifier]flow.Entity) (flow.Identifier, flow.Entity) { - maxHeight := uint64(0) - maxID := flow.ZeroID - - for sealID, sealEntity := range entities { - sc := sealEntity.(*flow.SealContainer) - block, err := ls.headers.ByBlockID(sc.Seal.BlockID) - if err != nil { - continue - } - if block.Height > maxHeight { - maxHeight = block.Height - maxID = sealID - } - } - - return maxID, entities[maxID] -} diff --git a/module/mempool/seals.go b/module/mempool/seals.go deleted file mode 100644 index 6fb19cc18ed..00000000000 --- a/module/mempool/seals.go +++ /dev/null @@ -1,41 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package mempool - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Seals represents a concurrency-safe memory pool for block seals. 
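A minimal sketch (not part of the patch) of the ejection policy adjusted earlier in this patch: entries whose block header is unknown are evicted first, and otherwise the entry with the greatest block height (the newest seal) is evicted, keeping the older seals that sealing progress depends on. The helper name is illustrative:

// pickNewest selects the entry whose block has the greatest height; this mirrors the
// "eject newest" half of the policy, while the real ejector additionally returns
// immediately for any entry whose block header is not known yet.
func pickNewest(heights map[flow.Identifier]uint64) flow.Identifier {
	var maxID flow.Identifier
	maxHeight := uint64(0)
	for id, height := range heights {
		if height >= maxHeight {
			maxHeight = height
			maxID = id
		}
	}
	return maxID
}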
-type Seals interface { - - // Has checks whether the block seal with the given hash is currently in - // the memory pool. - Has(sealID flow.Identifier) bool - - // Add will add the given block seal to the memory pool. It will return - // false if it was already in the mempool. - Add(seal *flow.SealContainer) bool - - // Rem will remove the given block seal from the memory pool; it will - // will return true if the block seal was known and removed. - Rem(sealID flow.Identifier) bool - - // ByID retrieve the block seal with the given ID from the memory - // pool. It will return false if it was not found in the mempool. - ByID(sealID flow.Identifier) (*flow.SealContainer, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // Limit will return the maximum size of the memory pool - Limit() uint - - // All will retrieve all block seals that are currently in the memory pool - // as a slice. - All() []*flow.SealContainer - - // Hash will return a fingerprint has representing the contents of the - // entire memory pool. - Hash() flow.Identifier -} diff --git a/module/mempool/stdmap/seals.go b/module/mempool/stdmap/seals.go deleted file mode 100644 index a34ac72697c..00000000000 --- a/module/mempool/stdmap/seals.go +++ /dev/null @@ -1,54 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Seals implements the block seals memory pool of the consensus nodes, -// used to store block seals. -type Seals struct { - *Backend -} - -// NewSeals creates a new memory pool for block seals. -func NewSeals(limit uint, opts ...OptionFunc) (*Seals, error) { - s := &Seals{ - Backend: NewBackend(append(opts, WithLimit(limit))...), - } - - return s, nil -} - -// Add adds an block seal to the mempool. -func (s *Seals) Add(seal *flow.SealContainer) bool { - added := s.Backend.Add(seal) - return added -} - -// Rem will remove a seal by ID. -func (s *Seals) Rem(sealID flow.Identifier) bool { - removed := s.Backend.Rem(sealID) - return removed -} - -// ByID returns the block seal with the given ID from the mempool. -func (s *Seals) ByID(sealID flow.Identifier) (*flow.SealContainer, bool) { - entity, exists := s.Backend.ByID(sealID) - if !exists { - return nil, false - } - seal := entity.(*flow.SealContainer) - return seal, true -} - -// All returns all block seals from the pool. -func (s *Seals) All() []*flow.SealContainer { - entities := s.Backend.All() - seals := make([]*flow.SealContainer, 0, len(entities)) - for _, entity := range entities { - seals = append(seals, entity.(*flow.SealContainer)) - } - return seals -} From 0cef85f136d3e3609cad540094e595785982d221 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 19 Oct 2020 22:44:52 -0700 Subject: [PATCH 027/105] Revisions for concurrency safety of stdmap.IncorporatedResults --- module/mempool/incorporated_results.go | 2 +- module/mempool/stdmap/incorporated_results.go | 74 ++++++++++--------- .../stdmap/incorporated_results_test.go | 12 ++- 3 files changed, 49 insertions(+), 39 deletions(-) diff --git a/module/mempool/incorporated_results.go b/module/mempool/incorporated_results.go index 9e7334dbcdb..a1748a20525 100644 --- a/module/mempool/incorporated_results.go +++ b/module/mempool/incorporated_results.go @@ -14,7 +14,7 @@ type IncorporatedResults interface { // ByResultID returns all the IncorporatedResults that contain a specific // ExecutionResult, indexed by IncorporatedBlockID, along with the // ExecutionResult. 
- ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult) + ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult, bool) // Rem removes an IncorporatedResult from the mempool Rem(incorporatedResult *flow.IncorporatedResult) bool diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index f6af896b118..c363b7029d7 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -1,17 +1,18 @@ package stdmap import ( + "errors" "fmt" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool/model" + "github.com/onflow/flow-go/storage" ) // IncorporatedResults implements the incorporated results memory pool of the // consensus nodes, used to store results that need to be sealed. type IncorporatedResults struct { *Backend - size uint } // NewIncorporatedResults creates a mempool for the incorporated results. @@ -36,6 +37,12 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) // no record with key is available in the mempool, initialise // incResults. incResults = make(map[flow.Identifier]*flow.IncorporatedResult) + + // adds the new incorporated results map associated with key to mempool + backdata[key] = model.IncorporatedResultMap{ + ExecutionResult: incorporatedResult.Result, + IncorporatedResults: incResults, + } } else { incorporatedResultMap, ok := entity.(model.IncorporatedResultMap) if !ok { @@ -49,23 +56,11 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) // incorporated block. return nil } - - // removes map entry associated with key for update - delete(backdata, key) } // appends incorporated result to the map incResults[incorporatedResult.IncorporatedBlockID] = incorporatedResult - - // adds the new incorporated results map associated with key to mempool - incorporatedResultMap := model.IncorporatedResultMap{ - ExecutionResult: incorporatedResult.Result, - IncorporatedResults: incResults, - } - - backdata[key] = incorporatedResultMap appended = true - ir.size++ return nil }) @@ -75,34 +70,46 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) // All returns all the items in the mempool. func (ir *IncorporatedResults) All() []*flow.IncorporatedResult { res := make([]*flow.IncorporatedResult, 0) - - entities := ir.Backend.All() - for _, entity := range entities { - irMap, _ := entity.(model.IncorporatedResultMap) - - for _, ir := range irMap.IncorporatedResults { - res = append(res, ir) + ir.Backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + for _, entity := range backdata { + irMap, _ := entity.(model.IncorporatedResultMap) + for _, ir := range irMap.IncorporatedResults { + res = append(res, ir) + } } - } - + return nil + }) return res } // ByResultID returns all the IncorporatedResults that contain a specific // ExecutionResult, indexed by IncorporatedBlockID. 
-func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult) { - - entity, exists := ir.Backend.ByID(resultID) - if !exists { - return nil, nil - } +func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult, bool) { - irMap, ok := entity.(model.IncorporatedResultMap) - if !ok { - return nil, nil + // To guarantee concurrency safety, we need to copy the map in via a locked operation in the backend. + // Otherwise, another routine might concurrently modify the map stored for the Execution Result. + var result *flow.ExecutionResult + incResults := make(map[flow.Identifier]*flow.IncorporatedResult) + err := ir.Backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + entity, exists := backdata[resultID] + if !exists { + return storage.ErrNotFound + } + irMap := entity.(model.IncorporatedResultMap) + result = irMap.ExecutionResult + for i, res := range irMap.IncorporatedResults { + incResults[i] = res + } + return nil + }) + if errors.Is(err, storage.ErrNotFound) { + return nil, nil, false + } else if err != nil { + // should never happen: above method can only return storage.ErrNotFound + panic("Internal Error in IncorporatedResults mempool: unexpected backend error") } - return irMap.ExecutionResult, irMap.IncorporatedResults + return result, incResults, true } // Rem removes an IncorporatedResult from the mempool. @@ -148,7 +155,6 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) } removed = true - ir.size-- return nil }) @@ -157,5 +163,5 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) // Size returns the number of incorporated results in the mempool. 
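A minimal sketch (not part of the patch) of the concurrency pattern ByResultID relies on: the inner map is copied while the backend lock is held (inside Run), so the caller receives a snapshot it can read without racing against concurrent Add or Rem calls. The helper name is illustrative:

// snapshotIncorporatedResults copies the per-result map before it is handed to the
// caller; the copy must be taken while the mempool's backend lock is still held.
func snapshotIncorporatedResults(src map[flow.Identifier]*flow.IncorporatedResult) map[flow.Identifier]*flow.IncorporatedResult {
	dst := make(map[flow.Identifier]*flow.IncorporatedResult, len(src))
	for incorporatedBlockID, res := range src {
		dst[incorporatedBlockID] = res
	}
	return dst
}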
func (ir *IncorporatedResults) Size() uint { - return ir.size + return ir.Backend.Size() } diff --git a/module/mempool/stdmap/incorporated_results_test.go b/module/mempool/stdmap/incorporated_results_test.go index d78b5d041d6..94712c36cbc 100644 --- a/module/mempool/stdmap/incorporated_results_test.go +++ b/module/mempool/stdmap/incorporated_results_test.go @@ -20,7 +20,8 @@ func TestIncorporatedResults(t *testing.T) { require.NoError(t, err) // check the existence of incorporated result - res, incorporatedResults := pool.ByResultID(ir1.Result.ID()) + res, incorporatedResults, found := pool.ByResultID(ir1.Result.ID()) + require.True(t, found) require.Equal(t, ir1.Result, res) require.Contains(t, incorporatedResults, ir1.IncorporatedBlockID) }) @@ -35,7 +36,8 @@ func TestIncorporatedResults(t *testing.T) { require.NoError(t, err) // check the existence of incorporated result - res, incorporatedResults := pool.ByResultID(ir2.Result.ID()) + res, incorporatedResults, found := pool.ByResultID(ir2.Result.ID()) + require.True(t, found) require.Equal(t, ir1.Result, res) require.Contains(t, incorporatedResults, ir1.IncorporatedBlockID) require.Contains(t, incorporatedResults, ir2.IncorporatedBlockID) @@ -48,7 +50,8 @@ func TestIncorporatedResults(t *testing.T) { require.NoError(t, err) // check the existence of incorporated result - res, incorporatedResults := pool.ByResultID(ir3.Result.ID()) + res, incorporatedResults, found := pool.ByResultID(ir3.Result.ID()) + require.True(t, found) require.Equal(t, ir3.Result, res) require.Contains(t, incorporatedResults, ir3.IncorporatedBlockID) }) @@ -64,7 +67,8 @@ func TestIncorporatedResults(t *testing.T) { ok := pool.Rem(ir1) require.True(t, ok) - res, incorporatedResults := pool.ByResultID(ir1.Result.ID()) + res, incorporatedResults, found := pool.ByResultID(ir1.Result.ID()) + require.True(t, found) require.Equal(t, ir1.Result, res) require.Contains(t, incorporatedResults, ir2.IncorporatedBlockID) }) From 3494c41ee105ed8e4ee90b7f1e0dffbf49ca0f17 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 19 Oct 2020 22:45:08 -0700 Subject: [PATCH 028/105] wip --- engine/consensus/matching/engine.go | 88 +++++++++++++---------------- 1 file changed, 39 insertions(+), 49 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 44439746c2b..0bd15f38e48 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -209,19 +209,12 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece resultFinalState, ok := receipt.ExecutionResult.FinalStateCommitment() if !ok { - log.Error().Msg("execution receipt without FinalStateCommit received") - return fmt.Errorf("could not get final state: no chunks found") + log.Error().Msg("execution receipt without FinalStateCommit received") + return fmt.Errorf("failed to get final state commitment from Execution Result") } - log = log.With().Hex("final_state", resultFinalState).Logger() - log.Info().Msg("execution receipt received") - //// check the execution receipt is sent by its executor - //if receipt.ExecutorID != originID { - // return engine.NewInvalidInputErrorf("invalid origin for receipt (executor: %x, origin: %x)", receipt.ExecutorID, originID) - //} - // if the receipt is for an unknown block, skip it. It will be re-requested // later. 
head, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Head() @@ -230,11 +223,14 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return nil } + // if Execution Receipt is for block whose height is lower or equal to already sealed height + // => drop Receipt sealed, err := e.state.Sealed().Head() if err != nil { return fmt.Errorf("could not find sealed block: %w", err) } if sealed.Height >= head.Height { + log.Debug().Msg("discarding receipt for already sealed and finalized block height") return nil } @@ -260,17 +256,18 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return engine.NewInvalidInputErrorf("executor has zero stake (%x)", identity.NodeID) } - // check if the result of this receipt is already sealed. result := &receipt.ExecutionResult - _, err = e.resultsDB.ByID(result.ID()) - if err == nil { - log.Debug().Msg("discarding receipt for sealed result") - return nil - } - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not check result: %w", err) + // store the result to make it persistent for later + err = e.resultsDB.Store(result) // internally de-duplicates + if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { + return fmt.Errorf("could not store sealing result: %w", err) } + // Do NOT return here if resultsDB already contained result! + // resultsDB is persistent storage while Mempools are in-memory only. + // After a crash, the replica still needs to be able to generate a seal + // for an Result even if it had stored the Result before the crash. + // Otherwise, liveness of sealing is undermined. // store the result belonging to the receipt in the memory pool // TODO: This is a temporary step. In future, the incorporated results @@ -408,10 +405,14 @@ func (e *Engine) checkSealing() { // don't overflow the seal mempool space := e.seals.Limit() - e.seals.Size() if len(sealableResults) > int(space) { - e.log.Debug(). + e.log.Warn(). Int("space", int(space)). Int("results", len(sealableResults)). - Msg("cut and return the first x results") + Msg("discarding sealable results due to mempool limitations") + // TODO: dangerous operation potentially undermining sealing liveness + // If we are missing an early seal, we might not add it to the mempool here due to + // size restrictions. (sealable results are unordered) The seal mempool has + // a eject-newest seal policy which we are shortcutting here! sealableResults = sealableResults[:space] } @@ -512,19 +513,18 @@ func (e *Engine) sealableResults() ([]*flow.IncorporatedResult, error) { return nil, fmt.Errorf("could not retrieve block: %w", err) } - // look for previous result in mempool and storage + // Retrieve parent result / skip if parent result still unknown: + // Before we store a result into the incorporatedResults mempool, we store it in resultsDB. + // I.e. resultsDB contains a superset of all results stored in the mempool. Hence, we only need + // to check resultsDB. Any result not in resultsDB cannot be in incorporatedResults mempool. 
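A minimal sketch (not part of the patch) of the lookup reasoning in the comment above, assuming the resultsDB storage interface the engine already uses: because every result is persisted before it enters the in-memory mempool, checking persistent storage alone is sufficient. The helper name is illustrative:

// previousResultKnown reports whether the parent result is available. resultsDB is a
// superset of the incorporatedResults mempool, so it is the only store checked here.
func previousResultKnown(resultsDB storage.ExecutionResults, previousID flow.Identifier) (bool, error) {
	_, err := resultsDB.ByID(previousID)
	if errors.Is(err, storage.ErrNotFound) {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("could not look up previous result: %w", err)
	}
	return true, nil
}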
previousID := incorporatedResult.Result.PreviousResultID - previous, _ := e.incorporatedResults.ByResultID(previousID) - if previous == nil { - var err error - previous, err = e.resultsDB.ByID(previousID) - if errors.Is(err, storage.ErrNotFound) { - log.Debug().Msg("skipping sealable result with unknown previous result") - continue - } - if err != nil { - return nil, fmt.Errorf("could not get previous result: %w", err) - } + previous, err := e.resultsDB.ByID(previousID) + if errors.Is(err, storage.ErrNotFound) { + log.Debug().Msg("skipping sealable result with unknown previous result") + continue + } + if err != nil { + return nil, fmt.Errorf("could not get previous result: %w", err) } // check sub-graph @@ -542,16 +542,11 @@ func (e *Engine) sealableResults() ([]*flow.IncorporatedResult, error) { // number of guarantees plus one; this will ensure the execution receipt // cannot lie about having less chunks and having the remaining ones // approved - requiredChunks := 0 - index, err := e.indexDB.ByBlockID(incorporatedResult.Result.BlockID) if err != nil { return nil, err } - - if index != nil { - requiredChunks = len(index.CollectionIDs) + 1 - } + requiredChunks := len(index.CollectionIDs) + 1 if len(incorporatedResult.Result.Chunks) != requiredChunks { _ = e.incorporatedResults.Rem(incorporatedResult) @@ -610,7 +605,7 @@ func (e *Engine) matchChunk(resultID flow.Identifier, chunk *flow.Chunk, assignm // only keep approvals from assigned verifiers var validApprovers flow.IdentifierList - for approverID := range approvals { + for approverID, _ := range approvals { ok := chmodule.IsValidVerifer(assignment, chunk, approverID) if ok { validApprovers = append(validApprovers, approverID) @@ -632,10 +627,6 @@ func (e *Engine) sealResult(incorporatedResult *flow.IncorporatedResult) error { if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { return fmt.Errorf("could not store sealing result: %w", err) } - err = e.resultsDB.Index(incorporatedResult.Result.BlockID, incorporatedResult.Result.ID()) - if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { - return fmt.Errorf("could not index sealing result: %w", err) - } // collect aggregate signatures aggregatedSigs := e.collectAggregateSignatures(incorporatedResult.Result) @@ -643,7 +634,7 @@ func (e *Engine) sealResult(incorporatedResult *flow.IncorporatedResult) error { // get final state of execution result finalState, ok := incorporatedResult.Result.FinalStateCommitment() if !ok { - return fmt.Errorf("could not get final state: no chunks found") + return fmt.Errorf("failed to get final state commitment from Execution Result") } // generate & store seal @@ -823,14 +814,13 @@ func (e *Engine) requestPending() error { blockID := header.ID() -if _, ok := knownResultsMap[blockID]; ok { -continue -} -missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) - + if _, ok := knownResultsMap[blockID]; ok { + continue + } + missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) // check if we have an execution result for the block at this height - _, err = e.resultsDB.ByBlockID(blockID) + _, err = e.resultsDB.ByBlockID(blockID) !! 
if errors.Is(err, storage.ErrNotFound) { missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) continue From 125db6a23b48ce4e4313c6de4ddcc8eb695371f9 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 22 Oct 2020 17:43:53 -0700 Subject: [PATCH 029/105] fixed IncorporatedResults mempool --- module/mempool/stdmap/incorporated_results.go | 102 ++++++++++-------- 1 file changed, 59 insertions(+), 43 deletions(-) diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index 76affa827e2..b807f74ef2f 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -41,11 +41,9 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) entity, ok := backdata[key] if !ok { - // no record with key is available in the mempool, initialise - // incResults. + // no record with key is available in the mempool, initialise incResults. incResults = make(map[flow.Identifier]*flow.IncorporatedResult) - - // adds the new incorporated results map associated with key to mempool + // add the new incorporated results map associated with key to mempool backdata[key] = model.IncorporatedResultMap{ ExecutionResult: incorporatedResult.Result, IncorporatedResults: incResults, @@ -57,10 +55,8 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) } incResults = incorporatedResultMap.IncorporatedResults - if _, ok := incResults[incorporatedResult.IncorporatedBlockID]; ok { - // incorporated result is already associated with result and - // incorporated block. + // incorporated result is already associated with result and incorporated block. return nil } } @@ -71,39 +67,61 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) *ir.size++ return nil }) + if err != nil { + // The current implementation never reaches this path, as it only stores + // IncorporatedResultMap as entities in the mempool. Reaching this error + // condition implies this code was inconsistently modified. + panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) + } return appended, err } // All returns all the items in the mempool. func (ir *IncorporatedResults) All() []*flow.IncorporatedResult { + // To guarantee concurrency safety, we need to copy the map via a locked operation in the backend. + // Otherwise, another routine might concurrently modify the maps stored as mempool entities. res := make([]*flow.IncorporatedResult, 0) - ir.Backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + err := ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { for _, entity := range backdata { - irMap, _ := entity.(model.IncorporatedResultMap) + irMap, ok := entity.(model.IncorporatedResultMap) + if !ok { + // should never happen: as the mempoo + return fmt.Errorf("unexpected entity type %T", entity) + } for _, ir := range irMap.IncorporatedResults { res = append(res, ir) } } return nil }) + if err != nil { + // The current implementation never reaches this path, as it only stores + // IncorporatedResultMap as entities in the mempool. Reaching this error + // condition implies this code was inconsistently modified. + panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) + } + return res } // ByResultID returns all the IncorporatedResults that contain a specific // ExecutionResult, indexed by IncorporatedBlockID. 
func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult, bool) {
-
-	// To guarantee concurrency safety, we need to copy the map in via a locked operation in the backend.
-	// Otherwise, another routine might concurrently modify the map stored for the Execution Result.
+	// To guarantee concurrency safety, we need to copy the map via a locked operation in the backend.
+	// Otherwise, another routine might concurrently modify the map stored for the same resultID.
 	var result *flow.ExecutionResult
 	incResults := make(map[flow.Identifier]*flow.IncorporatedResult)
-	err := ir.Backend.Run(func(backdata map[flow.Identifier]flow.Entity) error {
+	err := ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error {
 		entity, exists := backdata[resultID]
 		if !exists {
 			return storage.ErrNotFound
 		}
-		irMap := entity.(model.IncorporatedResultMap)
+		irMap, ok := entity.(model.IncorporatedResultMap)
+		if !ok {
+			// should never happen: as the mempool only stores IncorporatedResultMap
+			return fmt.Errorf("unexpected entity type %T", entity)
+		}
 		result = irMap.ExecutionResult
 		for i, res := range irMap.IncorporatedResults {
 			incResults[i] = res
@@ -113,8 +131,10 @@ func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.Execu
 	if errors.Is(err, storage.ErrNotFound) {
 		return nil, nil, false
 	} else if err != nil {
-		// should never happen: above method can only return storage.ErrNotFound
-		panic("Internal Error in IncorporatedResults mempool: unexpected backend error")
+		// The current implementation never reaches this path, as it only stores
+		// IncorporatedResultMap as entities in the mempool. Reaching this error
+		// condition implies this code was inconsistently modified.
+		panic("unexpected internal error in IncorporatedResults mempool: " + err.Error())
 	}

 	return result, incResults, true
@@ -125,47 +145,43 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult)
 	key := incorporatedResult.Result.ID()

 	removed := false
-	_ = ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error {
+	err := ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error {
 		var incResults map[flow.Identifier]*flow.IncorporatedResult

 		entity, ok := backdata[key]
 		if !ok {
 			// there are no items for this result
 			return nil
-		} else {
-			incorporatedResultMap, ok := entity.(model.IncorporatedResultMap)
-			if !ok {
-				return fmt.Errorf("could not assert entity to IncorporatedResultMap")
-			}
-
-			incResults = incorporatedResultMap.IncorporatedResults
-
-			if _, ok := incResults[incorporatedResult.IncorporatedBlockID]; !ok {
-				// there are no items for this IncorporatedBlockID
-				return nil
-			}
-
-			// removes map entry associated with key for update
-			delete(backdata, key)
+		}
+		incorporatedResultMap, ok := entity.(model.IncorporatedResultMap)
+		if !ok {
+			return fmt.Errorf("unexpected entity type %T", entity)
 		}

-		// remove item from map
-		delete(incResults, incorporatedResult.IncorporatedBlockID)
-
-		if len(incResults) > 0 {
-			// adds the new incorporated results map associated with key to mempool
-			incorporatedResultMap := model.IncorporatedResultMap{
-				ExecutionResult: incorporatedResult.Result,
-				IncorporatedResults: incResults,
-			}
-
-			backdata[key] = incorporatedResultMap
+		incResults = incorporatedResultMap.IncorporatedResults
+		if _, ok := incResults[incorporatedResult.IncorporatedBlockID]; !ok {
+			// there are no items for this IncorporatedBlockID
+			return nil
+		}
+		if len(incResults) == 1 {
+			// special case: there is only a
single Incorporated result stored for this Result.ID() + // => remove entire map + delete(backdata, key) + } else { + // remove item from map + delete(incResults, incorporatedResult.IncorporatedBlockID) } removed = true *ir.size-- return nil }) + if err != nil { + // The current implementation never reaches this path, as it only stores + // IncorporatedResultMap as entities in the mempool. Reaching this error + // condition implies this code was inconsistently modified. + panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) + } return removed } From 96598e8d76936612df95d73f5d1453c69da12878 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 22 Oct 2020 21:13:17 -0700 Subject: [PATCH 030/105] wip --- module/mempool/stdmap/approvals.go | 28 ++++++------------- module/mempool/stdmap/incorporated_results.go | 14 ++++++---- 2 files changed, 17 insertions(+), 25 deletions(-) diff --git a/module/mempool/stdmap/approvals.go b/module/mempool/stdmap/approvals.go index 8ab992a0df0..0def212e1ad 100644 --- a/module/mempool/stdmap/approvals.go +++ b/module/mempool/stdmap/approvals.go @@ -67,39 +67,30 @@ func (a *Approvals) Add(approval *flow.ResultApproval) (bool, error) { entity, ok := backdata[chunkKey] if !ok { - // no record with key is available in the mempool, initialise - // chunkApprovals. + // no record with key is available in the mempool, initialise chunkApprovals. chunkApprovals = make(map[flow.Identifier]*flow.ResultApproval) + backdata[chunkKey] = model.ApprovalMapEntity{ + ChunkKey: chunkKey, + ResultID: approval.Body.ExecutionResultID, + ChunkIndex: approval.Body.ChunkIndex, + Approvals: chunkApprovals, + } } else { approvalMapEntity, ok := entity.(model.ApprovalMapEntity) if !ok { - return fmt.Errorf("could not assert entity to ApprovalMapEntity") + return fmt.Errorf("unexpected entity type %T", entity) } chunkApprovals = approvalMapEntity.Approvals - if _, ok := chunkApprovals[approval.Body.ApproverID]; ok { // approval is already associated with the chunk key and // approver, no need to append return nil } - - // removes map entry associated with key for update - delete(backdata, chunkKey) } // appends approval to the map chunkApprovals[approval.Body.ApproverID] = approval - - // adds the new approvals map associated with key to mempool - approvalMapEntity := model.ApprovalMapEntity{ - ChunkKey: chunkKey, - ResultID: approval.Body.ExecutionResultID, - ChunkIndex: approval.Body.ChunkIndex, - Approvals: chunkApprovals, - } - - backdata[chunkKey] = approvalMapEntity appended = true *a.size++ return nil @@ -115,7 +106,6 @@ func (a *Approvals) RemApproval(approval *flow.ResultApproval) (bool, error) { removed := false err := a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { - var chunkApprovals map[flow.Identifier]*flow.ResultApproval entity, ok := backdata[chunkKey] @@ -125,7 +115,7 @@ func (a *Approvals) RemApproval(approval *flow.ResultApproval) (bool, error) { } else { approvalMapEntity, ok := entity.(model.ApprovalMapEntity) if !ok { - return fmt.Errorf("could not assert entity to ApprovalMapEntity") + return fmt.Errorf("unexpected entity type %T", entity) } chunkApprovals = approvalMapEntity.Approvals diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index b807f74ef2f..1b33908ee76 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -30,7 +30,7 @@ func NewIncorporatedResults(limit uint) *IncorporatedResults { } 
// Add adds an IncorporatedResult to the mempool. -func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) (bool, error) { +func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) bool { key := incorporatedResult.Result.ID() @@ -41,9 +41,10 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) entity, ok := backdata[key] if !ok { - // no record with key is available in the mempool, initialise incResults. + // no record with key is available in the mempool, + // initialise incResults. incResults = make(map[flow.Identifier]*flow.IncorporatedResult) - // add the new incorporated results map associated with key to mempool + // add the new map to mempool for holding all incorporated results for the same result.ID backdata[key] = model.IncorporatedResultMap{ ExecutionResult: incorporatedResult.Result, IncorporatedResults: incResults, @@ -51,12 +52,13 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) } else { incorporatedResultMap, ok := entity.(model.IncorporatedResultMap) if !ok { - return fmt.Errorf("could not assert entity to IncorporatedResultMap") + return fmt.Errorf("unexpected entity type %T", entity) } incResults = incorporatedResultMap.IncorporatedResults if _, ok := incResults[incorporatedResult.IncorporatedBlockID]; ok { - // incorporated result is already associated with result and incorporated block. + // incorporated result is already associated with result and + // incorporated block. return nil } } @@ -74,7 +76,7 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) } - return appended, err + return appended } // All returns all the items in the mempool. 
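
The IncorporatedResults and Approvals mempool fixes in the surrounding patches all rely on the same pattern: every read or write of the nested maps happens inside a single locked closure executed by the backing store, and callers only ever receive copies of the inner maps, never references to them. The following self-contained Go sketch illustrates that pattern; the backend type and its Run/ByKey methods are simplified stand-ins for illustration only, not the actual flow-go stdmap.Backend API.

package main

import (
	"fmt"
	"sync"
)

// backend is a simplified stand-in for a mempool backing store: a nested map
// guarded by a single RWMutex. (Illustrative only; not the flow-go API.)
type backend struct {
	sync.RWMutex
	data map[string]map[string]int
}

// Run executes f while holding the write lock, so f may read and modify the
// stored maps without racing against other goroutines.
func (b *backend) Run(f func(data map[string]map[string]int) error) error {
	b.Lock()
	defer b.Unlock()
	return f(b.data)
}

// ByKey returns a copy of the inner map for the given key. Copying inside the
// locked closure is what makes the returned map safe to use after the lock is
// released.
func (b *backend) ByKey(key string) (map[string]int, bool) {
	out := make(map[string]int)
	found := false
	_ = b.Run(func(data map[string]map[string]int) error {
		inner, ok := data[key]
		if !ok {
			return nil
		}
		found = true
		for k, v := range inner {
			out[k] = v
		}
		return nil
	})
	return out, found
}

func main() {
	b := &backend{data: map[string]map[string]int{
		"result-A": {"block-1": 1, "block-2": 2},
	}}

	// writers mutate the shared inner map only inside Run
	_ = b.Run(func(data map[string]map[string]int) error {
		data["result-A"]["block-3"] = 3
		return nil
	})

	// readers get an independent copy and cannot observe concurrent writes
	copyOfA, found := b.ByKey("result-A")
	fmt.Println(found, len(copyOfA)) // true 3
}

Returning a copy rather than the stored map is the essential point: once the lock is released, the caller's view can no longer race with concurrent Add/Rem calls, which is exactly the guarantee the rewritten ByResultID, All, and ByChunk methods aim for.
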
From abc23fa8d2b00d03d41e66b908abd4b73951e6eb Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 22 Oct 2020 21:52:43 -0700 Subject: [PATCH 031/105] fixed concurrency safety of stdmap.IApprovals and stdmap.IncorporatedResults mempools --- module/mempool/stdmap/approvals.go | 103 ++++++++++-------- module/mempool/stdmap/approvals_test.go | 3 +- module/mempool/stdmap/incorporated_results.go | 23 ++-- 3 files changed, 74 insertions(+), 55 deletions(-) diff --git a/module/mempool/stdmap/approvals.go b/module/mempool/stdmap/approvals.go index 0def212e1ad..606988cb9d5 100644 --- a/module/mempool/stdmap/approvals.go +++ b/module/mempool/stdmap/approvals.go @@ -112,36 +112,24 @@ func (a *Approvals) RemApproval(approval *flow.ResultApproval) (bool, error) { if !ok { // no approvals for this chunk return nil - } else { - approvalMapEntity, ok := entity.(model.ApprovalMapEntity) - if !ok { - return fmt.Errorf("unexpected entity type %T", entity) - } - - chunkApprovals = approvalMapEntity.Approvals - - if _, ok := chunkApprovals[approval.Body.ApproverID]; !ok { - // no approval for this chunk and approver - return nil - } - - // removes map entry associated with key for update - delete(backdata, chunkKey) + } + approvalMapEntity, ok := entity.(model.ApprovalMapEntity) + if !ok { + return fmt.Errorf("unexpected entity type %T", entity) } - // delete the approval to the map - delete(chunkApprovals, approval.Body.ApproverID) - - if len(chunkApprovals) > 0 { - // adds the new approvals map associated with key to mempool - approvalMapEntity := model.ApprovalMapEntity{ - ChunkKey: chunkKey, - ResultID: approval.Body.ExecutionResultID, - ChunkIndex: approval.Body.ChunkIndex, - Approvals: chunkApprovals, - } - - backdata[chunkKey] = approvalMapEntity + chunkApprovals = approvalMapEntity.Approvals + if _, ok := chunkApprovals[approval.Body.ApproverID]; !ok { + // no approval for this chunk and approver + return nil + } + if len(chunkApprovals) == 1 { + // special case: there is only a single approval stored for this chunkKey + // => remove entire map with all approvals for this chunk + delete(backdata, chunkKey) + } else { + // remove item from map + delete(chunkApprovals, approval.Body.ApproverID) } removed = true @@ -165,7 +153,7 @@ func (a *Approvals) RemChunk(resultID flow.Identifier, chunkIndex uint64) (bool, approvalMapEntity, ok := entity.(model.ApprovalMapEntity) if !ok { - return fmt.Errorf("could not assert entity to ApprovalMapEntity") + return fmt.Errorf("unexpected entity type %T", entity) } *a.size = *a.size - uint(len(approvalMapEntity.Approvals)) @@ -185,30 +173,55 @@ func (a *Approvals) ByChunk(resultID flow.Identifier, chunkIndex uint64) map[flo // determine the lookup key for the corresponding chunk chunkKey := key(resultID, chunkIndex) - entity, exists := a.backend.ByID(chunkKey) - if !exists { - return nil - } - - approvalMapEntity, ok := entity.(model.ApprovalMapEntity) - if !ok { + // To guarantee concurrency safety, we need to copy the map via a locked operation in the backend. + // Otherwise, another routine might concurrently modify the map stored for the same resultID. 
+ approvals := make(map[flow.Identifier]*flow.ResultApproval) + err := a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + entity, exists := backdata[chunkKey] + if !exists { + return nil + } + approvalMapEntity, ok := entity.(model.ApprovalMapEntity) + if !ok { + return fmt.Errorf("unexpected entity type %T", entity) + } + for i, app := range approvalMapEntity.Approvals { + approvals[i] = app + } return nil + }) + if err != nil { + // The current implementation never reaches this path, as it only stores + // ApprovalMapEntity as entities in the mempool. Reaching this error + // condition implies this code was inconsistently modified. + panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) } - return approvalMapEntity.Approvals + return approvals } // All will return all approvals in the memory pool. func (a *Approvals) All() []*flow.ResultApproval { res := make([]*flow.ResultApproval, 0) - entities := a.backend.All() - for _, entity := range entities { - approvalMapEntity, _ := entity.(model.ApprovalMapEntity) - - for _, approval := range approvalMapEntity.Approvals { - res = append(res, approval) + err := a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + for _, entity := range backdata { + approvalMapEntity, ok := entity.(model.ApprovalMapEntity) + if !ok { + // should never happen: as the mempool only stores ApprovalMapEntity + return fmt.Errorf("unexpected entity type %T", entity) + } + for _, approval := range approvalMapEntity.Approvals { + res = append(res, approval) + } } + return nil + }) + if err != nil { + // The current implementation never reaches this path, as it only stores + // ApprovalMapEntity as entities in the mempool. Reaching this error + // condition implies this code was inconsistently modified. + panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) } return res @@ -216,5 +229,9 @@ func (a *Approvals) All() []*flow.ResultApproval { // Size returns the number of approvals in the mempool. func (a *Approvals) Size() uint { + // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, + // we need run utilize the backend lock. + a.backend.RLock() + defer a.backend.RUnlock() return *a.size } diff --git a/module/mempool/stdmap/approvals_test.go b/module/mempool/stdmap/approvals_test.go index 989775f480d..51c1fdfbd87 100644 --- a/module/mempool/stdmap/approvals_test.go +++ b/module/mempool/stdmap/approvals_test.go @@ -3,10 +3,10 @@ package stdmap import ( "testing" + "github.com/onflow/flow-go/model/flow" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -22,6 +22,7 @@ func TestApprovals(t *testing.T) { ok, err := approvalPL.Add(approval1) require.True(t, ok) require.NoError(t, err) + println("Foo") // checks the existence of approval for key approvals := approvalPL.ByChunk(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex) diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index 1b33908ee76..743d607f5f9 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -30,7 +30,7 @@ func NewIncorporatedResults(limit uint) *IncorporatedResults { } // Add adds an IncorporatedResult to the mempool. 
-func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) bool { +func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) (bool, error) { key := incorporatedResult.Result.ID() @@ -41,8 +41,8 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) entity, ok := backdata[key] if !ok { - // no record with key is available in the mempool, - // initialise incResults. + // no record with key is available in the mempool, initialise + // incResults. incResults = make(map[flow.Identifier]*flow.IncorporatedResult) // add the new map to mempool for holding all incorporated results for the same result.ID backdata[key] = model.IncorporatedResultMap{ @@ -69,14 +69,8 @@ func (ir *IncorporatedResults) Add(incorporatedResult *flow.IncorporatedResult) *ir.size++ return nil }) - if err != nil { - // The current implementation never reaches this path, as it only stores - // IncorporatedResultMap as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. - panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) - } - return appended + return appended, err } // All returns all the items in the mempool. @@ -88,7 +82,7 @@ func (ir *IncorporatedResults) All() []*flow.IncorporatedResult { for _, entity := range backdata { irMap, ok := entity.(model.IncorporatedResultMap) if !ok { - // should never happen: as the mempoo + // should never happen: as the mempool only stores IncorporatedResultMap return fmt.Errorf("unexpected entity type %T", entity) } for _, ir := range irMap.IncorporatedResults { @@ -190,5 +184,12 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) // Size returns the number of incorporated results in the mempool. func (ir *IncorporatedResults) Size() uint { + // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, + // we need run the read through a locked operation in the backend. + // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, + // we need run utilize the backend lock. + ir.backend.RLock() + defer ir.backend.RUnlock() return *ir.size + } From 050ad336131a14fa35af846df5f3464e5d35bd8b Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 22 Oct 2020 22:07:29 -0700 Subject: [PATCH 032/105] some more polishing of stdmap.IApprovals and stdmap.IncorporatedResults mempools including SizeEjector --- module/mempool/stdmap/approvals.go | 2 +- module/mempool/stdmap/eject.go | 14 ++++++++++---- module/mempool/stdmap/incorporated_results.go | 3 +-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/module/mempool/stdmap/approvals.go b/module/mempool/stdmap/approvals.go index 606988cb9d5..8d47b067910 100644 --- a/module/mempool/stdmap/approvals.go +++ b/module/mempool/stdmap/approvals.go @@ -230,7 +230,7 @@ func (a *Approvals) All() []*flow.ResultApproval { // Size returns the number of approvals in the mempool. func (a *Approvals) Size() uint { // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, - // we need run utilize the backend lock. + // we need run utilize the backend's lock. 
a.backend.RLock() defer a.backend.RUnlock() return *a.size diff --git a/module/mempool/stdmap/eject.go b/module/mempool/stdmap/eject.go index 16af7eacef3..82b1e552c95 100644 --- a/module/mempool/stdmap/eject.go +++ b/module/mempool/stdmap/eject.go @@ -126,12 +126,21 @@ func (q *LRUEjector) Eject(entities map[flow.Identifier]flow.Entity) (flow.Ident // SizeEjector is a wrapper around EjectTrueRandom that can be used to decrement // an external size variable everytime an item is removed from the mempool. +// WARNING: requires external means for concurrency safety +// As SizeEjector writes to the externally-provided size variable, SizeEjector +// itself cannot provide concurrency safety for this variable. Instead, the +// concurrency safety must be implemented through the means of the Mempool which +// is using the ejector. type SizeEjector struct { - sync.Mutex size *uint } // NewSizeEjector returns a SizeEjector linked to the provided size variable. +// WARNING: requires external means for concurrency safety +// As SizeEjector writes to the externally-provided size variable, SizeEjector +// itself cannot provide concurrency safety for this variable. Instead, the +// concurrency safety must be implemented through the means of the Mempool which +// is using the ejector. func NewSizeEjector(size *uint) *SizeEjector { return &SizeEjector{ size: size, @@ -141,9 +150,6 @@ func NewSizeEjector(size *uint) *SizeEjector { // Eject calls EjectTrueRandom and decrements the size variable if an item was // returned by EjectTrueRandom. func (q *SizeEjector) Eject(entities map[flow.Identifier]flow.Entity) (flow.Identifier, flow.Entity) { - q.Lock() - defer q.Unlock() - id, entity := EjectTrueRandom(entities) if _, ok := entities[id]; ok { diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index 743d607f5f9..d0576131f5c 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -187,9 +187,8 @@ func (ir *IncorporatedResults) Size() uint { // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, // we need run the read through a locked operation in the backend. // To guarantee concurrency safety, i.e. that the read retrieves the latest size value, - // we need run utilize the backend lock. + // we need run utilize the backend's lock. 
ir.backend.RLock() defer ir.backend.RUnlock() return *ir.size - } From c0dc548418d36f74fe87956ee9edd000139b2214 Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Fri, 23 Oct 2020 11:13:14 -0700 Subject: [PATCH 033/105] Use proper context --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 9d03078a470..08a7d3df9ff 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -833,7 +833,7 @@ func (e *Engine) saveExecutionResults( return nil, fmt.Errorf("could not generate execution result: %w", err) } - err = e.execState.PersistExecutionResult(ctx, executionResult) + err = e.execState.PersistExecutionResult(childCtx, executionResult) if err != nil { return nil, fmt.Errorf("could not persist execution result: %w", err) } From 7fcba70d0524386996f9252c52055d291f6656fe Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Fri, 23 Oct 2020 19:36:33 -0700 Subject: [PATCH 034/105] wip --- engine/consensus/matching/engine.go | 125 ++++---- engine/consensus/matching/engine_test.go | 335 +++++++++++++------- module/mempool/mock/incorporated_results.go | 11 +- utils/unittest/fixtures.go | 33 +- 4 files changed, 331 insertions(+), 173 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 481d5bb6f0e..4d6628d1603 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -211,13 +211,12 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece resultFinalState, ok := receipt.ExecutionResult.FinalStateCommitment() if !ok { log.Error().Msg("execution receipt without FinalStateCommit received") - return fmt.Errorf("failed to get final state commitment from Execution Result") + return engine.NewInvalidInputErrorf("execution receipt without FinalStateCommit: %x", receipt.ID()) } log = log.With().Hex("final_state", resultFinalState).Logger() log.Info().Msg("execution receipt received") - // if the receipt is for an unknown block, skip it. It will be re-requested - // later. + // if the receipt is for an unknown block, skip it. It will be re-requested later. 
head, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Head() if err != nil { log.Debug().Msg("discarding receipt for unknown block") @@ -226,6 +225,15 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // if Execution Receipt is for block whose height is lower or equal to already sealed height // => drop Receipt + forUnsealedBlock, err := e.isAboveSealedHeight(head) + if err != nil { + return fmt.Errorf("could not find sealed block: %w", err) + } + if !forUnsealedBlock { + log.Debug().Msg("discarding receipt for already sealed and finalized block height") + return nil + } + sealed, err := e.state.Sealed().Head() if err != nil { return fmt.Errorf("could not find sealed block: %w", err) @@ -235,6 +243,8 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return nil } + err := e.ensureStakedNodeWithRole(receipt.ExecutorID, receipt.ExecutionResult.BlockID, flow.RoleExecution) + // get the identity of the origin node, so we can check if it's a valid // source for a execution receipt (usually execution nodes) identity, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Identity(originID) @@ -312,7 +322,7 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro // Check if the approver was a staked verifier at that block. Don't error // out if the block is not known yet, because this method will be called // again for some house cleaning when we try to match approvals to chunks. - err := e.checkApproverIsStakedVerifier(approval.Body.ApproverID, approval.Body.BlockID) + err := e.ensureStakedNodeWithRole(approval.Body.ApproverID, approval.Body.BlockID, flow.RoleVerification) if err != nil { // don't error out if the block was not found yet if !errors.Is(err, ErrBlockNotFound) { @@ -386,7 +396,6 @@ func (e *Engine) checkSealing() { // don't overflow the seal mempool space := e.seals.Limit() - e.seals.Size() if len(sealableResults) > int(space) { --- e.log.Warn(). Int("space", int(space)). Int("results", len(sealableResults)). @@ -525,17 +534,16 @@ RES_LOOP: // number of guarantees plus one; this will ensure the execution receipt // cannot lie about having less chunks and having the remaining ones // approved - requiredChunks := 0 + requiredChunks := 1 // system chunk must exist for each block's ExecutionResult, even if the block payload itself does not contain any chunks index, err := e.indexDB.ByBlockID(incorporatedResult.Result.BlockID) if err != nil { - // the block could have no payload if !errors.Is(err, storage.ErrNotFound) { return nil, err } ---> required chunks must be 1 + // reaching this line means the block is empty, i.e. it has no payload => we expect only the system chunk } else { - requiredChunks = len(index.CollectionIDs) + 1 + requiredChunks += len(index.CollectionIDs) } if incorporatedResult.Result.Chunks.Len() != requiredChunks { @@ -554,9 +562,9 @@ RES_LOOP: continue } if err != nil { ---> this is a fatal implementation bug: return nil, fmt.Errorf("could not assign verifiers: %w", err) - log.Warn().Msgf("could not compute chunk assignment: %v", err) - continue + // at this point, we know the block and a valid child block exists. 
Not being able to compute + // the assignment constitutes a fatal implementation bug: + return nil, fmt.Errorf("could not determine chunk assignment: %w", err) } // check that each chunk collects enough approvals @@ -579,18 +587,14 @@ RES_LOOP: continue RES_LOOP } - matched := e.matchChunk(incorporatedResult, chunk, assignment) - if !matched { - allChunksMatched = false - break + if !e.matchChunk(incorporatedResult, chunk, assignment) { + continue RES_LOOP } } // add the result to the results that should be sealed - if allChunksMatched { - e.log.Info().Msg("adding result with sufficient verification") - results = append(results, incorporatedResult) - } + e.log.Info().Msg("adding result with sufficient verification") + results = append(results, incorporatedResult) } return results, nil @@ -619,7 +623,7 @@ func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, chunk * if !ok { // if the approval comes from a node that wasn't even a staked // verifier at that block, remove the approval from the mempool. - err := e.checkApproverIsStakedVerifier(approverID, incorporatedResult.Result.BlockID) + err := e.ensureStakedNodeWithRole(approverID, incorporatedResult.Result.BlockID, flow.RoleVerification) if err != nil { // don't remove the approval if the error indicates that the // block is not known yet. @@ -651,45 +655,53 @@ func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, chunk * return validApprovals > 0 } -// checkApproverIsStakedVerifier checks if the approver was a valid staked -// verifier at a given block, and returns an error if it wasn't, or if the block -// is not known yet. If the block is not known yet, it returns a -// ErrBlockNotFound sentinel error. -func (e *Engine) checkApproverIsStakedVerifier(approverID flow.Identifier, blockID flow.Identifier) error { - - // if we dont know the block yet, return a ErrBlockNotFound error - _, err := e.state.AtBlockID(blockID).Head() - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return ErrBlockNotFound - } - return err - } - +// checkIsStakedNodeWithRole checks whether, at the given block, `nodeID` +// * is an authorized member of the network +// * has _positive_ weight +// * and has the expected role +// Returns the following errors: +// * sentinel engine.InvalidInputError if any of the above-listed conditions are violated. +// * generic error indicating a fatal internal bug +// Note: the method receives the block header as proof of its existence. +// Therefore, we consider the case where the respective block is unknown to the +// protocol state as a symptom of a fatal implementation bug. 
+func (e *Engine) ensureStakedNodeWithRole(nodeID flow.Identifier, block *flow.Header, expectedRole flow.Role) error { // get the identity of the origin node - identity, err := e.state.AtBlockID(blockID).Identity(approverID) + identity, err := e.state.AtBlockID(block.ID()).Identity(nodeID) if err != nil { if protocol.IsIdentityNotFound(err) { - return engine.NewInvalidInputErrorf("could not get approver identity: %w", err) + return engine.NewInvalidInputErrorf("unknown node identity: %w", err) } - - // unknown exception - return fmt.Errorf("could not get approver identity: %w", err) + // unexpected exception + return fmt.Errorf("failed to retrieve node identity: %w", err) } // check that the origin is a verification node - if identity.Role != flow.RoleVerification { - return engine.NewInvalidInputErrorf("invalid approver node role (%s)", identity.Role) + if identity.Role != expectedRole { + return engine.NewInvalidInputErrorf("expected node %x to have identity %s but got %s", nodeID, expectedRole, identity.Role) } // check if the identity has a stake if identity.Stake == 0 { - return engine.NewInvalidInputErrorf("verifier has zero stake (%x)", identity.NodeID) + return engine.NewInvalidInputErrorf("node has zero stake (%x)", identity.NodeID) } return nil } +// isAboveSealedHeight returns true if and only if block's Height is +// strictly larger than the highest _sealed and finalized_ block. +func (e *Engine) isAboveSealedHeight(block *flow.Header) (bool, error) { + sealed, err := e.state.Sealed().Head() + if err != nil { + return false, fmt.Errorf("could not retrieve sealed block: %w", err) + } + if sealed.Height >= block.Height { + return false, nil + } + return true, nil +} + // sealResult creates a seal for the incorporated result and adds it to the // seals mempool. func (e *Engine) sealResult(incorporatedResult *flow.IncorporatedResult) error { @@ -827,12 +839,12 @@ func (e *Engine) requestPending() error { missingBlocksOrderedByHeight := make([]flow.Identifier, 0, e.maxUnsealedResults) // turn mempool into Lookup table: BlockID -> Result - knownResultsMap := make(map[flow.Identifier]struct{}) - for _, r := range e.results.All() { - knownResultsMap[r.BlockID] = struct{}{} + knownResultForBlock := make(map[flow.Identifier]struct{}) + for _, r := range e.incorporatedResults.All() { + knownResultForBlock[r.Result.BlockID] = struct{}{} } - for _, sealContainer := range e.seals.All() { - knownResultsMap[sealContainer.Seal.BlockID] = struct{}{} + for _, s := range e.seals.All() { + knownResultForBlock[s.Seal.BlockID] = struct{}{} } // traverse each unsealed and finalized block with height from low to high, @@ -851,21 +863,10 @@ func (e *Engine) requestPending() error { return fmt.Errorf("could not get header (height=%d): %w", height, err) } + // check if we have an result for the block at this height blockID := header.ID() - - if _, ok := knownResultsMap[blockID]; ok { - continue - } - missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) - - // check if we have an execution result for the block at this height - _, err = e.resultsDB.ByBlockID(blockID) !! 
- if errors.Is(err, storage.ErrNotFound) { + if _, ok := knownResultForBlock[blockID]; !ok { missingBlocksOrderedByHeight = append(missingBlocksOrderedByHeight, blockID) - continue - } - if err != nil { - return fmt.Errorf("could not get execution result (block_id=%x): %w", blockID, err) } } diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 5fecb006977..bb267132f35 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -3,7 +3,6 @@ package matching import ( - "fmt" "math/rand" "os" "testing" @@ -31,7 +30,7 @@ import ( // 1. Matching engine should validate the incoming receipt (aka ExecutionReceipt): // 1. it should stores it to the mempool if valid // 2. it should ignore it when: -// 1. the origin is invalid +// 1. the origin is invalid [Condition removed for now -> will be replaced by valid EN signature in future] // 2. the role is invalid // 3. the result (a receipt has one result, multiple receipts might have the same result) has been sealed already // 4. the receipt has been received before @@ -63,42 +62,57 @@ func TestMatchingEngine(t *testing.T) { type MatchingSuite struct { suite.Suite + // IDENTITIES conID flow.Identifier exeID flow.Identifier verID flow.Identifier identities map[flow.Identifier]*flow.Identity + approvers flow.IdentityList - approvers flow.IdentityList - - state *protocol.State + // BLOCKS + rootBlock flow.Block + latestSealedBlock flow.Block + latestFinalizedBlock flow.Block + unfinalizedBlock flow.Block + blocks map[flow.Identifier]*flow.Block + // PROTOCOL STATE + state *protocol.State sealedSnapshot *protocol.Snapshot finalSnapshot *protocol.Snapshot - sealedResults map[flow.Identifier]*flow.ExecutionResult - blocks map[flow.Identifier]*flow.Block - + // MEMPOOLS and STORAGE which are injected into Matching Engine + // mock storage.ExecutionResults: backed by in-memory map sealedResults sealedResultsDB *storage.ExecutionResults - headersDB *storage.Headers - indexDB *storage.Index + sealedResults map[flow.Identifier]*flow.ExecutionResult + // mock mempool.IncorporatedResults: backed by in-memory map pendingResults + resultsPL *mempool.IncorporatedResults pendingResults map[flow.Identifier]*flow.IncorporatedResult - pendingSeals map[flow.Identifier]*flow.IncorporatedResultSeal - resultsPL *mempool.IncorporatedResults + // mock mempool.IncorporatedResultSeals: backed by in-memory map pendingSeals + sealsPL *mempool.IncorporatedResultSeals + pendingSeals map[flow.Identifier]*flow.IncorporatedResultSeal + + // mock BLOCK STORAGE: backed by in-memory map blocks + headersDB *storage.Headers // backed by map blocks + indexDB *storage.Index // backed by map blocks + + // mock mempool.Approvals: used to test whether or not Matching Engine stores approvals approvalsPL *mempool.Approvals - sealsPL *mempool.IncorporatedResultSeals + // misc SERVICE COMPONENTS which are injected into Matching Engine requester *module.Requester + assigner *module.ChunkAssigner - assigner *module.ChunkAssigner - + // MATCHING ENGINE matching *Engine } func (ms *MatchingSuite) SetupTest() { + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ SETUP IDENTITIES ~~~~~~~~~~~~~~~~~~~~~~~~~~ // unit := engine.NewUnit() log := zerolog.New(os.Stderr) metrics := metrics.NewNoopCollector() @@ -118,59 +132,122 @@ func (ms *MatchingSuite) SetupTest() { ms.approvers = unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleVerification)) + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SETUP BLOCKS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // 
rootBlock <- latestSealedBlock <- latestFinalizedBlock <- unfinalizedBlock + ms.rootBlock = unittest.BlockFixture() + ms.latestSealedBlock = unittest.BlockWithParentFixture(ms.rootBlock.Header) + ms.latestFinalizedBlock = unittest.BlockWithParentFixture(ms.latestSealedBlock.Header) + ms.unfinalizedBlock = unittest.BlockWithParentFixture(ms.latestFinalizedBlock.Header) + + ms.blocks = make(map[flow.Identifier]*flow.Block) + ms.blocks[ms.rootBlock.ID()] = &ms.rootBlock + ms.blocks[ms.latestSealedBlock.ID()] = &ms.latestSealedBlock + ms.blocks[ms.latestFinalizedBlock.ID()] = &ms.latestFinalizedBlock + ms.blocks[ms.unfinalizedBlock.ID()] = &ms.unfinalizedBlock + + // ~~~~~~~~~~~~~~~~~~~~~~~~ SETUP PROTOCOL STATE ~~~~~~~~~~~~~~~~~~~~~~~~ // ms.state = &protocol.State{} - ms.state.On("Sealed").Return( - func() realproto.Snapshot { - return ms.sealedSnapshot - }, - nil, - ) + + // define the protocol state snapshot of the latest finalized block ms.state.On("Final").Return( func() realproto.Snapshot { return ms.finalSnapshot }, nil, ) - ms.state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) realproto.Snapshot { - return ms.finalSnapshot + ms.finalSnapshot = &protocol.Snapshot{} + ms.finalSnapshot.On("Head").Return( + func() *flow.Header { + return ms.latestFinalizedBlock.Header }, nil, ) - ms.finalSnapshot = &protocol.Snapshot{} - ms.finalSnapshot.On("Identity", mock.Anything).Return( - func(nodeID flow.Identifier) *flow.Identity { - identity := ms.identities[nodeID] - return identity - }, - func(nodeID flow.Identifier) error { - _, found := ms.identities[nodeID] - if !found { - return fmt.Errorf("could not get identity (%x)", nodeID) - } - return nil - }, - ) - ms.finalSnapshot.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { - return ms.approvers - }, - func(selector flow.IdentityFilter) error { - return nil + // define the protocol state snapshot of the latest finalized and sealed block + ms.state.On("Sealed").Return( + func() realproto.Snapshot { + return ms.sealedSnapshot }, + nil, ) - ms.finalSnapshot.On("Head").Return( + ms.sealedSnapshot = &protocol.Snapshot{} + ms.sealedSnapshot.On("Head").Return( func() *flow.Header { - return &flow.Header{} // we don't care + return ms.latestSealedBlock.Header }, nil, ) - ms.sealedSnapshot = &protocol.Snapshot{} + // define the protocol state snapshot for any block in `ms.blocks` + ms.state.On("AtBlockID", mock.Anything).Return( + func(blockID flow.Identifier) realproto.Snapshot { + block, found := ms.blocks[blockID] + if !found { + return stateSnapshotForUnknownBlock() + } + return stateSnapshotForKnownBlock(block.Header, ms.identities) + }, + ) + //ms.finalSnapshot.On("Identity", mock.Anything).Return( + // func(nodeID flow.Identifier) *flow.Identity { + // identity := ms.identities[nodeID] + // return identity + // }, + // func(nodeID flow.Identifier) error { + // _, found := ms.identities[nodeID] + // if !found { + // return fmt.Errorf("could not get identity (%x)", nodeID) + // } + // return nil + // }, + //) + //ms.finalSnapshot.On("Identities", mock.Anything).Return( + // func(selector flow.IdentityFilter) flow.IdentityList { + // return ms.approvers + // }, + // func(selector flow.IdentityFilter) error { + // return nil + // }, + //) + // + //ms.state.On("AtBlockID", mock.Anything).Return( + // func(blockID flow.Identifier) realproto.Snapshot { + // return ms.refBlockSnapshot + // }, + // nil, + //) + + //ms.refBlockHeader = &flow.Header{Height: 20} // only need height + 
//ms.refBlockSnapshot = &protocol.Snapshot{} + //ms.refBlockSnapshot.On("Identity", mock.Anything).Return( + // func(nodeID flow.Identifier) *flow.Identity { + // identity := ms.identities[nodeID] + // return identity + // }, + // func(nodeID flow.Identifier) error { + // _, found := ms.identities[nodeID] + // if !found { + // return fmt.Errorf("could not get identity (%x)", nodeID) + // } + // return nil + // }, + //) + //ms.refBlockSnapshot.On("Identities", mock.Anything).Return( + // func(selector flow.IdentityFilter) flow.IdentityList { + // return ms.approvers + // }, + // func(selector flow.IdentityFilter) error { + // return nil + // }, + //) + //ms.refBlockSnapshot.On("Head").Return( + // func() *flow.Header { + // return ms.refBlockHeader + // }, + // nil, + //) ms.sealedResults = make(map[flow.Identifier]*flow.ExecutionResult) - ms.blocks = make(map[flow.Identifier]*flow.Block) ms.sealedResultsDB = &storage.ExecutionResults{} ms.sealedResultsDB.On("ByID", mock.Anything).Return( @@ -185,8 +262,12 @@ func (ms *MatchingSuite) SetupTest() { return nil }, ) - ms.sealedResultsDB.On("Index", mock.Anything, mock.Anything).Return( - func(blockID, resultID flow.Identifier) error { + ms.sealedResultsDB.On("Store", mock.Anything).Return( + func(result *flow.ExecutionResult) error { + _, found := ms.sealedResults[result.BlockID] + if found { + return storerr.ErrAlreadyExists + } return nil }, ) @@ -306,6 +387,9 @@ func (ms *MatchingSuite) SetupTest() { } func (ms *MatchingSuite) TestOnReceiptInvalidOrigin() { + // we don't validate the origin of an execution receipt anymore, as Execution Nodes + // might forward us Execution Receipts from others for blocks they haven't computed themselves + ms.T().Skip() // try to submit a receipt with a random origin ID originID := ms.exeID @@ -314,85 +398,93 @@ func (ms *MatchingSuite) TestOnReceiptInvalidOrigin() { err := ms.matching.onReceipt(originID, receipt) ms.Require().Error(err, "should reject receipt with mismatching origin and executor") + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit a receipt from a NON-ExecutionNode (here: consensus node) func (ms *MatchingSuite) TestOnReceiptUnknownBlock() { - // try ot submit a receipt from a consensus node originID := ms.conID - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutorID = originID - - // force state to not find the receipt's corresponding block - ms.state = &protocol.State{} - ms.state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) realproto.Snapshot { - snapshot := &protocol.Snapshot{} - snapshot.On("Head").Return(nil, fmt.Errorf("forced error")) - return snapshot - }, - nil, - ) - ms.matching.state = ms.state + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) + + //// force state to not find the receipt's corresponding block + //ms.state = &protocol.State{} + //ms.state.On("AtBlockID", mock.Anything).Return( + // func(blockID flow.Identifier) realproto.Snapshot { + // snapshot := &protocol.Snapshot{} + // snapshot.On("Head").Return(nil, fmt.Errorf("forced error")) + // return snapshot + // }, + // nil, + //) + //ms.matching.state = ms.state // onReceipt should not throw an error err := ms.matching.onReceipt(originID, receipt) ms.Require().NoError(err, "should ignore receipt for unknown block") + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), 
"Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit a receipt for a known block from a consensus node func (ms *MatchingSuite) TestOnReceiptInvalidRole() { - - // try ot submit a receipt from a consensus node originID := ms.conID - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutorID = originID + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithBlock(&ms.unfinalizedBlock), + ) err := ms.matching.onReceipt(originID, receipt) ms.Require().Error(err, "should reject receipt from wrong node role") + ms.Require().True(engine.IsInvalidInputError(err)) + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try ot submit a receipt from an Execution node, with zero unstaked node func (ms *MatchingSuite) TestOnReceiptUnstakedExecutor() { - - // try ot submit a receipt from an unstaked node originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutorID = originID + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithBlock(&ms.unfinalizedBlock), + ) ms.identities[originID].Stake = 0 err := ms.matching.onReceipt(originID, receipt) ms.Require().Error(err, "should reject receipt from unstaked node") + ms.Require().True(engine.IsInvalidInputError(err)) + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// matching engine should drop Result for known block that is already sealed +// without trying to store anything func (ms *MatchingSuite) TestOnReceiptSealedResult() { - - // try to submit a receipt for a sealed result originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutorID = originID - ms.sealedResults[receipt.ExecutionResult.ID()] = &receipt.ExecutionResult + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithBlock(&ms.latestSealedBlock), + ) err := ms.matching.onReceipt(originID, receipt) ms.Require().NoError(err, "should ignore receipt for sealed result") + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit a receipt for an already received result func (ms *MatchingSuite) TestOnReceiptPendingResult() { - - // try to submit a receipt for a sealed result originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture() + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) receipt.ExecutorID = originID ms.resultsPL.On("Add", mock.Anything).Run( @@ -407,14 +499,14 @@ func (ms *MatchingSuite) TestOnReceiptPendingResult() { ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 1) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) + ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) } func (ms *MatchingSuite) TestOnReceiptValid() { // try to submit a receipt that should be valid originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutorID = originID + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) ms.resultsPL.On("Add", mock.Anything).Run( func(args mock.Arguments) { @@ -431,14 +523,23 @@ func (ms *MatchingSuite) TestOnReceiptValid() { } func (ms *MatchingSuite) TestApprovalInvalidOrigin() { - - // try to submit an approval with a random 
origin ID + // approval from valid origin (i.e. a verification node) but with random ApproverID originID := ms.verID - approval := unittest.ResultApprovalFixture() + approval := unittest.ResultApprovalFixture() // with random ApproverID err := ms.matching.onApproval(originID, approval) ms.Require().Error(err, "should reject approval with mismatching origin and executor") + ms.Require().True(engine.IsInvalidInputError(err)) + + // approval from random origin but with valid ApproverID (i.e. a verification node) + originID = unittest.IdentifierFixture() // random origin + approval = unittest.ResultApprovalFixture(unittest.WithApproverID(ms.verID)) + + err = ms.matching.onApproval(originID, approval) + ms.Require().Error(err, "should reject approval with mismatching origin and executor") + ms.Require().True(engine.IsInvalidInputError(err)) + // In both cases, we expect the approval to be rejected without hitting the mempools ms.approvalsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -446,20 +547,7 @@ func (ms *MatchingSuite) TestApprovalInvalidOrigin() { func (ms *MatchingSuite) TestApprovalUnknownBlock() { // try to submit an approval for an unknown block originID := ms.verID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID - - // force state to not find the receipt's corresponding block - ms.state = &protocol.State{} - ms.state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) realproto.Snapshot { - snapshot := &protocol.Snapshot{} - snapshot.On("Head").Return(nil, storerr.ErrNotFound) - return snapshot - }, - nil, - ) - ms.matching.state = ms.state + approval := unittest.ResultApprovalFixture(unittest.WithApproverID(originID)) // generates approval for random block // make sure the approval is added to the cache for future processing // check calls have the correct parameters @@ -468,7 +556,7 @@ func (ms *MatchingSuite) TestApprovalUnknownBlock() { added := args.Get(0).(*flow.ResultApproval) ms.Assert().Equal(approval, added) }, - ).Return(false, nil) + ).Return(true, nil) // onApproval should not throw an error err := ms.matching.onApproval(originID, approval) @@ -480,29 +568,33 @@ func (ms *MatchingSuite) TestApprovalUnknownBlock() { } func (ms *MatchingSuite) TestOnApprovalInvalidRole() { - // try to submit an approval from a consensus node originID := ms.conID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID + approval := unittest.ResultApprovalFixture( + unittest.WithBlockID(ms.unfinalizedBlock.ID()), + unittest.WithApproverID(originID), + ) err := ms.matching.onApproval(originID, approval) ms.Require().Error(err, "should reject approval from wrong approver role") + ms.Require().True(engine.IsInvalidInputError(err)) ms.approvalsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } func (ms *MatchingSuite) TestOnApprovalInvalidStake() { - // try to submit an approval from an unstaked approver originID := ms.verID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID + approval := unittest.ResultApprovalFixture( + unittest.WithBlockID(ms.unfinalizedBlock.ID()), + unittest.WithApproverID(originID), + ) ms.identities[originID].Stake = 0 err := ms.matching.onApproval(originID, approval) ms.Require().Error(err, "should reject approval from unstaked approver") + ms.Require().True(engine.IsInvalidInputError(err)) ms.approvalsPL.AssertNumberOfCalls(ms.T(), "Add", 0) 
ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) @@ -1134,3 +1226,32 @@ func (ms *MatchingSuite) TestRequestReceiptsPendingBlocks() { // should request n-1 blocks if n > requestReceiptThreshold ms.Assert().Equal(len(requestedBlocks), n-1) } + +func stateSnapshotForUnknownBlock() *protocol.Snapshot { + snapshot := &protocol.Snapshot{} + snapshot.On("Identity", mock.Anything).Return( + nil, storerr.ErrNotFound, + ) + snapshot.On("Head", mock.Anything).Return( + nil, storerr.ErrNotFound, + ) + return snapshot +} + +func stateSnapshotForKnownBlock(block *flow.Header, identities map[flow.Identifier]*flow.Identity) *protocol.Snapshot { + snapshot := &protocol.Snapshot{} + snapshot.On("Identity", mock.Anything).Return( + func(nodeID flow.Identifier) *flow.Identity { + return identities[nodeID] + }, + func(nodeID flow.Identifier) error { + _, found := identities[nodeID] + if !found { + return realproto.IdentityNotFoundErr{NodeID: nodeID} + } + return nil + }, + ) + snapshot.On("Head").Return(block, nil) + return snapshot +} diff --git a/module/mempool/mock/incorporated_results.go b/module/mempool/mock/incorporated_results.go index be4796dcb8a..4276d71c884 100644 --- a/module/mempool/mock/incorporated_results.go +++ b/module/mempool/mock/incorporated_results.go @@ -51,7 +51,7 @@ func (_m *IncorporatedResults) All() []*flow.IncorporatedResult { } // ByResultID provides a mock function with given fields: resultID -func (_m *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult) { +func (_m *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.ExecutionResult, map[flow.Identifier]*flow.IncorporatedResult, bool) { ret := _m.Called(resultID) var r0 *flow.ExecutionResult @@ -72,7 +72,14 @@ func (_m *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.Execu } } - return r0, r1 + var r2 bool + if rf, ok := ret.Get(2).(func(flow.Identifier) bool); ok { + r2 = rf(resultID) + } else { + r2 = ret.Get(2).(bool) + } + + return r0, r1, r2 } // Rem provides a mock function with given fields: incorporatedResult diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index ea86dfb6366..8ef034915c5 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -415,13 +415,30 @@ func ResultForBlockFixture(block *flow.Block) *flow.ExecutionResult { } } -func ExecutionReceiptFixture() *flow.ExecutionReceipt { - return &flow.ExecutionReceipt{ +func WithExecutorID(id flow.Identifier) func(*flow.ExecutionReceipt) { + return func(receipt *flow.ExecutionReceipt) { + receipt.ExecutorID = id + } +} + +func WithBlock(block *flow.Block) func(*flow.ExecutionReceipt) { + return func(receipt *flow.ExecutionReceipt) { + receipt.ExecutionResult = *ResultForBlockFixture(block) + } +} + +func ExecutionReceiptFixture(opts ...func(*flow.ExecutionReceipt)) *flow.ExecutionReceipt { + receipt := &flow.ExecutionReceipt{ ExecutorID: IdentifierFixture(), ExecutionResult: *ExecutionResultFixture(), Spocks: nil, ExecutorSignature: SignatureFixture(), } + + for _, apply := range opts { + apply(receipt) + } + return receipt } func ExecutionResultFixture() *flow.ExecutionResult { @@ -457,6 +474,18 @@ func WithExecutionResultID(id flow.Identifier) func(*flow.ResultApproval) { } } +func WithApproverID(id flow.Identifier) func(*flow.ResultApproval) { + return func(ra *flow.ResultApproval) { + ra.Body.ApproverID = id + } +} + +func WithBlockID(id flow.Identifier) func(*flow.ResultApproval) { + return func(ra *flow.ResultApproval) 
{ + ra.Body.Attestation.BlockID = id + } +} + func ResultApprovalFixture(opts ...func(*flow.ResultApproval)) *flow.ResultApproval { attestation := flow.Attestation{ BlockID: IdentifierFixture(), From 9828276171469075c6327d5159511d988772f049 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Fri, 23 Oct 2020 20:58:56 -0700 Subject: [PATCH 035/105] some more refactoring of matching engine --- engine/consensus/matching/engine.go | 138 +++++++++++----------------- 1 file changed, 52 insertions(+), 86 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 4d6628d1603..628c85a0c90 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -225,15 +225,6 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // if Execution Receipt is for block whose height is lower or equal to already sealed height // => drop Receipt - forUnsealedBlock, err := e.isAboveSealedHeight(head) - if err != nil { - return fmt.Errorf("could not find sealed block: %w", err) - } - if !forUnsealedBlock { - log.Debug().Msg("discarding receipt for already sealed and finalized block height") - return nil - } - sealed, err := e.state.Sealed().Head() if err != nil { return fmt.Errorf("could not find sealed block: %w", err) @@ -243,33 +234,16 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return nil } - err := e.ensureStakedNodeWithRole(receipt.ExecutorID, receipt.ExecutionResult.BlockID, flow.RoleExecution) - - // get the identity of the origin node, so we can check if it's a valid - // source for a execution receipt (usually execution nodes) - identity, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Identity(originID) + err = e.ensureStakedNodeWithRole(receipt.ExecutorID, head, flow.RoleExecution) if err != nil { - if protocol.IsIdentityNotFound(err) { - return engine.NewInvalidInputErrorf("could not get executor identity: %w", err) - } - - // unknown exception - return fmt.Errorf("could not get executor identity: %w", err) + return fmt.Errorf("failed to process execution receipt: %w", err) } - // check that the origin is an execution node - if identity.Role != flow.RoleExecution { - return engine.NewInvalidInputErrorf("invalid executor node role (%s)", identity.Role) - } - - // check if the identity has a stake - if identity.Stake == 0 { - return engine.NewInvalidInputErrorf("executor has zero stake (%x)", identity.NodeID) - } - - result := &receipt.ExecutionResult + // TODO: check the approval's cryptographic integrityt. + // if !errors.Is(err // store the result to make it persistent for later + result := &receipt.ExecutionResult err = e.resultsDB.Store(result) // internally de-duplicates if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { return fmt.Errorf("could not store sealing result: %w", err) @@ -293,9 +267,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece e.log.Debug().Msg("skipping result already in mempool") return nil } - e.mempool.MempoolEntries(metrics.ResourceResult, e.incorporatedResults.Size()) - e.log.Info().Msg("execution result added to mempool") // kick off a check for potential seal formation @@ -306,12 +278,10 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // onApproval processes a new result approval. func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultApproval) error { - log := e.log.With(). 
Hex("approval_id", logging.Entity(approval)). Hex("result_id", approval.Body.ExecutionResultID[:]). Logger() - log.Info().Msg("result approval received") // check approver matches the origin ID @@ -319,27 +289,32 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro return engine.NewInvalidInputErrorf("invalid origin for approval: %x", originID) } - // Check if the approver was a staked verifier at that block. Don't error - // out if the block is not known yet, because this method will be called - // again for some house cleaning when we try to match approvals to chunks. - err := e.ensureStakedNodeWithRole(approval.Body.ApproverID, approval.Body.BlockID, flow.RoleVerification) + // check if we already have the block the approval pertains to + head, err := e.state.AtBlockID(approval.Body.BlockID).Head() if err != nil { - // don't error out if the block was not found yet if !errors.Is(err, ErrBlockNotFound) { - return err + return fmt.Errorf("failed to retrieve header for block %x: %w", approval.Body.BlockID, err) + } + // Don't error if the block is not known yet, because the checks in the + // else-branch below are called again when we try to match approvals to chunks. + } else { + // drop approval, if it is for block whose height is lower or equal to already sealed height + sealed, err := e.state.Sealed().Head() + if err != nil { + return fmt.Errorf("could not find sealed block: %w", err) + } + if sealed.Height >= head.Height { + log.Debug().Msg("discarding approval for already sealed and finalized block height") + return nil } - } - // TODO: check the approval's cryptographic integrity + // Check if the approver was a staked verifier at that block. + err = e.ensureStakedNodeWithRole(approval.Body.ApproverID, head, flow.RoleVerification) + if err != nil { + return fmt.Errorf("failed to process approval: %w", err) + } - // check if the result of this approval is already sealed - _, err = e.resultsDB.ByID(approval.Body.ExecutionResultID) - if err == nil { - log.Debug().Msg("discarding approval for sealed result") - return nil - } - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not check result: %w", err) + // TODO: check the approval's cryptographic integrity } // store in the memory pool (it won't be added if it is already in there). 
@@ -347,12 +322,10 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro if err != nil { return err } - if !added { e.log.Debug().Msg("skipping approval already in mempool") return nil } - e.mempool.MempoolEntries(metrics.ResourceApproval, e.approvals.Size()) // kick off a check for potential seal formation @@ -494,13 +467,11 @@ func (e *Engine) sealableResults() ([]*flow.IncorporatedResult, error) { RES_LOOP: for _, incorporatedResult := range e.incorporatedResults.All() { - // if we have not received the block yet, we will just keep rechecking - // until the block has been received or the result has been purged + // not finding the block header for an incorporated result is a fatal + // implementation bug, as we only add results to the IncorporatedResults + // mempool, where _both_ the block that incorporates the result as well + // as the block the result pertains to are known block, err := e.headersDB.ByBlockID(incorporatedResult.Result.BlockID) - if errors.Is(err, storage.ErrNotFound) { - log.Debug().Msg("skipping result with unknown block") - continue - } if err != nil { return nil, fmt.Errorf("could not retrieve block: %w", err) } @@ -529,12 +500,12 @@ RES_LOOP: continue } - // we create one chunk per collection (at least for now), plus the + // we create one chunk per collection, plus the // system chunk. so we can check if the chunk number matches with the // number of guarantees plus one; this will ensure the execution receipt // cannot lie about having less chunks and having the remaining ones // approved - requiredChunks := 1 // system chunk must exist for each block's ExecutionResult, even if the block payload itself does not contain any chunks + requiredChunks := 1 // system chunk: must exist for block's ExecutionResult, even if block payload itself is empty index, err := e.indexDB.ByBlockID(incorporatedResult.Result.BlockID) if err != nil { @@ -568,7 +539,7 @@ RES_LOOP: } // check that each chunk collects enough approvals - for i := 0; i < assignment.Len(); i++ { + for i := 0; i < requiredChunks; i++ { // arriving at a failure condition here means that the execution // result is invalid; we should skip it and move on to the next // execution result. @@ -577,6 +548,7 @@ RES_LOOP: chunk, ok := incorporatedResult.Result.Chunks.ByIndex(uint64(i)) if !ok { log.Warn().Msgf("chunk out of range requested: %d", i) + _ = e.incorporatedResults.Rem(incorporatedResult) continue RES_LOOP } @@ -584,10 +556,15 @@ RES_LOOP: // result contains all chunks and no duplicates. if chunk.Index != uint64(i) { log.Warn().Msgf("chunk out of place: pos = %d, index = %d", i, chunk.Index) + _ = e.incorporatedResults.Rem(incorporatedResult) continue RES_LOOP } - if !e.matchChunk(incorporatedResult, chunk, assignment) { + matched, err := e.matchChunk(incorporatedResult, block, chunk, assignment) + if err != nil { + return nil, fmt.Errorf("") + } + if !matched { continue RES_LOOP } } @@ -603,7 +580,7 @@ RES_LOOP: // matchChunk checks that the number of ResultApprovals collected by a chunk // exceeds the required threshold. It also populates the IncorporatedResult's // collection of approval signatures to avoid repeated work. 
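// Note: under the current configuration a chunk counts as matched once it has at
// least one approval from a staked verifier assigned to it (or unconditionally
// while requireApprovals is disabled); the TODOs in the body track the eventual
// requirement of a +2/3 majority of staked verifiers.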
-func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, chunk *flow.Chunk, assignment *chunks.Assignment) bool { +func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, block *flow.Header, chunk *flow.Chunk, assignment *chunks.Assignment) (bool, error) { // get all the chunk approvals from mempool approvals := e.approvals.ByChunk(incorporatedResult.Result.ID(), chunk.Index) @@ -623,15 +600,17 @@ func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, chunk * if !ok { // if the approval comes from a node that wasn't even a staked // verifier at that block, remove the approval from the mempool. - err := e.ensureStakedNodeWithRole(approverID, incorporatedResult.Result.BlockID, flow.RoleVerification) + err := e.ensureStakedNodeWithRole(approverID, block, flow.RoleVerification) if err != nil { - // don't remove the approval if the error indicates that the - // block is not known yet. - if !errors.Is(err, ErrBlockNotFound) { - _, _ = e.approvals.RemApproval(approval) + if engine.IsInvalidInputError(err) { + _, err = e.approvals.RemApproval(approval) + if err != nil { + return false, fmt.Errorf("failed to remove approval from mempool: %w", err) + } + continue } + return false, fmt.Errorf("failed to match chunks: %w", err) } - continue } // Add signature to incorporated result so that we don't have to check @@ -646,13 +625,13 @@ func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, chunk * // TODO: this is only here temporarily to ease the migration to new chunk // based sealing. if !e.requireApprovals { - return true + return true, nil } // TODO: // * This is the happy path (requires just one approval per chunk). // * Full protocol should be +2/3 of all currently staked verifiers. - return validApprovals > 0 + return validApprovals > 0, nil } // checkIsStakedNodeWithRole checks whether, at the given block, `nodeID` @@ -689,19 +668,6 @@ func (e *Engine) ensureStakedNodeWithRole(nodeID flow.Identifier, block *flow.He return nil } -// isAboveSealedHeight returns true if and only if block's Height is -// strictly larger than the highest _sealed and finalized_ block. -func (e *Engine) isAboveSealedHeight(block *flow.Header) (bool, error) { - sealed, err := e.state.Sealed().Head() - if err != nil { - return false, fmt.Errorf("could not retrieve sealed block: %w", err) - } - if sealed.Height >= block.Height { - return false, nil - } - return true, nil -} - // sealResult creates a seal for the incorporated result and adds it to the // seals mempool. func (e *Engine) sealResult(incorporatedResult *flow.IncorporatedResult) error { @@ -852,7 +818,7 @@ func (e *Engine) requestPending() error { // order to request them. 
for height := sealed.Height; height < final.Height; height++ { // add at most number of results - if len(missingBlocksOrderedByHeight) >= int(e.maxUnsealedResults) { + if len(missingBlocksOrderedByHeight) >= e.maxUnsealedResults { break } From 2b29afcc6db43895ecea200ebe6ee42d8f85932f Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Fri, 23 Oct 2020 22:01:07 -0700 Subject: [PATCH 036/105] fixing tests (in progress) --- engine/consensus/matching/engine.go | 4 +- engine/consensus/matching/engine_test.go | 156 ++++++++++++----------- 2 files changed, 81 insertions(+), 79 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 628c85a0c90..f6f34c3b2c3 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -27,8 +27,6 @@ import ( "github.com/onflow/flow-go/utils/logging" ) -var ErrBlockNotFound = errors.New("block not found") - // Engine is the Matching engine, which builds seals by matching receipts (aka // ExecutionReceipt, from execution nodes) and approvals (aka ResultApproval, // from verification nodes), and saves the seals into seals mempool for adding @@ -292,7 +290,7 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro // check if we already have the block the approval pertains to head, err := e.state.AtBlockID(approval.Body.BlockID).Head() if err != nil { - if !errors.Is(err, ErrBlockNotFound) { + if !errors.Is(err, storage.ErrNotFound) { return fmt.Errorf("failed to retrieve header for block %x: %w", approval.Body.BlockID, err) } // Don't error if the block is not known yet, because the checks in the diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index bb267132f35..2740d1256a4 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -83,9 +83,9 @@ type MatchingSuite struct { finalSnapshot *protocol.Snapshot // MEMPOOLS and STORAGE which are injected into Matching Engine - // mock storage.ExecutionResults: backed by in-memory map sealedResults - sealedResultsDB *storage.ExecutionResults - sealedResults map[flow.Identifier]*flow.ExecutionResult + // mock storage.ExecutionResults: backed by in-memory map persistedResults + resultsDB *storage.ExecutionResults + persistedResults map[flow.Identifier]*flow.ExecutionResult // mock mempool.IncorporatedResults: backed by in-memory map pendingResults resultsPL *mempool.IncorporatedResults @@ -247,24 +247,24 @@ func (ms *MatchingSuite) SetupTest() { // nil, //) - ms.sealedResults = make(map[flow.Identifier]*flow.ExecutionResult) - - ms.sealedResultsDB = &storage.ExecutionResults{} - ms.sealedResultsDB.On("ByID", mock.Anything).Return( + // ~~~~~~~~~~~~~~~~~~~~~~~ SETUP RESULTS STORAGE ~~~~~~~~~~~~~~~~~~~~~~~~ // + ms.persistedResults = make(map[flow.Identifier]*flow.ExecutionResult) + ms.resultsDB = &storage.ExecutionResults{} + ms.resultsDB.On("ByID", mock.Anything).Return( func(resultID flow.Identifier) *flow.ExecutionResult { - return ms.sealedResults[resultID] + return ms.persistedResults[resultID] }, func(resultID flow.Identifier) error { - _, found := ms.sealedResults[resultID] + _, found := ms.persistedResults[resultID] if !found { return storerr.ErrNotFound } return nil }, ) - ms.sealedResultsDB.On("Store", mock.Anything).Return( + ms.resultsDB.On("Store", mock.Anything).Return( func(result *flow.ExecutionResult) error { - _, found := ms.sealedResults[result.BlockID] + _, found := ms.persistedResults[result.BlockID] 
if found { return storerr.ErrAlreadyExists } @@ -272,6 +272,7 @@ func (ms *MatchingSuite) SetupTest() { }, ) + // ~~~~~~~~~~~~~~~~~~~~ SETUP BLOCK HEADER STORAGE ~~~~~~~~~~~~~~~~~~~~~ // ms.headersDB = &storage.Headers{} ms.headersDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Header { @@ -308,6 +309,7 @@ func (ms *MatchingSuite) SetupTest() { }, ) + // ~~~~~~~~~~~~~~~~~~~~ SETUP BLOCK PAYLOAD STORAGE ~~~~~~~~~~~~~~~~~~~~~ // ms.indexDB = &storage.Index{} ms.indexDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Index { @@ -332,9 +334,8 @@ func (ms *MatchingSuite) SetupTest() { }, ) + // ~~~~~~~~~~~~~~~~ SETUP INCORPORATED RESULTS MEMPOOL ~~~~~~~~~~~~~~~~~ // ms.pendingResults = make(map[flow.Identifier]*flow.IncorporatedResult) - ms.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal) - ms.resultsPL = &mempool.IncorporatedResults{} ms.resultsPL.On("Size").Return(uint(0)) // only for metrics ms.resultsPL.On("All").Return( @@ -347,9 +348,12 @@ func (ms *MatchingSuite) SetupTest() { }, ) + // ~~~~~~~~~~~~~~~~~~~~~~ SETUP APPROVALS MEMPOOL ~~~~~~~~~~~~~~~~~~~~~~ // ms.approvalsPL = &mempool.Approvals{} ms.approvalsPL.On("Size").Return(uint(0)) // only for metrics + // ~~~~~~~~~~~~~~~~~~~~~~~~ SETUP SEALS MEMPOOL ~~~~~~~~~~~~~~~~~~~~~~~~ // + ms.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal) ms.sealsPL = &mempool.IncorporatedResultSeals{} ms.sealsPL.On("Size").Return(uint(0)) // only for metrics ms.sealsPL.On("ByID", mock.Anything).Return( @@ -362,6 +366,7 @@ func (ms *MatchingSuite) SetupTest() { }, ) + // ~~~~~~~~~~~~~~~~~~~~~~~ SETUP MATCHING ENGINE ~~~~~~~~~~~~~~~~~~~~~~~ // ms.requester = new(module.Requester) ms.assigner = &module.ChunkAssigner{} @@ -372,7 +377,7 @@ func (ms *MatchingSuite) SetupTest() { mempool: metrics, state: ms.state, requester: ms.requester, - resultsDB: ms.sealedResultsDB, + resultsDB: ms.resultsDB, headersDB: ms.headersDB, indexDB: ms.indexDB, incorporatedResults: ms.resultsPL, @@ -398,7 +403,7 @@ func (ms *MatchingSuite) TestOnReceiptInvalidOrigin() { err := ms.matching.onReceipt(originID, receipt) ms.Require().Error(err, "should reject receipt with mismatching origin and executor") - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -408,23 +413,11 @@ func (ms *MatchingSuite) TestOnReceiptUnknownBlock() { originID := ms.conID receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) - //// force state to not find the receipt's corresponding block - //ms.state = &protocol.State{} - //ms.state.On("AtBlockID", mock.Anything).Return( - // func(blockID flow.Identifier) realproto.Snapshot { - // snapshot := &protocol.Snapshot{} - // snapshot.On("Head").Return(nil, fmt.Errorf("forced error")) - // return snapshot - // }, - // nil, - //) - //ms.matching.state = ms.state - // onReceipt should not throw an error err := ms.matching.onReceipt(originID, receipt) ms.Require().NoError(err, "should ignore receipt for unknown block") - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -441,7 +434,7 @@ func (ms *MatchingSuite) TestOnReceiptInvalidRole() { ms.Require().Error(err, "should reject receipt from wrong node role") 
ms.Require().True(engine.IsInvalidInputError(err)) - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -459,7 +452,7 @@ func (ms *MatchingSuite) TestOnReceiptUnstakedExecutor() { ms.Require().Error(err, "should reject receipt from unstaked node") ms.Require().True(engine.IsInvalidInputError(err)) - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -476,7 +469,7 @@ func (ms *MatchingSuite) TestOnReceiptSealedResult() { err := ms.matching.onReceipt(originID, receipt) ms.Require().NoError(err, "should ignore receipt for sealed result") - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 0) ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 0) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } @@ -484,8 +477,10 @@ func (ms *MatchingSuite) TestOnReceiptSealedResult() { // try to submit a receipt for an already received result func (ms *MatchingSuite) TestOnReceiptPendingResult() { originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) - receipt.ExecutorID = originID + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithBlock(&ms.unfinalizedBlock), + ) ms.resultsPL.On("Add", mock.Anything).Run( func(args mock.Arguments) { @@ -496,17 +491,25 @@ func (ms *MatchingSuite) TestOnReceiptPendingResult() { err := ms.matching.onReceipt(originID, receipt) ms.Require().NoError(err, "should ignore receipt for already pending result") - ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 1) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) + + // resubmit receipt + err = ms.matching.onReceipt(originID, receipt) + ms.Require().NoError(err, "should ignore receipt for already pending result") + ms.resultsPL.AssertNumberOfCalls(ms.T(), "Add", 2) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 2) + ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) } +// try to submit a receipt that should be valid func (ms *MatchingSuite) TestOnReceiptValid() { - - // try to submit a receipt that should be valid originID := ms.exeID - receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(originID)) + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithBlock(&ms.unfinalizedBlock), + ) ms.resultsPL.On("Add", mock.Anything).Run( func(args mock.Arguments) { @@ -522,6 +525,7 @@ func (ms *MatchingSuite) TestOnReceiptValid() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval where the message origin is inconsistent with the message creator func (ms *MatchingSuite) TestApprovalInvalidOrigin() { // approval from valid origin (i.e. a verification node) but with random ApproverID originID := ms.verID @@ -544,10 +548,12 @@ func (ms *MatchingSuite) TestApprovalInvalidOrigin() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// Try to submit an approval for an unknown block. 
+// As the block is unknown, the ID of teh sender should +//not matter as there is no block to verify it against func (ms *MatchingSuite) TestApprovalUnknownBlock() { - // try to submit an approval for an unknown block - originID := ms.verID - approval := unittest.ResultApprovalFixture(unittest.WithApproverID(originID)) // generates approval for random block + originID := ms.conID + approval := unittest.ResultApprovalFixture(unittest.WithApproverID(originID)) // generates approval for random block ID // make sure the approval is added to the cache for future processing // check calls have the correct parameters @@ -567,8 +573,8 @@ func (ms *MatchingSuite) TestApprovalUnknownBlock() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval from a consensus node func (ms *MatchingSuite) TestOnApprovalInvalidRole() { - // try to submit an approval from a consensus node originID := ms.conID approval := unittest.ResultApprovalFixture( unittest.WithBlockID(ms.unfinalizedBlock.ID()), @@ -583,8 +589,8 @@ func (ms *MatchingSuite) TestOnApprovalInvalidRole() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval from an unstaked approver func (ms *MatchingSuite) TestOnApprovalInvalidStake() { - // try to submit an approval from an unstaked approver originID := ms.verID approval := unittest.ResultApprovalFixture( unittest.WithBlockID(ms.unfinalizedBlock.ID()), @@ -600,13 +606,13 @@ func (ms *MatchingSuite) TestOnApprovalInvalidStake() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval for a sealed result func (ms *MatchingSuite) TestOnApprovalSealedResult() { - - // try to submit an approval for a sealed result originID := ms.verID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID - ms.sealedResults[approval.Body.ExecutionResultID] = unittest.ExecutionResultFixture() + approval := unittest.ResultApprovalFixture( + unittest.WithBlockID(ms.latestSealedBlock.ID()), + unittest.WithApproverID(originID), + ) err := ms.matching.onApproval(originID, approval) ms.Require().NoError(err, "should ignore approval for sealed result") @@ -615,12 +621,10 @@ func (ms *MatchingSuite) TestOnApprovalSealedResult() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval that is already in the mempool func (ms *MatchingSuite) TestOnApprovalPendingApproval() { - - // try to submit an approval that is already in the mempool originID := ms.verID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID + approval := unittest.ResultApprovalFixture(unittest.WithApproverID(originID)) // check calls have the correct parameters ms.approvalsPL.On("Add", mock.Anything).Run( @@ -637,12 +641,13 @@ func (ms *MatchingSuite) TestOnApprovalPendingApproval() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to submit an approval for a known block func (ms *MatchingSuite) TestOnApprovalValid() { - - // try to submit an approval for a sealed result originID := ms.verID - approval := unittest.ResultApprovalFixture() - approval.Body.ApproverID = originID + approval := unittest.ResultApprovalFixture( + unittest.WithBlockID(ms.unfinalizedBlock.ID()), + unittest.WithApproverID(originID), + ) // check calls have the correct parameters ms.approvalsPL.On("Add", mock.Anything).Run( @@ -659,31 +664,29 @@ func (ms *MatchingSuite) TestOnApprovalValid() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to get matched results with nothing in memory pools func 
(ms *MatchingSuite) TestSealableResultsEmptyMempools() { - - // try to get matched results with nothing in memory pools results, err := ms.matching.sealableResults() ms.Require().NoError(err, "should not error with empty mempools") ms.Assert().Empty(results, "should not have matched results with empty mempools") } +// Try to seal a result for which we don't have the block. +// This tests verifies that Matching engine is performing self-consistency checking: +// Not finding the block for an incorporated result is a fatal +// implementation bug, as we only add results to the IncorporatedResults +// mempool, where _both_ the block that incorporates the result as well +// as the block the result pertains to are known func (ms *MatchingSuite) TestSealableResultsMissingBlock() { - - // try to seal a result for which we don't have the index payload incorporatedResult := unittest.IncorporatedResultFixture() - ms.pendingResults[incorporatedResult.ID()] = incorporatedResult - results, err := ms.matching.sealableResults() - ms.Require().NoError(err) - - ms.Assert().Empty(results, "should not select result with unknown block") - ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 0) + _, err := ms.matching.sealableResults() + ms.Require().Error(err) } -func (ms *MatchingSuite) TestSealableResulstUnknownPrevious() { - - // try to seal a result with a missing previous result +// try to seal a result with a missing previous result +func (ms *MatchingSuite) TestSealableResultUnknownPrevious() { block := unittest.BlockFixture() ms.blocks[block.Header.ID()] = &block incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) @@ -692,16 +695,17 @@ func (ms *MatchingSuite) TestSealableResulstUnknownPrevious() { // check that it is looking for the previous result, but return nil as if // not found - ms.resultsPL.On("ByResultID", mock.Anything).Run( + ms.resultsDB.On("ByID", mock.Anything).Run( func(args mock.Arguments) { previousResultID := args.Get(0).(flow.Identifier) ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) }, - ).Return(nil, nil) + ).Return(nil, storerr.ErrNotFound) results, err := ms.matching.sealableResults() ms.Require().NoError(err) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "ByID", 1) ms.Assert().Empty(results, "should not select result with unsealed previous") ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 0) } @@ -715,7 +719,7 @@ func (ms *MatchingSuite) TestSealableResultsPreviousNotInMempool() { incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) previous := unittest.ExecutionResultFixture() // previous does not reference the same block as block parent incorporatedResult.Result.PreviousResultID = previous.ID() - ms.sealedResults[previous.ID()] = previous + ms.persistedResults[previous.ID()] = previous // add incorporated result to mempool ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult @@ -730,7 +734,7 @@ func (ms *MatchingSuite) TestSealableResultsPreviousNotInMempool() { ).Return(nil, nil) // check that it is looking for previous in resultsDB, and return previous - ms.sealedResultsDB.On("ByID", mock.Anything).Run( + ms.resultsDB.On("ByID", mock.Anything).Run( func(args mock.Arguments) { previousResultID := args.Get(0).(flow.Identifier) ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) @@ -1110,7 +1114,7 @@ func (ms *MatchingSuite) TestSealValid() { ) // check match when we are storing entities - ms.sealedResultsDB.On("Store", mock.Anything).Run( + ms.resultsDB.On("Store", 
mock.Anything).Run( func(args mock.Arguments) { stored := args.Get(0).(*flow.ExecutionResult) ms.Assert().Equal(incorporatedResult.Result, stored) @@ -1132,7 +1136,7 @@ func (ms *MatchingSuite) TestSealValid() { err = ms.matching.sealResult(incorporatedResult) ms.Require().NoError(err, "should generate seal on correct sealable result") - ms.sealedResultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) + ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 1) } @@ -1209,7 +1213,7 @@ func (ms *MatchingSuite) TestRequestReceiptsPendingBlocks() { ms.matching.state = ms.state // the results are not in the DB, which will trigger request - ms.sealedResultsDB.On("ByBlockID", mock.Anything).Return(nil, storerr.ErrNotFound) + ms.resultsDB.On("ByBlockID", mock.Anything).Return(nil, storerr.ErrNotFound) // keep track of requested blocks requestedBlocks := []flow.Identifier{} From 67ad9fdfad89d60463b85e1560061faf8b3c95ba Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Sun, 25 Oct 2020 12:32:26 -0700 Subject: [PATCH 037/105] Update engine/execution/ingestion/engine.go Co-authored-by: Kan Zhang --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 55b96fb7e99..2749e2dcfd1 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -217,7 +217,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { } count := 0 - for height := lastExecutedHeight + 1; height <= futureHeight; height++ { + for height := lastExecutedHeight; height <= futureHeight; height++ { block, err := e.blocks.ByHeight(height) if err != nil { return fmt.Errorf("could not get block by height: %w", err) From e8f48a693e3c4d348a405a05e965744fe7d6cac1 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Sun, 25 Oct 2020 12:33:20 -0700 Subject: [PATCH 038/105] add comments --- engine/execution/ingestion/engine.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2749e2dcfd1..6e14cee2ddb 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -659,6 +659,9 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col } // since we've received this collection, remove it from the index + // this also prevents from executing the same block twice, because the second + // time when the collection arrives, it will not be found in the blockByCollectionID + // index. 
backdata.Rem(collID) return nil From cfa26e307fa0bc65ae05ecf9c529f4f14d679101 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Sun, 25 Oct 2020 13:14:52 -0700 Subject: [PATCH 039/105] deduplicate queue --- engine/execution/ingestion/engine.go | 12 ++++++------ module/mempool/queue/queue.go | 6 +++++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 6e14cee2ddb..86c9adba559 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -220,7 +220,7 @@ func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { for height := lastExecutedHeight; height <= futureHeight; height++ { block, err := e.blocks.ByHeight(height) if err != nil { - return fmt.Errorf("could not get block by height: %w", err) + return fmt.Errorf("could not get block by height: %v %w", height, err) } executableBlock := &entity.ExecutableBlock{ @@ -630,7 +630,8 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col // or it was ejected from the mempool when it was full. // either way, we will return if !exists { - log.Debug().Msg("could not find block for collection") + e.log.Debug().Hex("collection_id", collID[:]). + Msg("could not find block for collection") return nil } @@ -639,7 +640,8 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col completeCollection, ok := executableBlock.CompleteCollections[collID] if !ok { - return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block %v which does not contain said collection", blockID) + return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block %v which does not contain said collection", + blockID) } if completeCollection.IsCompleted() { @@ -653,9 +655,7 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col completeCollection.Transactions = collection.Transactions // check if the block becomes executable - completed := e.executeBlockIfComplete(executableBlock) - - log.Debug().Hex("block_id", blockID[:]).Bool("completed", completed).Msg("collection added to block") + _ = e.executeBlockIfComplete(executableBlock) } // since we've received this collection, remove it from the index diff --git a/module/mempool/queue/queue.go b/module/mempool/queue/queue.go index 102fb52c942..9bd9087189f 100644 --- a/module/mempool/queue/queue.go +++ b/module/mempool/queue/queue.go @@ -47,6 +47,8 @@ func (q *Queue) Size() int { } // Returns difference between lowest and highest element in the queue +// Formally, the Queue stores a tree. The height of the tree is the +// number of edges on the longest downward path between the root and any leaf. 
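+// For example, a queue whose head is at height 10 and whose highest node is at
+// height 13 has Height() == 3, independent of how many forks the tree contains.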
func (q *Queue) Height() uint64 { return q.Highest.Item.Height() - q.Head.Item.Height() } @@ -96,11 +98,13 @@ func dequeue(queue *Queue) *Queue { cache := make(map[flow.Identifier]*Node) //copy all but head caches + headID := queue.Head.Item.ID() for key, val := range queue.Nodes { - if key != queue.Head.Item.ID() { + if key != headID { cache[key] = val } } + return &Queue{ Head: onlyChild, Nodes: cache, From b50d86d10fe1746b29bc1d34279c901f47b5406d Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Sun, 25 Oct 2020 18:24:01 -0700 Subject: [PATCH 040/105] refactor block reloading --- engine/execution/ingestion/engine.go | 195 +++++++++++++++------------ engine/execution/state/state.go | 16 +++ 2 files changed, 123 insertions(+), 88 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 86c9adba559..f56d5159657 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -133,7 +133,7 @@ func New( // Ready returns a channel that will close when the engine has // successfully started. func (e *Engine) Ready() <-chan struct{} { - err := e.loadAllFinalizedAndUnexecutedBlocks() + err := e.reloadUnexecutedBlocks() if err != nil { e.log.Error().Err(err).Msg("failed to load all unexecuted blocks") } @@ -187,111 +187,117 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } // on nodes startup, we need to load all the unexecuted blocks to the execution queues. -func (e *Engine) loadAllFinalizedAndUnexecutedBlocks() error { - // // get finalized height - header, err := e.state.Final().Head() +// blocks have to be loaded in the way that the parent has been loaded before loading its children +func (e *Engine) reloadUnexecutedBlocks() error { + err := e.reloadFinalizedUnexecutedBlocks() if err != nil { - return fmt.Errorf("could not get finalized block: %w", err) + return fmt.Errorf("could not reload finalized unexecuted blocks") } - finalizedHeight := header.Height - futureHeight := uint64(8655590) + err = e.reloadPendingUnexecutedBlocks() + if err != nil { + return fmt.Errorf("could not reload pending unexecuted blocks") + } - // get the last executed height - lastExecutedHeight, _, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) + return nil +} + +func (e *Engine) reloadFinalizedUnexecutedBlocks() error { + // get finalized height + final, err := e.state.Final().Head() if err != nil { - return fmt.Errorf("could not get last executed block: %w", err) + return fmt.Errorf("could not get finalized block: %w", err) } - unexecuted := int64(finalizedHeight) - int64(lastExecutedHeight) + // find the last executed and finalized block + lastExecutedHeight := final.Height + for ; ; lastExecutedHeight-- { + header, err := e.state.AtHeight(lastExecutedHeight).Head() + if err != nil { + return fmt.Errorf("could not get header at height: %v, %w", lastExecutedHeight, err) + } - e.log.Info(). - Int64("count", unexecuted). - Uint64("last_executed_height", lastExecutedHeight). - Uint64("last_finalized_height", finalizedHeight). 
- Msg("reloading finalized and unexecuted blocks to execution queues...") + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) + if err != nil { + return fmt.Errorf("could not check whether block is executed: %w", err) + } - // log the number of unexecuted blocks - if unexecuted <= 0 { - return nil + if executed { + break + } } - count := 0 - for height := lastExecutedHeight; height <= futureHeight; height++ { - block, err := e.blocks.ByHeight(height) + e.log.Info().Msgf("last finalized and executed height: %v", lastExecutedHeight) + + // starting from the last executed block, go through each unexecuted and finalized block + // reload its block to execution queues + for height := lastExecutedHeight; height <= final.Height; height++ { + header, err := e.state.AtHeight(height).Head() if err != nil { - return fmt.Errorf("could not get block by height: %v %w", height, err) + return fmt.Errorf("could not get header at height: %v, %w", height, err) } - executableBlock := &entity.ExecutableBlock{ - Block: block, - CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), + err = e.reloadBlock(header.ID()) + if err != nil { + return fmt.Errorf("could not reload block %v, %w", height, err) } - blockID := executableBlock.ID() + e.log.Info().Msgf("reloaded block at height: %v", height) - // acquiring the lock so that there is only one process modifying the queue - err = e.mempool.Run( - func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - // adding the block to the queue, - queue, added := enqueue(executableBlock, executionQueues) - if !added { - // we started from an empty queue, and added each finalized block to the - // queue. Each block should always be added to the queues. - // a sanity check it must be an exception if not added. - return fmt.Errorf("block %v is not added to the queue", blockID) - } - - // check if a block is executable. - // a block is executable if the following conditions are all true - // 1) the parent state commitment is ready - // 2) the collections for the block payload are ready - // 3) the child block is ready for querying the randomness - - // check if the block's parent has been executed. (we can't execute the block if the parent has - // not been executed yet) - // check if there is a statecommitment for the parent block - parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) - - // if we found the statecommitment for the parent block, then add it to the executable block. - if err == nil { - executableBlock.StartState = parentCommitment - } else if errors.Is(err, storage.ErrNotFound) { - // the parent block is an unexecuted block. - // if the queue only has one block, and its parent doesn't - // exist in the queue, then we need to load the block from the storage. - _, ok := queue.Nodes[blockID] - if !ok { - log.Error().Msgf("an unexecuted parent block is missing in the queue") - } - } else { - // if there is exception, then crash - log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") - } + } - // check if we have all the collections for the block, and request them if there is missing. 
- err = e.matchOrRequestCollections(executableBlock, blockByCollection) - if err != nil { - return fmt.Errorf("cannot send collection requests: %w", err) - } + return nil +} - // execute the block if the block is ready to be executed - e.executeBlockIfComplete(executableBlock) - return nil - }) +func (e *Engine) reloadPendingUnexecutedBlocks() error { + pendings, err := e.state.Final().Pending() + if err != nil { + return fmt.Errorf("could not get pending blocks: %w", err) + } + for _, pending := range pendings { + reloaded, err := e.reloadBlockIfNotExecuted(pending) if err != nil { - return fmt.Errorf("failed to recover block %v", err) + return fmt.Errorf("could not reload block for block %w", err) } - count++ + e.log.Info().Bool("reloaded", reloaded).Msgf("reloaded block %v", pending) + } + + return nil +} + +// reload the block to execution queues if has not been executed. +// return whether the block was reloaded. +func (e *Engine) reloadBlockIfNotExecuted(blockID flow.Identifier) (bool, error) { + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) + if err != nil { + return false, fmt.Errorf("could not check block executed or not: %w", err) + } + + if executed { + return false, nil + } + + err = e.reloadBlock(blockID) + if err != nil { + return false, fmt.Errorf("could not reload block: %w", err) + } + + return true, nil +} + +func (e *Engine) reloadBlock(blockID flow.Identifier) error { + block, err := e.blocks.ByID(blockID) + if err != nil { + return fmt.Errorf("could not get block by ID: %v %w", blockID, err) } - e.log.Info().Int("count", count). - Msg("reloaded all the finalized and unexecuted blocks to execution queues") + err = e.enqueueBlockAndCheckExecutable(block, false) + + if err != nil { + return fmt.Errorf("could not enqueue block on reloading: %w", err) + } return nil } @@ -339,11 +345,22 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { // unexecuted block e.metrics.StartBlockReceivedToExecuted(blockID) + err = e.enqueueBlockAndCheckExecutable(block, true) + if err != nil { + return fmt.Errorf("could not enqueue block: %w", err) + } + + return nil +} + +func (e *Engine) enqueueBlockAndCheckExecutable(block *flow.Block, checkStateSync bool) error { executableBlock := &entity.ExecutableBlock{ Block: block, CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), } + blockID := executableBlock.ID() + // acquiring the lock so that there is only one process modifying the queue return e.mempool.Run( func( @@ -360,12 +377,14 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { return nil } - // whenever the queue grows, we need to check whether the state sync should be - // triggered. - firstUnexecutedHeight := queue.Head.Item.Height() - e.unit.Launch(func() { - e.checkStateSyncStart(firstUnexecutedHeight) - }) + if checkStateSync { + // whenever the queue grows, we need to check whether the state sync should be + // triggered. + firstUnexecutedHeight := queue.Head.Item.Height() + e.unit.Launch(func() { + e.checkStateSyncStart(firstUnexecutedHeight) + }) + } // check if a block is executable. // a block is executable if the following conditions are all true @@ -376,7 +395,7 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { // check if the block's parent has been executed. 
(we can't execute the block if the parent has // not been executed yet) // check if there is a statecommitment for the parent block - parentCommitment, err := e.execState.StateCommitmentByBlockID(ctx, block.Header.ParentID) + parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) // if we found the statecommitment for the parent block, then add it to the executable block. if err == nil { diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 84f530e9c0e..551296ebd33 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -2,6 +2,7 @@ package state import ( "context" + "errors" "fmt" "github.com/dgraph-io/badger/v2" @@ -50,6 +51,21 @@ type ReadOnlyExecutionState interface { GetCollection(identifier flow.Identifier) (*flow.Collection, error) } +// IsBlockExecuted returns whether the block has been executed. +// it checks whether the statecommitment exists in execution state. +func IsBlockExecuted(ctx context.Context, state ReadOnlyExecutionState, block flow.Identifier) (bool, error) { + _, err := state.StateCommitmentByBlockID(ctx, block) + if err == nil { + return false, nil + } + + if errors.Is(err, storage.ErrNotFound) { + return true, nil + } + + return false, err +} + // TODO Many operations here are should be transactional, so we need to refactor this // to store a reference to DB and compose operations and procedures rather then // just being amalgamate of proxies for single transactions operation From 5823b50b6c8dd9b138606b4597a93fee39f4143b Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Sun, 25 Oct 2020 20:22:00 -0700 Subject: [PATCH 041/105] continue fixing tests --- engine/consensus/matching/engine_test.go | 198 +++++++++++++---------- utils/unittest/fixtures.go | 44 ++++- utils/unittest/incorporated_results.go | 30 ++++ 3 files changed, 182 insertions(+), 90 deletions(-) create mode 100644 utils/unittest/incorporated_results.go diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 2740d1256a4..c94aeb9d2ea 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -3,6 +3,7 @@ package matching import ( + "fmt" "math/rand" "os" "testing" @@ -261,7 +262,7 @@ func (ms *MatchingSuite) SetupTest() { } return nil }, - ) + ).Maybe() ms.resultsDB.On("Store", mock.Anything).Return( func(result *flow.ExecutionResult) error { _, found := ms.persistedResults[result.BlockID] @@ -270,7 +271,7 @@ func (ms *MatchingSuite) SetupTest() { } return nil }, - ) + ).Maybe() // this call is optional // ~~~~~~~~~~~~~~~~~~~~ SETUP BLOCK HEADER STORAGE ~~~~~~~~~~~~~~~~~~~~~ // ms.headersDB = &storage.Headers{} @@ -337,7 +338,7 @@ func (ms *MatchingSuite) SetupTest() { // ~~~~~~~~~~~~~~~~ SETUP INCORPORATED RESULTS MEMPOOL ~~~~~~~~~~~~~~~~~ // ms.pendingResults = make(map[flow.Identifier]*flow.IncorporatedResult) ms.resultsPL = &mempool.IncorporatedResults{} - ms.resultsPL.On("Size").Return(uint(0)) // only for metrics + ms.resultsPL.On("Size").Return(uint(0)).Maybe() // only for metrics ms.resultsPL.On("All").Return( func() []*flow.IncorporatedResult { results := make([]*flow.IncorporatedResult, 0, len(ms.pendingResults)) @@ -346,16 +347,16 @@ func (ms *MatchingSuite) SetupTest() { } return results }, - ) + ).Maybe() // ~~~~~~~~~~~~~~~~~~~~~~ SETUP APPROVALS MEMPOOL ~~~~~~~~~~~~~~~~~~~~~~ // ms.approvalsPL = &mempool.Approvals{} - ms.approvalsPL.On("Size").Return(uint(0)) // only for metrics + 
ms.approvalsPL.On("Size").Return(uint(0)).Maybe() // only for metrics // ~~~~~~~~~~~~~~~~~~~~~~~~ SETUP SEALS MEMPOOL ~~~~~~~~~~~~~~~~~~~~~~~~ // ms.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal) ms.sealsPL = &mempool.IncorporatedResultSeals{} - ms.sealsPL.On("Size").Return(uint(0)) // only for metrics + ms.sealsPL.On("Size").Return(uint(0)).Maybe() // only for metrics ms.sealsPL.On("ByID", mock.Anything).Return( func(sealID flow.Identifier) *flow.IncorporatedResultSeal { return ms.pendingSeals[sealID] @@ -427,7 +428,7 @@ func (ms *MatchingSuite) TestOnReceiptInvalidRole() { originID := ms.conID receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithBlock(&ms.unfinalizedBlock), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.unfinalizedBlock))), ) err := ms.matching.onReceipt(originID, receipt) @@ -444,7 +445,7 @@ func (ms *MatchingSuite) TestOnReceiptUnstakedExecutor() { originID := ms.exeID receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithBlock(&ms.unfinalizedBlock), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.unfinalizedBlock))), ) ms.identities[originID].Stake = 0 @@ -463,7 +464,7 @@ func (ms *MatchingSuite) TestOnReceiptSealedResult() { originID := ms.exeID receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithBlock(&ms.latestSealedBlock), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.latestSealedBlock))), ) err := ms.matching.onReceipt(originID, receipt) @@ -479,7 +480,7 @@ func (ms *MatchingSuite) TestOnReceiptPendingResult() { originID := ms.exeID receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithBlock(&ms.unfinalizedBlock), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.unfinalizedBlock))), ) ms.resultsPL.On("Add", mock.Anything).Run( @@ -508,7 +509,7 @@ func (ms *MatchingSuite) TestOnReceiptValid() { originID := ms.exeID receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithBlock(&ms.unfinalizedBlock), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.unfinalizedBlock))), ) ms.resultsPL.On("Add", mock.Anything).Run( @@ -664,6 +665,78 @@ func (ms *MatchingSuite) TestOnApprovalValid() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// TestSealableResultsValid tests matching.Engine.sealableResults(): +// * a well-formed incorporated result R is in the mempool +// * sufficient number of valid result approvals for result R +// * R.PreviousResultID references a known result (i.e. 
stored in resultsDB) +// * R forms a valid sub-graph with its previous result (aka parent result) +// Method Engine.sealableResults() should return R as an element of the sealable results +func (ms *MatchingSuite) TestSealableResultsValid() { + // BLOCKS: <- previousBlock <- block + //previousBlock := unittest.BlockWithParentFixture(ms.unfinalizedBlock.Header) + previousBlock := unittest.BlockFixture() + block := unittest.BlockWithParentFixture(previousBlock.Header) + + // RESULTS for blocks: + previousResult := unittest.ExecutionResultFixture(unittest.WithBlock(&previousBlock)) + result := unittest.ExecutionResultFixture( + unittest.WithBlock(&block), + unittest.WithPreviousResult(previousResult.ID()), + ) + + // Exec Receipt for block with valid subgraph + incorporatedResult := unittest.IncorporatedResult.Fixture(unittest.IncorporatedResult.WithResult(result)) + + // add entities to mempools and persistent storage mocks: + ms.blocks[block.Header.ID()] = &block + ms.persistedResults[previousResult.ID()] = previousResult + ms.persistedResults[result.ID()] = result + ms.pendingResults[incorporatedResult.ID()] = incorporatedResult + + // assign each chunk to each approver + assignment := chunks.NewAssignment() + for _, chunk := range incorporatedResult.Result.Chunks { + assignment.Add(chunk, ms.approvers.NodeIDs()) + } + ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil).Once() + + // add enough approvals for each chunk + print(fmt.Sprintf("%d\n", len(incorporatedResult.Result.Chunks))) + for index := 0; index < len(incorporatedResult.Result.Chunks); index++ { + chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) + for _, approver := range ms.approvers { + chunkApprovals[approver.NodeID] = approvalFor(incorporatedResult.Result, uint64(index), approver.NodeID) + } + ms.approvalsPL.On("ByChunk", incorporatedResult.Result.ID(), uint64(index)).Return(chunkApprovals).Once() + } + + // test output of Matching Engine's sealableResults() + results, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.Assert().Equal(1, len(results), "expecting a single return value") + ms.Assert().Equal(incorporatedResult.ID(), results[0].ID(), "expecting a single return value") + + ms.resultsDB.AssertExpectations(ms.T()) + ms.assigner.AssertExpectations(ms.T()) + ms.approvalsPL.AssertExpectations(ms.T()) +} + +func approvalFor(result *flow.ExecutionResult, chunkIdx uint64, approverID flow.Identifier) *flow.ResultApproval { + return unittest.ResultApprovalFixture( + unittest.WithBlockID(result.BlockID), + unittest.WithExecutionResultID(result.ID()), + unittest.WithApproverID(approverID), + unittest.WithChunk(chunkIdx), + ) +} + +func expectedID(expectedID flow.Identifier) interface{} { + return mock.MatchedBy( + func(actualID flow.Identifier) bool { + return expectedID == actualID + }) +} + // try to get matched results with nothing in memory pools func (ms *MatchingSuite) TestSealableResultsEmptyMempools() { results, err := ms.matching.sealableResults() @@ -685,7 +758,10 @@ func (ms *MatchingSuite) TestSealableResultsMissingBlock() { ms.Require().Error(err) } -// try to seal a result with a missing previous result +// Given an incorporated result in the mempool, whose previous result +// (aka parent result) is not known: +// * skip this result +// * this result should not be removed from the mempool func (ms *MatchingSuite) TestSealableResultUnknownPrevious() { block := unittest.BlockFixture() ms.blocks[block.Header.ID()] 
= &block @@ -704,92 +780,46 @@ func (ms *MatchingSuite) TestSealableResultUnknownPrevious() { results, err := ms.matching.sealableResults() ms.Require().NoError(err) + ms.Assert().Empty(results, "should not select result with unsealed previous") ms.resultsDB.AssertNumberOfCalls(ms.T(), "ByID", 1) - ms.Assert().Empty(results, "should not select result with unsealed previous") ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 0) } -// if the previous is not found in mempool, we should look for it in the -// resultsDB -func (ms *MatchingSuite) TestSealableResultsPreviousNotInMempool() { - // try to seal a result with a persisted previous result - block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() // previous does not reference the same block as block parent - incorporatedResult.Result.PreviousResultID = previous.ID() - ms.persistedResults[previous.ID()] = previous - - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult - - // check that it is looking for the previous result in the mempool and - // return nil - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(nil, nil) - - // check that it is looking for previous in resultsDB, and return previous - ms.resultsDB.On("ByID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous) - - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) - - results, err := ms.matching.sealableResults() - ms.Require().NoError(err) - - ms.Assert().Empty(results, "should not select result with invalid subgraph") - ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 1) -} - +// TestSealableResultsInvalidSubgraph tests matching.Engine.sealableResults(): // let R1 be a result that references block A, and R2 be R1's parent result. -// Then R2 should reference A's parent. +// * the execution results form a valid subgraph if and only if +// R2 should reference A's parent. +// Method sealableResults() should +// * neither consider R1 nor R2 sealable incorporated results and +// * remove R1 from IncorporatedResults mempool, i.e. 
`resultsPL` func (ms *MatchingSuite) TestSealableResultsInvalidSubgraph() { - // try to seal a result with a persisted previous result - block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() // previous does not reference the same block as block parent - incorporatedResult.Result.PreviousResultID = previous.ID() + blockA := unittest.BlockFixture() // the parent block's ID is randomly generated here - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult + // RESULTS for blocks: + resultR2 := unittest.ExecutionResultFixture() // the result pertains to a block whose ID is random generated here + resultR1 := unittest.ExecutionResultFixture( + unittest.WithBlock(&blockA), + unittest.WithPreviousResult(resultR2.ID()), + ) - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) + // Exec Receipt for block with valid subgraph + incorporatedResult := unittest.IncorporatedResult.Fixture(unittest.IncorporatedResult.WithResult(resultR1)) - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) + // add entities to mempools and persistent storage mocks: + ms.blocks[blockA.Header.ID()] = &blockA + ms.persistedResults[resultR2.ID()] = resultR2 + ms.persistedResults[resultR1.ID()] = resultR1 + ms.pendingResults[incorporatedResult.ID()] = incorporatedResult + + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", incorporatedResult.ID()).Return(true).Once() results, err := ms.matching.sealableResults() ms.Require().NoError(err) - ms.Assert().Empty(results, "should not select result with invalid subgraph") - ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 1) + + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } func (ms *MatchingSuite) TestSealResultInvalidChunks() { diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 8ef034915c5..2ff091b7408 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -421,9 +421,9 @@ func WithExecutorID(id flow.Identifier) func(*flow.ExecutionReceipt) { } } -func WithBlock(block *flow.Block) func(*flow.ExecutionReceipt) { +func WithResult(result *flow.ExecutionResult) func(*flow.ExecutionReceipt) { return func(receipt *flow.ExecutionReceipt) { - receipt.ExecutionResult = *ResultForBlockFixture(block) + receipt.ExecutionResult = *result } } @@ -441,9 +441,23 @@ func ExecutionReceiptFixture(opts ...func(*flow.ExecutionReceipt)) *flow.Executi return receipt } -func ExecutionResultFixture() *flow.ExecutionResult { +func WithPreviousResult(resultID flow.Identifier) func(*flow.ExecutionResult) { + return func(result *flow.ExecutionResult) { + result.PreviousResultID = resultID + } +} + +func WithBlock(block *flow.Block) func(*flow.ExecutionResult) { + return func(result *flow.ExecutionResult) { + updatedResult := 
*ResultForBlockFixture(block) + result.BlockID = updatedResult.BlockID + result.Chunks = updatedResult.Chunks + } +} + +func ExecutionResultFixture(opts ...func(*flow.ExecutionResult)) *flow.ExecutionResult { blockID := IdentifierFixture() - return &flow.ExecutionResult{ + result := &flow.ExecutionResult{ ExecutionResultBody: flow.ExecutionResultBody{ PreviousResultID: IdentifierFixture(), BlockID: IdentifierFixture(), @@ -454,14 +468,26 @@ func ExecutionResultFixture() *flow.ExecutionResult { }, Signatures: SignaturesFixture(6), } + + for _, apply := range opts { + apply(result) + } + return result } -func IncorporatedResultFixture() *flow.IncorporatedResult { +// TODO replace by usage unittest.IncorporatedResult +func IncorporatedResultFixture(opts ...func(*flow.IncorporatedResult)) *flow.IncorporatedResult { result := ExecutionResultFixture() incorporatedBlockID := IdentifierFixture() - return flow.NewIncorporatedResult(incorporatedBlockID, result) + ir := flow.NewIncorporatedResult(incorporatedBlockID, result) + + for _, apply := range opts { + apply(ir) + } + return ir } +// TODO replace by usage unittest.IncorporatedResult func IncorporatedResultForBlockFixture(block *flow.Block) *flow.IncorporatedResult { result := ResultForBlockFixture(block) incorporatedBlockID := IdentifierFixture() @@ -486,6 +512,12 @@ func WithBlockID(id flow.Identifier) func(*flow.ResultApproval) { } } +func WithChunk(chunkIdx uint64) func(*flow.ResultApproval) { + return func(approval *flow.ResultApproval) { + approval.Body.ChunkIndex = chunkIdx + } +} + func ResultApprovalFixture(opts ...func(*flow.ResultApproval)) *flow.ResultApproval { attestation := flow.Attestation{ BlockID: IdentifierFixture(), diff --git a/utils/unittest/incorporated_results.go b/utils/unittest/incorporated_results.go new file mode 100644 index 00000000000..e61ebf1fb56 --- /dev/null +++ b/utils/unittest/incorporated_results.go @@ -0,0 +1,30 @@ +package unittest + +import "github.com/onflow/flow-go/model/flow" + +var IncorporatedResult incorporatedResultFactory + +type incorporatedResultFactory struct{} + +func (f *incorporatedResultFactory) Fixture(opts ...func(*flow.IncorporatedResult)) *flow.IncorporatedResult { + result := ExecutionResultFixture() + incorporatedBlockID := IdentifierFixture() + ir := flow.NewIncorporatedResult(incorporatedBlockID, result) + + for _, apply := range opts { + apply(ir) + } + return ir +} + +func (f *incorporatedResultFactory) WithResult(result *flow.ExecutionResult) func(*flow.IncorporatedResult) { + return func(incResult *flow.IncorporatedResult) { + incResult.Result = result + } +} + +func (f *incorporatedResultFactory) WithIncorporatedBlockID(id flow.Identifier) func(*flow.IncorporatedResult) { + return func(incResult *flow.IncorporatedResult) { + incResult.IncorporatedBlockID = id + } +} From 10e9cde2e4c51b0f62ce4cd878d853d1b9cca342 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Sun, 25 Oct 2020 20:26:24 -0700 Subject: [PATCH 042/105] fixed more tests --- engine/consensus/matching/engine_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index c94aeb9d2ea..3796ca95e40 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -737,6 +737,13 @@ func expectedID(expectedID flow.Identifier) interface{} { }) } +func entityWithID(expectedID flow.Identifier) interface{} { + return mock.MatchedBy( + func(entity flow.Entity) bool { + return 
expectedID == entity.ID() + }) +} + // try to get matched results with nothing in memory pools func (ms *MatchingSuite) TestSealableResultsEmptyMempools() { results, err := ms.matching.sealableResults() @@ -813,7 +820,7 @@ func (ms *MatchingSuite) TestSealableResultsInvalidSubgraph() { ms.pendingResults[incorporatedResult.ID()] = incorporatedResult // we expect business logic to remove the incorporated result with failed sub-graph check from mempool - ms.resultsPL.On("Rem", incorporatedResult.ID()).Return(true).Once() + ms.resultsPL.On("Rem", entityWithID(incorporatedResult.ID())).Return(true).Once() results, err := ms.matching.sealableResults() ms.Require().NoError(err) From c604e5b405695745e14ea689a0946b16e821f469 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Sun, 25 Oct 2020 21:22:49 -0700 Subject: [PATCH 043/105] fix IsBlockExecuted --- engine/execution/state/state.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 551296ebd33..9f0e2ebb19d 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -55,12 +55,15 @@ type ReadOnlyExecutionState interface { // it checks whether the statecommitment exists in execution state. func IsBlockExecuted(ctx context.Context, state ReadOnlyExecutionState, block flow.Identifier) (bool, error) { _, err := state.StateCommitmentByBlockID(ctx, block) + + // statecommitment exists means the block has been executed if err == nil { - return false, nil + return true, nil } + // statecommitment not exists means the block hasn't been executed yet if errors.Is(err, storage.ErrNotFound) { - return true, nil + return false, nil } return false, err From aae4d84ff03a49e725d3a954c45e68e2f77e187c Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Sun, 25 Oct 2020 23:00:19 -0700 Subject: [PATCH 044/105] ... tests --- engine/consensus/matching/engine.go | 32 +- engine/consensus/matching/engine_test.go | 387 ++++++++++++----------- module/chunks/publicAssign.go | 2 +- 3 files changed, 223 insertions(+), 198 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index f6f34c3b2c3..106c76aaac8 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -593,28 +593,26 @@ func (e *Engine) matchChunk(incorporatedResult *flow.IncorporatedResult, block * continue } - // check if the approver is assigned to this chunk. - ok = chmodule.IsValidVerifer(assignment, chunk, approverID) - if !ok { - // if the approval comes from a node that wasn't even a staked - // verifier at that block, remove the approval from the mempool. - err := e.ensureStakedNodeWithRole(approverID, block, flow.RoleVerification) - if err != nil { - if engine.IsInvalidInputError(err) { - _, err = e.approvals.RemApproval(approval) - if err != nil { - return false, fmt.Errorf("failed to remove approval from mempool: %w", err) - } - continue + // if the approval comes from a node that wasn't even a staked + // verifier at that block, remove the approval from the mempool. 
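+		// Note on ordering: this staked-verifier check now runs for every
+		// approval, so approvals from unstaked or non-verification nodes are
+		// purged from the mempool even when the approver is not assigned to
+		// this chunk. The assignment check further down only decides whether
+		// the approval is counted towards sealing; it no longer triggers any
+		// mempool cleanup.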
+ err := e.ensureStakedNodeWithRole(approverID, block, flow.RoleVerification) + if err != nil { + if engine.IsInvalidInputError(err) { + _, err = e.approvals.RemApproval(approval) + if err != nil { + return false, fmt.Errorf("failed to remove approval from mempool: %w", err) } - return false, fmt.Errorf("failed to match chunks: %w", err) + continue } + return false, fmt.Errorf("failed to match chunks: %w", err) + } + // skip approval if verifier was not assigned to this chunk. + if !chmodule.IsValidVerifer(assignment, chunk, approverID) { + continue } - // Add signature to incorporated result so that we don't have to check - // it again. + // Add signature to incorporated result so that we don't have to check it again. incorporatedResult.AddSignature(chunk.Index, approverID, approval.Body.AttestationSignature) - validApprovals++ } diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 3796ca95e40..6c60b482df0 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -3,7 +3,6 @@ package matching import ( - "fmt" "math/rand" "os" "testing" @@ -132,6 +131,9 @@ func (ms *MatchingSuite) SetupTest() { ms.identities[ms.verID] = ver ms.approvers = unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleVerification)) + for _, verifier := range ms.approvers { + ms.identities[verifier.ID()] = verifier + } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SETUP BLOCKS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // rootBlock <- latestSealedBlock <- latestFinalizedBlock <- unfinalizedBlock @@ -672,76 +674,14 @@ func (ms *MatchingSuite) TestOnApprovalValid() { // * R forms a valid sub-graph with its previous result (aka parent result) // Method Engine.sealableResults() should return R as an element of the sealable results func (ms *MatchingSuite) TestSealableResultsValid() { - // BLOCKS: <- previousBlock <- block - //previousBlock := unittest.BlockWithParentFixture(ms.unfinalizedBlock.Header) - previousBlock := unittest.BlockFixture() - block := unittest.BlockWithParentFixture(previousBlock.Header) - - // RESULTS for blocks: - previousResult := unittest.ExecutionResultFixture(unittest.WithBlock(&previousBlock)) - result := unittest.ExecutionResultFixture( - unittest.WithBlock(&block), - unittest.WithPreviousResult(previousResult.ID()), - ) - - // Exec Receipt for block with valid subgraph - incorporatedResult := unittest.IncorporatedResult.Fixture(unittest.IncorporatedResult.WithResult(result)) - - // add entities to mempools and persistent storage mocks: - ms.blocks[block.Header.ID()] = &block - ms.persistedResults[previousResult.ID()] = previousResult - ms.persistedResults[result.ID()] = result - ms.pendingResults[incorporatedResult.ID()] = incorporatedResult - - // assign each chunk to each approver - assignment := chunks.NewAssignment() - for _, chunk := range incorporatedResult.Result.Chunks { - assignment.Add(chunk, ms.approvers.NodeIDs()) - } - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil).Once() - - // add enough approvals for each chunk - print(fmt.Sprintf("%d\n", len(incorporatedResult.Result.Chunks))) - for index := 0; index < len(incorporatedResult.Result.Chunks); index++ { - chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) - for _, approver := range ms.approvers { - chunkApprovals[approver.NodeID] = approvalFor(incorporatedResult.Result, uint64(index), approver.NodeID) - } - ms.approvalsPL.On("ByChunk", incorporatedResult.Result.ID(), 
uint64(index)).Return(chunkApprovals).Once() - } + valSubgrph := ms.validSubgraphFixture() + ms.addSubgraphFixtureToMempools(valSubgrph) // test output of Matching Engine's sealableResults() results, err := ms.matching.sealableResults() ms.Require().NoError(err) ms.Assert().Equal(1, len(results), "expecting a single return value") - ms.Assert().Equal(incorporatedResult.ID(), results[0].ID(), "expecting a single return value") - - ms.resultsDB.AssertExpectations(ms.T()) - ms.assigner.AssertExpectations(ms.T()) - ms.approvalsPL.AssertExpectations(ms.T()) -} - -func approvalFor(result *flow.ExecutionResult, chunkIdx uint64, approverID flow.Identifier) *flow.ResultApproval { - return unittest.ResultApprovalFixture( - unittest.WithBlockID(result.BlockID), - unittest.WithExecutionResultID(result.ID()), - unittest.WithApproverID(approverID), - unittest.WithChunk(chunkIdx), - ) -} - -func expectedID(expectedID flow.Identifier) interface{} { - return mock.MatchedBy( - func(actualID flow.Identifier) bool { - return expectedID == actualID - }) -} - -func entityWithID(expectedID flow.Identifier) interface{} { - return mock.MatchedBy( - func(entity flow.Entity) bool { - return expectedID == entity.ID() - }) + ms.Assert().Equal(valSubgrph.IncorporatedResult.ID(), results[0].ID(), "expecting a single return value") } // try to get matched results with nothing in memory pools @@ -758,8 +698,9 @@ func (ms *MatchingSuite) TestSealableResultsEmptyMempools() { // mempool, where _both_ the block that incorporates the result as well // as the block the result pertains to are known func (ms *MatchingSuite) TestSealableResultsMissingBlock() { - incorporatedResult := unittest.IncorporatedResultFixture() - ms.pendingResults[incorporatedResult.ID()] = incorporatedResult + valSubgrph := ms.validSubgraphFixture() + ms.addSubgraphFixtureToMempools(valSubgrph) + delete(ms.blocks, valSubgrph.Block.ID()) // remove block the execution receipt pertains to _, err := ms.matching.sealableResults() ms.Require().Error(err) @@ -770,20 +711,9 @@ func (ms *MatchingSuite) TestSealableResultsMissingBlock() { // * skip this result // * this result should not be removed from the mempool func (ms *MatchingSuite) TestSealableResultUnknownPrevious() { - block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - - ms.pendingResults[incorporatedResult.ID()] = incorporatedResult - - // check that it is looking for the previous result, but return nil as if - // not found - ms.resultsDB.On("ByID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(nil, storerr.ErrNotFound) + subgrph := ms.validSubgraphFixture() + ms.addSubgraphFixtureToMempools(subgrph) + delete(ms.persistedResults, subgrph.PreviousResult.ID()) // remove previous execution result from storage layer results, err := ms.matching.sealableResults() ms.Require().NoError(err) @@ -801,80 +731,95 @@ func (ms *MatchingSuite) TestSealableResultUnknownPrevious() { // * neither consider R1 nor R2 sealable incorporated results and // * remove R1 from IncorporatedResults mempool, i.e. 
`resultsPL` func (ms *MatchingSuite) TestSealableResultsInvalidSubgraph() { - blockA := unittest.BlockFixture() // the parent block's ID is randomly generated here + subgrph := ms.validSubgraphFixture() + subgrph.PreviousResult.BlockID = unittest.IdentifierFixture() // invalidate subgraph + subgrph.Result.PreviousResultID = subgrph.PreviousResult.ID() + ms.addSubgraphFixtureToMempools(subgrph) - // RESULTS for blocks: - resultR2 := unittest.ExecutionResultFixture() // the result pertains to a block whose ID is random generated here - resultR1 := unittest.ExecutionResultFixture( - unittest.WithBlock(&blockA), - unittest.WithPreviousResult(resultR2.ID()), - ) + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() - // Exec Receipt for block with valid subgraph - incorporatedResult := unittest.IncorporatedResult.Fixture(unittest.IncorporatedResult.WithResult(resultR1)) + results, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.Assert().Empty(results, "should not select result with invalid subgraph") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called +} - // add entities to mempools and persistent storage mocks: - ms.blocks[blockA.Header.ID()] = &blockA - ms.persistedResults[resultR2.ID()] = resultR2 - ms.persistedResults[resultR1.ID()] = resultR1 - ms.pendingResults[incorporatedResult.ID()] = incorporatedResult +// TestSealableResultsInvalidChunks tests that matching.Engine.sealableResults() +// performs the following chunk checks on the result: +// * the number k of chunks in the execution result equals to +// the number of collections in the corresponding block _plus_ 1 (for system chunk) +// * for each index idx := 0, 1, ..., k +// there exists once chunk +// Here we test that an IncorporatedResult with too _few_ chunks is not sealed and removed from the mempool +func (ms *MatchingSuite) TestSealableResults_TooFewChunks() { + subgrph := ms.validSubgraphFixture() + chunks := subgrph.Result.Chunks + subgrph.Result.Chunks = chunks[0 : len(chunks)-2] // drop the last chunk + ms.addSubgraphFixtureToMempools(subgrph) // we expect business logic to remove the incorporated result with failed sub-graph check from mempool - ms.resultsPL.On("Rem", entityWithID(incorporatedResult.ID())).Return(true).Once() + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() results, err := ms.matching.sealableResults() ms.Require().NoError(err) - ms.Assert().Empty(results, "should not select result with invalid subgraph") - + ms.Assert().Empty(results, "should not select result with too many chunks") ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } -func (ms *MatchingSuite) TestSealResultInvalidChunks() { +// TestSealableResults_TooManyChunks tests that matching.Engine.sealableResults() +// performs the following chunk checks on the result: +// * the number k of chunks in the execution result equals to +// the number of collections in the corresponding block _plus_ 1 (for system chunk) +// * for each index idx := 0, 1, ..., k +// there exists once chunk +// Here we test that an IncorporatedResult with too _many_ chunks is not sealed and removed from the mempool +func (ms *MatchingSuite) TestSealableResults_TooManyChunks() { + subgrph := ms.validSubgraphFixture() + chunks := subgrph.Result.Chunks + 
subgrph.Result.Chunks = append(chunks, chunks[len(chunks)-1]) // duplicate the last entry + ms.addSubgraphFixtureToMempools(subgrph) - // try to seal a result with a mismatching chunk count (one too many) - block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() - - // add an extra chunk - chunk := unittest.ChunkFixture(block.ID()) - chunk.Index = uint64(len(block.Payload.Guarantees)) - incorporatedResult.Result.Chunks = append(incorporatedResult.Result.Chunks, chunk) + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult + results, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.Assert().Empty(results, "should not select result with too few chunks") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called +} - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) +// TestSealableResults_InvalidChunks tests that matching.Engine.sealableResults() +// performs the following chunk checks on the result: +// * the number k of chunks in the execution result equals to +// the number of collections in the corresponding block _plus_ 1 (for system chunk) +// * for each index idx := 0, 1, ..., k +// there exists once chunk +// Here we test that an IncorporatedResult with +// * correct number of chunks +// * but one missing chunk and one duplicated chunk +// is not sealed and removed from the mempool +func (ms *MatchingSuite) TestSealableResults_InvalidChunks() { + subgrph := ms.validSubgraphFixture() + chunks := subgrph.Result.Chunks + chunks[len(chunks)-2] = chunks[len(chunks)-1] // overwrite second-last with last entry, which is now duplicated + // yet we have the correct number of elements in the chunk list + ms.addSubgraphFixtureToMempools(subgrph) - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() results, err := ms.matching.sealableResults() ms.Require().NoError(err) - - ms.Assert().Empty(results, "should not select result with invalid number of chunks") - ms.resultsPL.AssertNumberOfCalls(ms.T(), "Rem", 1) + ms.Assert().Empty(results, "should not select result with invalid chunk list") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } func (ms *MatchingSuite) TestSealableResultsNoPayload() { - + ms.T().Fail() block := unittest.BlockFixture() block.Payload = nil // empty payload - 
ms.blocks[block.Header.ID()] = &block + ms.blocks[block.ID()] = &block incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) previous := unittest.ExecutionResultFixture() previous.BlockID = block.Header.ParentID @@ -911,55 +856,30 @@ func (ms *MatchingSuite) TestSealableResultsNoPayload() { } func (ms *MatchingSuite) TestSealableResultsUnassignedVerifiers() { + subgrph := ms.validSubgraphFixture() - block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() - - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult - - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) - - // list of 3 approvers - assignedApprovers := ms.approvers[:3] - - // create assignment with 3 verification node assigned to every chunk + assignedVerifiersPerChunk := uint(len(ms.approvers) / 2) assignment := chunks.NewAssignment() - for _, chunk := range incorporatedResult.Result.Chunks { - assignment.Add(chunk, assignedApprovers.NodeIDs()) - } - // mock assigner - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil) - - realApprovalPool, err := stdmap.NewApprovals(1000) - ms.Require().NoError(err) - ms.matching.approvals = realApprovalPool + approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) + for _, chunk := range subgrph.IncorporatedResult.Result.Chunks { + assignment.Add(chunk, ms.approvers[0:assignedVerifiersPerChunk].NodeIDs()) // assign leading half verifiers - // approve every chunk by an unassigned verifier. - unassignedApprover := ms.approvers[3] - for index := uint64(0); index < uint64(len(incorporatedResult.Result.Chunks)); index++ { - approval := unittest.ResultApprovalFixture() - approval.Body.BlockID = block.Header.ID() - approval.Body.ExecutionResultID = incorporatedResult.Result.ID() - approval.Body.ApproverID = unassignedApprover.NodeID - approval.Body.ChunkIndex = index - _, err := ms.matching.approvals.Add(approval) - ms.Require().NoError(err) + // generate approvals by _tailing_ half verifiers + chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) + for _, approver := range ms.approvers[assignedVerifiersPerChunk:len(ms.approvers)] { + chunkApprovals[approver.NodeID] = approvalFor(subgrph.IncorporatedResult.Result, chunk.Index, approver.NodeID) + } + approvals[chunk.Index] = chunkApprovals } + subgrph.Assignment = assignment + subgrph.Approvals = approvals + + ms.addSubgraphFixtureToMempools(subgrph) results, err := ms.matching.sealableResults() ms.Require().NoError(err) - ms.Assert().Len(results, 0, "should not count approvals from unassigned verifiers") + ms.Assert().Empty(results, "should not select result with ") + ms.approvalsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } // Insert an approval from a node that wasn't a staked verifier at that block @@ -968,7 +888,7 @@ func (ms *MatchingSuite) TestSealableResultsUnassignedVerifiers() { // block becomes known. 
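// (Engine-side, this removal is implemented by the matchChunk changes in this
// patch: ensureStakedNodeWithRole is now invoked for every approval and, on an
// invalid-input error, the approval is dropped from the mempool; see engine.go.)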
func (ms *MatchingSuite) TestRemoveApprovalsFromInvalidVerifiers() { block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block + ms.blocks[block.ID()] = &block incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) previous := unittest.ExecutionResultFixture() previous.BlockID = block.Header.ParentID @@ -1001,7 +921,7 @@ func (ms *MatchingSuite) TestRemoveApprovalsFromInvalidVerifiers() { // add an approval from an unstaked verifier for the first chunk approval := unittest.ResultApprovalFixture() - approval.Body.BlockID = block.Header.ID() + approval.Body.BlockID = block.ID() approval.Body.ExecutionResultID = incorporatedResult.Result.ID() approval.Body.ApproverID = unittest.IdentifierFixture() // this is not a staked verifier approval.Body.ChunkIndex = 0 @@ -1021,7 +941,7 @@ func (ms *MatchingSuite) TestRemoveApprovalsFromInvalidVerifiers() { func (ms *MatchingSuite) TestSealableResultsInsufficientApprovals() { block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block + ms.blocks[block.ID()] = &block incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) previous := unittest.ExecutionResultFixture() previous.BlockID = block.Header.ParentID @@ -1085,7 +1005,7 @@ func (ms *MatchingSuite) TestSealableResultsInsufficientApprovals() { func (ms *MatchingSuite) TestSealValid() { block := unittest.BlockFixture() - ms.blocks[block.Header.ID()] = &block + ms.blocks[block.ID()] = &block incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) previous := unittest.ExecutionResultFixture() previous.BlockID = block.Header.ParentID @@ -1127,7 +1047,7 @@ func (ms *MatchingSuite) TestSealValid() { for _, approver := range ms.approvers { for index := uint64(0); index < uint64(len(incorporatedResult.Result.Chunks)); index++ { approval := unittest.ResultApprovalFixture() - approval.Body.BlockID = block.Header.ID() + approval.Body.BlockID = block.ID() approval.Body.ExecutionResultID = incorporatedResult.Result.ID() approval.Body.ApproverID = approver.NodeID approval.Body.ChunkIndex = index @@ -1296,3 +1216,110 @@ func stateSnapshotForKnownBlock(block *flow.Header, identities map[flow.Identifi snapshot.On("Head").Return(block, nil) return snapshot } + +func approvalFor(result *flow.ExecutionResult, chunkIdx uint64, approverID flow.Identifier) *flow.ResultApproval { + return unittest.ResultApprovalFixture( + unittest.WithBlockID(result.BlockID), + unittest.WithExecutionResultID(result.ID()), + unittest.WithApproverID(approverID), + unittest.WithChunk(chunkIdx), + ) +} + +func expectedID(expectedID flow.Identifier) interface{} { + return mock.MatchedBy( + func(actualID flow.Identifier) bool { + return expectedID == actualID + }) +} + +func entityWithID(expectedID flow.Identifier) interface{} { + return mock.MatchedBy( + func(entity flow.Entity) bool { + return expectedID == entity.ID() + }) +} + +// subgraphFixture represents a subgraph of the blockchain: +// Result -----------------------------------> Block +// | | +// | v +// | ParentBlock +// v +// PreviousResult ---> PreviousResult.BlockID +// +// Depending on validity of the subgraph: +// * valid: PreviousResult.BlockID == ParentBlock.ID() +// * invalid: PreviousResult.BlockID != ParentBlock.ID() +type subgraphFixture struct { + Block *flow.Block + ParentBlock *flow.Block + Result *flow.ExecutionResult + PreviousResult *flow.ExecutionResult + IncorporatedResult *flow.IncorporatedResult + Assignment *chunks.Assignment + Approvals 
map[uint64]map[flow.Identifier]*flow.ResultApproval // chunkIndex -> Verifier Node ID -> Approval +} + +// Generates a valid subgraph: +// let +// * R1 be a result which pertains to blockA +// * R2 be R1's previous result, +// where R2 pertains to blockB +// The execution results form a valid subgraph if and only if: +// blockA.ParentID == blockB.ID +func (ms *MatchingSuite) validSubgraphFixture() subgraphFixture { + // BLOCKS: <- previousBlock <- block + parentBlock := unittest.BlockFixture() + block := unittest.BlockWithParentFixture(parentBlock.Header) + + // RESULTS for blocks: + previousResult := unittest.ExecutionResultFixture(unittest.WithBlock(&parentBlock)) + result := unittest.ExecutionResultFixture( + unittest.WithBlock(&block), + unittest.WithPreviousResult(previousResult.ID()), + ) + + // Exec Receipt for block with valid subgraph + incorporatedResult := unittest.IncorporatedResult.Fixture(unittest.IncorporatedResult.WithResult(result)) + + // assign each chunk to 50% of validation Nodes and generate respective approvals + assignment := chunks.NewAssignment() + assignedVerifiersPerChunk := uint(len(ms.approvers) / 2) + approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) + for _, chunk := range incorporatedResult.Result.Chunks { + assignedVerifiers := ms.approvers.Sample(assignedVerifiersPerChunk) + assignment.Add(chunk, assignedVerifiers.NodeIDs()) + + // generate approvals + chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) + for _, approver := range assignedVerifiers { + chunkApprovals[approver.NodeID] = approvalFor(incorporatedResult.Result, chunk.Index, approver.NodeID) + } + approvals[chunk.Index] = chunkApprovals + } + + return subgraphFixture{ + Block: &block, + ParentBlock: &parentBlock, + Result: result, + PreviousResult: previousResult, + IncorporatedResult: incorporatedResult, + Assignment: assignment, + Approvals: approvals, + } +} + +// addSubgraphFixtureToMempools adds add entities in subgraph to mempools and persistent storage mocks +func (ms *MatchingSuite) addSubgraphFixtureToMempools(subgraph subgraphFixture) { + ms.blocks[subgraph.ParentBlock.ID()] = subgraph.ParentBlock + ms.blocks[subgraph.Block.ID()] = subgraph.Block + ms.persistedResults[subgraph.PreviousResult.ID()] = subgraph.PreviousResult + ms.persistedResults[subgraph.Result.ID()] = subgraph.Result + ms.pendingResults[subgraph.IncorporatedResult.ID()] = subgraph.IncorporatedResult + + ms.assigner.On("Assign", subgraph.IncorporatedResult.Result, subgraph.IncorporatedResult.IncorporatedBlockID).Return(subgraph.Assignment, nil).Maybe() + for index := uint64(0); index < uint64(len(subgraph.IncorporatedResult.Result.Chunks)); index++ { + ms.approvalsPL.On("ByChunk", subgraph.IncorporatedResult.Result.ID(), index).Return(subgraph.Approvals[index]).Maybe() + } +} diff --git a/module/chunks/publicAssign.go b/module/chunks/publicAssign.go index 37b5734d2ca..65233a5a7a4 100644 --- a/module/chunks/publicAssign.go +++ b/module/chunks/publicAssign.go @@ -68,7 +68,7 @@ func (p *PublicAssignment) Assign(result *flow.ExecutionResult, blockID flow.Ide // Get a list of verifiers snapshot := p.protocolState.AtBlockID(blockID) - verifiers, err := snapshot.Identities(filter.HasRole(flow.RoleVerification)) + verifiers, err := snapshot.Identities(filter.And(filter.HasRole(flow.RoleVerification), filter.HasStake(true))) if err != nil { return nil, fmt.Errorf("could not get verifiers: %w", err) } From a6d13503f6ccc15ff9fdbf5e772b1163184897d1 Mon Sep 17 00:00:00 2001 From: Alexander 
Hentschel Date: Sun, 25 Oct 2020 23:32:22 -0700 Subject: [PATCH 045/105] extended matching engine tests --- engine/consensus/matching/engine_test.go | 165 +++++++++++------------ 1 file changed, 76 insertions(+), 89 deletions(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 6c60b482df0..ea99799b542 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -191,64 +191,6 @@ func (ms *MatchingSuite) SetupTest() { return stateSnapshotForKnownBlock(block.Header, ms.identities) }, ) - //ms.finalSnapshot.On("Identity", mock.Anything).Return( - // func(nodeID flow.Identifier) *flow.Identity { - // identity := ms.identities[nodeID] - // return identity - // }, - // func(nodeID flow.Identifier) error { - // _, found := ms.identities[nodeID] - // if !found { - // return fmt.Errorf("could not get identity (%x)", nodeID) - // } - // return nil - // }, - //) - //ms.finalSnapshot.On("Identities", mock.Anything).Return( - // func(selector flow.IdentityFilter) flow.IdentityList { - // return ms.approvers - // }, - // func(selector flow.IdentityFilter) error { - // return nil - // }, - //) - // - //ms.state.On("AtBlockID", mock.Anything).Return( - // func(blockID flow.Identifier) realproto.Snapshot { - // return ms.refBlockSnapshot - // }, - // nil, - //) - - //ms.refBlockHeader = &flow.Header{Height: 20} // only need height - //ms.refBlockSnapshot = &protocol.Snapshot{} - //ms.refBlockSnapshot.On("Identity", mock.Anything).Return( - // func(nodeID flow.Identifier) *flow.Identity { - // identity := ms.identities[nodeID] - // return identity - // }, - // func(nodeID flow.Identifier) error { - // _, found := ms.identities[nodeID] - // if !found { - // return fmt.Errorf("could not get identity (%x)", nodeID) - // } - // return nil - // }, - //) - //ms.refBlockSnapshot.On("Identities", mock.Anything).Return( - // func(selector flow.IdentityFilter) flow.IdentityList { - // return ms.approvers - // }, - // func(selector flow.IdentityFilter) error { - // return nil - // }, - //) - //ms.refBlockSnapshot.On("Head").Return( - // func() *flow.Header { - // return ms.refBlockHeader - // }, - // nil, - //) // ~~~~~~~~~~~~~~~~~~~~~~~ SETUP RESULTS STORAGE ~~~~~~~~~~~~~~~~~~~~~~~~ // ms.persistedResults = make(map[flow.Identifier]*flow.ExecutionResult) @@ -815,46 +757,68 @@ func (ms *MatchingSuite) TestSealableResults_InvalidChunks() { ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } -func (ms *MatchingSuite) TestSealableResultsNoPayload() { - ms.T().Fail() - block := unittest.BlockFixture() - block.Payload = nil // empty payload - ms.blocks[block.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() +// TestSealableResults_NoPayload_MissingChunk tests that matching.Engine.sealableResults() +// enforces the correct number of chunks for empty blocks, i.e. 
blocks with no payload: +// * execution receipt with missing system chunk should be rejected +func (ms *MatchingSuite) TestSealableResults_NoPayload_MissingChunk() { + subgrph := ms.validSubgraphFixture() + subgrph.Block.Payload = nil // override block's payload to nil + subgrph.IncorporatedResult.IncorporatedBlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.BlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.Chunks = subgrph.IncorporatedResult.Result.Chunks[0:0] // empty chunk list + ms.addSubgraphFixtureToMempools(subgrph) - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) + results, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.Assert().Empty(results, "should not select result with invalid chunk list") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called +} - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) +// TestSealableResults_NoPayload_TooManyChunk tests that matching.Engine.sealableResults() +// enforces the correct number of chunks for empty blocks, i.e. blocks with no payload: +// * execution receipt with more than one chunk should be rejected +func (ms *MatchingSuite) TestSealableResults_NoPayload_TooManyChunk() { + subgrph := ms.validSubgraphFixture() + subgrph.Block.Payload = nil // override block's payload to nil + subgrph.IncorporatedResult.IncorporatedBlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.BlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.Chunks = subgrph.IncorporatedResult.Result.Chunks[0:2] // two chunks + ms.addSubgraphFixtureToMempools(subgrph) - assignment := chunks.NewAssignment() - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil) + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() results, err := ms.matching.sealableResults() ms.Require().NoError(err) - if ms.Assert().Len(results, 1, "should select result for empty block") { - sealable := results[0] - ms.Assert().Equal(incorporatedResult, sealable) - } + ms.Assert().Empty(results, "should not select result with invalid chunk list") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called +} + +// TestSealableResults_NoPayload_WrongIndexChunk tests that matching.Engine.sealableResults() +// enforces the correct number of chunks for empty blocks, i.e. 
blocks with no payload: +// * execution receipt with a single chunk, but wrong chunk index, should be rejected +func (ms *MatchingSuite) TestSealableResults_NoPayload_WrongIndexChunk() { + subgrph := ms.validSubgraphFixture() + subgrph.Block.Payload = nil // override block's payload to nil + subgrph.IncorporatedResult.IncorporatedBlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.BlockID = subgrph.Block.ID() // update block's ID + subgrph.IncorporatedResult.Result.Chunks = subgrph.IncorporatedResult.Result.Chunks[2:2] // chunk with chunkIndex == 2 + ms.addSubgraphFixtureToMempools(subgrph) + + // we expect business logic to remove the incorporated result with failed sub-graph check from mempool + ms.resultsPL.On("Rem", entityWithID(subgrph.IncorporatedResult.ID())).Return(true).Once() + + results, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.Assert().Empty(results, "should not select result with invalid chunk list") + ms.resultsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } +// TestSealableResultsUnassignedVerifiers tests that matching.Engine.sealableResults(): +// only considers approvals from assigned verifiers func (ms *MatchingSuite) TestSealableResultsUnassignedVerifiers() { subgrph := ms.validSubgraphFixture() @@ -882,6 +846,29 @@ func (ms *MatchingSuite) TestSealableResultsUnassignedVerifiers() { ms.approvalsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } +// TestSealableResults_UnknownVerifiers tests that matching.Engine.sealableResults(): +// * removes approvals from unknown verification nodes from mempool +// Note: we test a seenario here, were result is sealable; it just has additional approvals from invalid nodes +func (ms *MatchingSuite) TestSealableResults_UnknownVerifiers() { + subgrph := ms.validSubgraphFixture() + + // add invalid approvals to leading chunk: + app1 := approvalFor(subgrph.IncorporatedResult.Result, 0, unittest.IdentifierFixture()) // from unknown node + app2 := approvalFor(subgrph.IncorporatedResult.Result, 0, ms.exeID) // from known but non-VerificationNode + subgrph.Approvals[0][app1.Body.ApproverID] = app1 + subgrph.Approvals[0][app2.Body.ApproverID] = app2 + + ms.addSubgraphFixtureToMempools(subgrph) + + // we expect business logic to remove the approval from the unknown node + ms.approvalsPL.On("RemApproval", entityWithID(app1.ID())).Return(true, nil).Once() + ms.approvalsPL.On("RemApproval", entityWithID(app2.ID())).Return(true, nil).Once() + + _, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.approvalsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called +} + // Insert an approval from a node that wasn't a staked verifier at that block // (this may occur when the block wasn't know when the node received the // approval). Ensure that the approval is removed from the mempool when the From 48e2d1d745d57820e67dea579c6b134f90f416f6 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 00:46:53 -0700 Subject: [PATCH 046/105] done fixing tests for matching engine. 
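
Besides rewriting the matching-engine tests around the new subgraph
fixtures, this patch also corrects the height range walked by
Engine.requestPending() when requesting missing receipts: the old loop
started at the already-sealed height and stopped one block short of the
latest finalized height. A rough sketch of the corrected loop (simplified;
the per-height block lookup is elided, see the engine.go hunk below for the
real code):

    // walk every finalized but still unsealed height
    for height := sealed.Height + 1; height <= final.Height; height++ {
        if len(missingBlocksOrderedByHeight) >= e.maxUnsealedResults {
            break
        }
        // look up the finalized block at this height and, if its execution
        // result is still missing, record its ID for requesting
    }

The updated TestRequestReceiptsPendingBlocks accordingly expects requests
for exactly the blocks at heights sealed+1 through the latest finalized.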
--- engine/consensus/matching/engine.go | 2 +- engine/consensus/matching/engine_test.go | 360 ++++------------------- 2 files changed, 62 insertions(+), 300 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 106c76aaac8..5fa1d4ffd1c 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -812,7 +812,7 @@ func (e *Engine) requestPending() error { // traverse each unsealed and finalized block with height from low to high, // if the result is missing, then add the blockID to a missing block list in // order to request them. - for height := sealed.Height; height < final.Height; height++ { + for height := sealed.Height + 1; height <= final.Height; height++ { // add at most number of results if len(missingBlocksOrderedByHeight) >= e.maxUnsealedResults { break diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index ea99799b542..069714761c3 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -3,10 +3,8 @@ package matching import ( - "math/rand" "os" "testing" - "time" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -17,7 +15,6 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" mempool "github.com/onflow/flow-go/module/mempool/mock" - "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" realproto "github.com/onflow/flow-go/state/protocol" @@ -609,6 +606,13 @@ func (ms *MatchingSuite) TestOnApprovalValid() { ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 0) } +// try to get matched results with nothing in memory pools +func (ms *MatchingSuite) TestSealableResultsEmptyMempools() { + results, err := ms.matching.sealableResults() + ms.Require().NoError(err, "should not error with empty mempools") + ms.Assert().Empty(results, "should not have matched results with empty mempools") +} + // TestSealableResultsValid tests matching.Engine.sealableResults(): // * a well-formed incorporated result R is in the mempool // * sufficient number of valid result approvals for result R @@ -626,13 +630,6 @@ func (ms *MatchingSuite) TestSealableResultsValid() { ms.Assert().Equal(valSubgrph.IncorporatedResult.ID(), results[0].ID(), "expecting a single return value") } -// try to get matched results with nothing in memory pools -func (ms *MatchingSuite) TestSealableResultsEmptyMempools() { - results, err := ms.matching.sealableResults() - ms.Require().NoError(err, "should not error with empty mempools") - ms.Assert().Empty(results, "should not have matched results with empty mempools") -} - // Try to seal a result for which we don't have the block. 
// This tests verifies that Matching engine is performing self-consistency checking: // Not finding the block for an incorporated result is a fatal @@ -848,331 +845,96 @@ func (ms *MatchingSuite) TestSealableResultsUnassignedVerifiers() { // TestSealableResults_UnknownVerifiers tests that matching.Engine.sealableResults(): // * removes approvals from unknown verification nodes from mempool -// Note: we test a seenario here, were result is sealable; it just has additional approvals from invalid nodes -func (ms *MatchingSuite) TestSealableResults_UnknownVerifiers() { +func (ms *MatchingSuite) TestSealableResults_ApprovalsForUnknownBlockRemain() { + // make child block for unfinalizedBlock, i.e.: + // <- unfinalizedBlock <- block + // and create Execution result ands approval for this block + block := unittest.BlockWithParentFixture(ms.unfinalizedBlock.Header) + er := unittest.ExecutionResultFixture(unittest.WithBlock(&block)) + app1 := approvalFor(er, 0, unittest.IdentifierFixture()) // from unknown node + + ms.approvalsPL.On("All").Return([]*flow.ResultApproval{app1}) + chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) + chunkApprovals[app1.Body.ApproverID] = app1 + ms.approvalsPL.On("ByChunk", er.ID(), 0).Return(chunkApprovals) + + _, err := ms.matching.sealableResults() + ms.Require().NoError(err) + ms.approvalsPL.AssertNumberOfCalls(ms.T(), "RemApproval", 0) + ms.approvalsPL.AssertNumberOfCalls(ms.T(), "RemChunk", 0) +} + +// TestRemoveApprovalsFromInvalidVerifiers tests that matching.Engine.sealableResults(): +// * removes approvals from invalid verification nodes from mempool +// This may occur when the block wasn't know when the node received the approval. +// Note: we test a scenario here, were result is sealable; it just has additional +// approvals from invalid nodes +func (ms *MatchingSuite) TestRemoveApprovalsFromInvalidVerifiers() { subgrph := ms.validSubgraphFixture() // add invalid approvals to leading chunk: app1 := approvalFor(subgrph.IncorporatedResult.Result, 0, unittest.IdentifierFixture()) // from unknown node app2 := approvalFor(subgrph.IncorporatedResult.Result, 0, ms.exeID) // from known but non-VerificationNode + ms.identities[ms.verID].Stake = 0 + app3 := approvalFor(subgrph.IncorporatedResult.Result, 0, ms.verID) // from zero-weight VerificationNode subgrph.Approvals[0][app1.Body.ApproverID] = app1 subgrph.Approvals[0][app2.Body.ApproverID] = app2 + subgrph.Approvals[0][app3.Body.ApproverID] = app3 ms.addSubgraphFixtureToMempools(subgrph) // we expect business logic to remove the approval from the unknown node ms.approvalsPL.On("RemApproval", entityWithID(app1.ID())).Return(true, nil).Once() ms.approvalsPL.On("RemApproval", entityWithID(app2.ID())).Return(true, nil).Once() + ms.approvalsPL.On("RemApproval", entityWithID(app3.ID())).Return(true, nil).Once() _, err := ms.matching.sealableResults() ms.Require().NoError(err) ms.approvalsPL.AssertExpectations(ms.T()) // asserts that resultsPL.Rem(incorporatedResult.ID()) was called } -// Insert an approval from a node that wasn't a staked verifier at that block -// (this may occur when the block wasn't know when the node received the -// approval). Ensure that the approval is removed from the mempool when the -// block becomes known. 
-func (ms *MatchingSuite) TestRemoveApprovalsFromInvalidVerifiers() { - block := unittest.BlockFixture() - ms.blocks[block.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() - - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult - - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) - - // assign each chunk to each approver - assignment := chunks.NewAssignment() - for _, chunk := range incorporatedResult.Result.Chunks { - assignment.Add(chunk, ms.approvers.NodeIDs()) - } - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil) - - // not using mock for approvals pool because we need the internal indexing - // logic - realApprovalPool, err := stdmap.NewApprovals(1000) - ms.Require().NoError(err) - ms.matching.approvals = realApprovalPool - - // add an approval from an unstaked verifier for the first chunk - - approval := unittest.ResultApprovalFixture() - approval.Body.BlockID = block.ID() - approval.Body.ExecutionResultID = incorporatedResult.Result.ID() - approval.Body.ApproverID = unittest.IdentifierFixture() // this is not a staked verifier - approval.Body.ChunkIndex = 0 - _, err = ms.matching.approvals.Add(approval) - ms.Require().NoError(err) - - // with requireApprovals = true ( default test case ), it should not collect - // any results because we haven't added any approvals to the mempool - results, err := ms.matching.sealableResults() - ms.Require().NoError(err) - ms.Assert().Empty(results, "should not select result with insufficient approvals") - - // should have deleted the approval of the first chunk - ms.Assert().Empty(ms.matching.approvals.All(), "should have removed the approval") -} - +// TestSealableResultsInsufficientApprovals tests matching.Engine.sealableResults(): +// * a result where at least one chunk has not enough approvals (require +// currently at least one) should not be sealable func (ms *MatchingSuite) TestSealableResultsInsufficientApprovals() { + subgrph := ms.validSubgraphFixture() + delete(subgrph.Approvals, uint64(len(subgrph.Result.Chunks)-1)) + ms.addSubgraphFixtureToMempools(subgrph) - block := unittest.BlockFixture() - ms.blocks[block.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() - - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult - - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) - - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := 
args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) - - // assign each chunk to each approver - assignment := chunks.NewAssignment() - for _, chunk := range incorporatedResult.Result.Chunks { - assignment.Add(chunk, ms.approvers.NodeIDs()) - } - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil) - - // check that we are looking for chunk approvals, but return nil as if not - // found - ms.approvalsPL.On("ByChunk", mock.Anything, mock.Anything).Run( - func(args mock.Arguments) { - resultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.ID(), resultID) - }, - ).Return(nil) - - // with requireApprovals = true ( default test case ), it should not collect - // any results because we haven't added any approvals to the mempool - results, err := ms.matching.sealableResults() - ms.Require().NoError(err) - ms.Assert().Empty(results, "should not select result with insufficient approvals") - - // with requireApprovals = false, it should collect the result even if - // there are no corresponding approvals - ms.matching.requireApprovals = false - results, err = ms.matching.sealableResults() - ms.Require().NoError(err) - if ms.Assert().Len(results, 1, "should select result when requireApprovals flag is false") { - sealable := results[0] - ms.Assert().Equal(incorporatedResult, sealable) - } -} - -// insert a well-formed incorporated result in the mempool, as well as a -// sufficient number of valid result approvals, and check that the seal is -// correctly generated. -func (ms *MatchingSuite) TestSealValid() { - - block := unittest.BlockFixture() - ms.blocks[block.ID()] = &block - incorporatedResult := unittest.IncorporatedResultForBlockFixture(&block) - previous := unittest.ExecutionResultFixture() - previous.BlockID = block.Header.ParentID - incorporatedResult.Result.PreviousResultID = previous.ID() - - // add incorporated result to mempool - ms.pendingResults[incorporatedResult.Result.ID()] = incorporatedResult - - // check that it is looking for the previous result, and return previous - ms.resultsPL.On("ByResultID", mock.Anything).Run( - func(args mock.Arguments) { - previousResultID := args.Get(0).(flow.Identifier) - ms.Assert().Equal(incorporatedResult.Result.PreviousResultID, previousResultID) - }, - ).Return(previous, nil) - - // check that we are trying to remove the incorporated result from mempool - ms.resultsPL.On("Rem", mock.Anything).Run( - func(args mock.Arguments) { - incResult := args.Get(0).(*flow.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.ID(), incResult.ID()) - }, - ).Return(true) - - // assign each chunk to each approver - assignment := chunks.NewAssignment() - for _, chunk := range incorporatedResult.Result.Chunks { - assignment.Add(chunk, ms.approvers.NodeIDs()) - } - ms.assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil) - - // not using mock for approvals pool because we need the internal indexing - // logic - realApprovalPool, err := stdmap.NewApprovals(1000) - ms.Require().NoError(err) - ms.matching.approvals = realApprovalPool - - // add enough approvals for each chunk - for _, approver := range ms.approvers { - for index := uint64(0); index < uint64(len(incorporatedResult.Result.Chunks)); index++ { - approval := unittest.ResultApprovalFixture() - approval.Body.BlockID = block.ID() - approval.Body.ExecutionResultID = 
incorporatedResult.Result.ID() - approval.Body.ApproverID = approver.NodeID - approval.Body.ChunkIndex = index - _, err := ms.matching.approvals.Add(approval) - ms.Require().NoError(err) - } - } - + // test output of Matching Engine's sealableResults() results, err := ms.matching.sealableResults() ms.Require().NoError(err) - ms.Assert().Len(results, 1, "should select result with sufficient approvals") - - sealable := results[0] - ms.Assert().Equal(incorporatedResult, sealable) - - // the incorporated result should have collected 1 signature per chunk - // (happy path) - ms.Assert().Equal( - incorporatedResult.Result.Chunks.Len(), - len(sealable.GetAggregatedSignatures()), - ) - - // check match when we are storing entities - ms.resultsDB.On("Store", mock.Anything).Run( - func(args mock.Arguments) { - stored := args.Get(0).(*flow.ExecutionResult) - ms.Assert().Equal(incorporatedResult.Result, stored) - }, - ).Return(nil) - ms.sealsPL.On("Add", mock.Anything).Run( - func(args mock.Arguments) { - seal := args.Get(0).(*flow.IncorporatedResultSeal) - ms.Assert().Equal(incorporatedResult, seal.IncorporatedResult) - ms.Assert().Equal(incorporatedResult.Result.BlockID, seal.Seal.BlockID) - ms.Assert().Equal(incorporatedResult.Result.ID(), seal.Seal.ResultID) - ms.Assert().Equal( - incorporatedResult.Result.Chunks.Len(), - len(seal.Seal.AggregatedApprovalSigs), - ) - }, - ).Return(true) - - err = ms.matching.sealResult(incorporatedResult) - ms.Require().NoError(err, "should generate seal on correct sealable result") - - ms.resultsDB.AssertNumberOfCalls(ms.T(), "Store", 1) - ms.sealsPL.AssertNumberOfCalls(ms.T(), "Add", 1) + ms.Assert().Empty(results, "expecting no sealable result") } +// TestRequestReceiptsPendingBlocks tests matching.Engine.requestPending(): +// * generate n=100 consecutive blocks, where the first one is sealed and the last one is final func (ms *MatchingSuite) TestRequestReceiptsPendingBlocks() { + // create blocks n := 100 - - // Create n consecutive blocks - // the first one is sealed and the last one is final - - headers := []flow.Header{} - - parentHeader := flow.Header{ - ChainID: flow.Emulator, - ParentID: unittest.IdentifierFixture(), - Height: 0, - PayloadHash: unittest.IdentifierFixture(), - Timestamp: time.Now().UTC(), - View: uint64(rand.Intn(1000)), - ParentVoterIDs: unittest.IdentifierListFixture(4), - ParentVoterSig: unittest.SignatureFixture(), - ProposerID: unittest.IdentifierFixture(), - ProposerSig: unittest.SignatureFixture(), - } - + orderedBlocks := make([]flow.Block, 0, n) + parentBlock := ms.unfinalizedBlock for i := 0; i < n; i++ { - newHeader := unittest.BlockHeaderWithParentFixture(&parentHeader) - parentHeader = newHeader - headers = append(headers, newHeader) - } - - orderedBlocks := []flow.Block{} - for i := 0; i < n; i++ { - payload := unittest.PayloadFixture() - header := headers[i] - header.PayloadHash = payload.Hash() - block := flow.Block{ - Header: &header, - Payload: payload, - } + block := unittest.BlockWithParentFixture(parentBlock.Header) ms.blocks[block.ID()] = &block orderedBlocks = append(orderedBlocks, block) + parentBlock = block } - ms.state = &protocol.State{} + // progress latest sealed and latest finalized: + ms.latestSealedBlock = orderedBlocks[0] + ms.latestFinalizedBlock = orderedBlocks[n-1] - ms.state.On("Final").Return( - func() realproto.Snapshot { - snapshot := &protocol.Snapshot{} - snapshot.On("Head").Return( - func() *flow.Header { - return orderedBlocks[n-1].Header - }, - nil, - ) - return snapshot - }, - nil, - ) - - 
ms.state.On("Sealed").Return( - func() realproto.Snapshot { - snapshot := &protocol.Snapshot{} - snapshot.On("Head").Return( - func() *flow.Header { - return orderedBlocks[0].Header - }, - nil, - ) - return snapshot - }, - nil, - ) - - ms.matching.state = ms.state - - // the results are not in the DB, which will trigger request - ms.resultsDB.On("ByBlockID", mock.Anything).Return(nil, storerr.ErrNotFound) - - // keep track of requested blocks - requestedBlocks := []flow.Identifier{} - ms.requester.On("EntityByID", mock.Anything, mock.Anything).Run( - func(args mock.Arguments) { - blockID := args.Get(0).(flow.Identifier) - requestedBlocks = append(requestedBlocks, blockID) - }, - ).Return() + // Expecting all blocks to be requested: from sealed height + 1 up to (incl.) latest finalized + for i := 1; i < n; i++ { + id := orderedBlocks[i].ID() + ms.requester.On("EntityByID", id, mock.Anything).Return().Once() + } + ms.sealsPL.On("All").Return([]*flow.IncorporatedResultSeal{}).Maybe() err := ms.matching.requestPending() ms.Require().NoError(err, "should request results for pending blocks") - - // should request n-1 blocks if n > requestReceiptThreshold - ms.Assert().Equal(len(requestedBlocks), n-1) + ms.requester.AssertExpectations(ms.T()) // asserts that requester.EntityByID(, filter.Any) was called } func stateSnapshotForUnknownBlock() *protocol.Snapshot { From 09f9d790f9889b72fe1793314fcc72bf8db7451f Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 02:30:46 -0700 Subject: [PATCH 047/105] wip --- engine/consensus/matching/engine_test.go | 2 +- module/builder/consensus/builder.go | 52 +++++++++--------- module/builder/consensus/builder_test.go | 68 +++++++++++++----------- utils/unittest/fixtures.go | 9 +++- 4 files changed, 71 insertions(+), 60 deletions(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 069714761c3..770fc39dfba 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -1026,7 +1026,7 @@ func (ms *MatchingSuite) validSubgraphFixture() subgraphFixture { previousResult := unittest.ExecutionResultFixture(unittest.WithBlock(&parentBlock)) result := unittest.ExecutionResultFixture( unittest.WithBlock(&block), - unittest.WithPreviousResult(previousResult.ID()), + unittest.WithPreviousResult(previousResult), ) // Exec Receipt for block with valid subgraph diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index f71d56a49b0..94f331344bb 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -4,6 +4,7 @@ package consensus import ( "bytes" + "encoding/json" "errors" "fmt" "time" @@ -233,38 +234,39 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er // roadmap (https://github.com/dapperlabs/flow-go/issues/4872) // create a mapping of block to seal for all seals in our pool + // We consider two seals as inconsistent, if they have different start or end states encounteredInconsistentSealsForSameBlock := false - byBlock := make(map[flow.Identifier]*flow.SealContainer) - for _, sealContainer := range b.sealPool.All() { - seal := sealContainer.Seal - if sc2, found := byBlock[seal.BlockID]; found { - if len(sealContainer.ExecutionResult.Chunks) < 1 { - return nil, fmt.Errorf("ExecutionResult without chunks: %v", sealContainer.ExecutionResult.ID()) + byBlock := make(map[flow.Identifier]*flow.IncorporatedResultSeal) + for _, irSeal := range b.sealPool.All() { + if 
len(irSeal.IncorporatedResult.Result.Chunks) < 1 { + return nil, fmt.Errorf("ExecutionResult without chunks: %v", irSeal.IncorporatedResult.Result.ID()) + } + if irSeal2, found := byBlock[irSeal.Seal.BlockID]; found { + sc1json, err := json.Marshal(irSeal) + if err != nil { + return nil, err } - if len(sc2.ExecutionResult.Chunks) < 1 { - return nil, fmt.Errorf("ExecutionResult without chunks: %v", sc2.ExecutionResult.ID()) + sc2json, err := json.Marshal(irSeal2) + if err != nil { + return nil, err } - // only continue if both seals have same start AND end state: - if !bytes.Equal(sealContainer.Seal.FinalState, sc2.Seal.FinalState) || - !bytes.Equal(sealContainer.ExecutionResult.Chunks[0].StartState, sc2.ExecutionResult.Chunks[0].StartState) { - sc1json, err := json.Marshal(sealContainer) - if err != nil { - return nil, err - } - sc2json, err := json.Marshal(sc2) - if err != nil { - return nil, err - } - - fmt.Printf("ERROR: multiple seals for the same block %v: %s and %s", seal.BlockID, string(sc1json), string(sc2json)) + + // check whether seals are inconsistent: + if !bytes.Equal(irSeal.Seal.FinalState, irSeal2.Seal.FinalState) || + !bytes.Equal(irSeal.IncorporatedResult.Result.Chunks[0].StartState, irSeal2.IncorporatedResult.Result.Chunks[0].StartState) { + fmt.Printf("ERROR: inconsistent seals for the same block %v: %s and %s", irSeal.Seal.BlockID, string(sc1json), string(sc2json)) encounteredInconsistentSealsForSameBlock = true + } else { + fmt.Printf("WARNING: multiple seals with different IDs for the same block %v: %s and %s", irSeal.Seal.BlockID, string(sc1json), string(sc2json)) } + } else { - byBlock[seal.BlockID] = sealContainer + byBlock[irSeal.Seal.BlockID] = irSeal } } if encounteredInconsistentSealsForSameBlock { - byBlock = make(map[flow.Identifier]*flow.SealContainer) + // in case we find inconsistent seals, do not seal anything + byBlock = make(map[flow.Identifier]*flow.IncorporatedResultSeal) } // get the parent's block seal, which constitutes the beginning of the @@ -309,13 +311,13 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } - nextErToBeSealed := next.ExecutionResult + nextErToBeSealed := next.IncorporatedResult.Result if len(nextErToBeSealed.Chunks) < 1 { return nil, fmt.Errorf("ExecutionResult without chunks: %v", nextErToBeSealed.ID()) } initialState := nextErToBeSealed.Chunks[0].StartState if !bytes.Equal(initialState, last.FinalState) { - return nil, fmt.Errorf("seal execution states do not connect in finalized") + return nil, fmt.Errorf("seal execution states do not connect in finalized sub-chain") } seals = append(seals, next.Seal) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 7595fb99aab..22c86922310 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -28,11 +28,12 @@ type BuilderSuite struct { suite.Suite // test helpers - firstID flow.Identifier // first block in the range we look at - finalID flow.Identifier // last finalized block - parentID flow.Identifier // parent block we build on - finalizedBlockIDs []flow.Identifier // blocks between first and final - pendingBlockIDs []flow.Identifier // blocks between final and parent + firstID flow.Identifier // first block in the range we look at + finalID flow.Identifier // last finalized block + parentID flow.Identifier // parent block we build on + finalizedBlockIDs []flow.Identifier // blocks between first and final + pendingBlockIDs []flow.Identifier // 
blocks between final and parent + resultForBlock map[flow.Identifier]*flow.ExecutionResult // map: BlockID -> Execution Result // used to populate and test the seal mempool chain []*flow.Seal // chain of seals starting first @@ -88,51 +89,51 @@ type BuilderSuite struct { // block, which is also used to create a seal for the previous block. The seal // and the result are combined in an IncorporatedResultSeal which is a candidate // for the seals mempool. -func (bs *BuilderSuite) createAndRecordBlock(previous *flow.Block) *flow.Block { - var header flow.Header - if previous == nil { - header = unittest.BlockHeaderFixture() +func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block) *flow.Block { + var block flow.Block + if parentBlock == nil { + block = unittest.BlockFixture() } else { - header = unittest.BlockHeaderWithParentFixture(previous.Header) + block = unittest.BlockWithParentFixture(parentBlock.Header) } - block := &flow.Block{ - Header: &header, - Payload: unittest.PayloadFixture(), - } - - // if previous is not nil, create a receipt for a result of the previous + // if parentBlock is not nil, create a receipt for a result of the parentBlock // block, and add it to the payload. The corresponding IncorporatedResult - // will be use to seal the previous block, and to create an + // will be use to seal the parentBlock block, and to create an // IncorporatedResultSeal for the seal mempool. var incorporatedResult *flow.IncorporatedResult - - if previous != nil { - previousResult := unittest.ResultForBlockFixture(previous) - receipt := unittest.ExecutionReceiptFixture() - receipt.ExecutionResult = *previousResult + if parentBlock != nil { + previousResult, found := bs.resultForBlock[parentBlock.ID()] + if !found { + panic("missing execution result for parent") + } + receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(previousResult)) block.Payload.Receipts = append(block.Payload.Receipts, receipt) - // include a result of the previous block - incorporatedResult = &flow.IncorporatedResult{ - IncorporatedBlockID: block.ID(), - Result: previousResult, - } + incorporatedResultForPrevBlock := unittest.IncorporatedResult.Fixture( + unittest.IncorporatedResult.WithResult(previousResult), + unittest.IncorporatedResult.WithIncorporatedBlockID(block.ID()), + ) + result := unittest.ExecutionResultFixture( + unittest.WithBlock(&block), + unittest.WithPreviousResult(*previousResult), + ) + bs.resultForBlock[result.BlockID] = result } // record block in dbs bs.headers[block.ID()] = block.Header bs.heights[block.Header.Height] = block.Header - bs.blocks[block.ID()] = block + bs.blocks[block.ID()] = &block bs.index[block.ID()] = block.Payload.Index() - // seal the previous block with the result included in this block. Do not + // seal the parentBlock block with the result included in this block. Do not // seal the first block because it is assumed that it is already sealed. - if previous != nil && previous.ID() != bs.firstID { + if parentBlock != nil && parentBlock.ID() != bs.firstID { bs.chainSeal(incorporatedResult) } - return block + return &block } // Create a seal for the result's block. 
The corresponding @@ -171,6 +172,7 @@ func (bs *BuilderSuite) SetupTest() { // reset test helpers bs.pendingBlockIDs = nil bs.finalizedBlockIDs = nil + bs.resultForBlock = make(map[flow.Identifier]*flow.ExecutionResult) bs.chain = nil bs.irsMap = make(map[flow.Identifier]*flow.IncorporatedResultSeal) @@ -197,11 +199,13 @@ func (bs *BuilderSuite) SetupTest() { // insert the first block in our range first := bs.createAndRecordBlock(nil) bs.firstID = first.ID() + firstResult := unittest.ExecutionResultFixture(unittest.WithBlock(first)) bs.lastSeal = &flow.Seal{ BlockID: first.ID(), - ResultID: flow.ZeroID, + ResultID: firstResult.ID(), FinalState: unittest.StateCommitmentFixture(), } + bs.resultForBlock[firstResult.BlockID] = firstResult // insert the finalized blocks between first and final previous := first diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 2ff091b7408..07166013602 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -441,9 +441,14 @@ func ExecutionReceiptFixture(opts ...func(*flow.ExecutionReceipt)) *flow.Executi return receipt } -func WithPreviousResult(resultID flow.Identifier) func(*flow.ExecutionResult) { +func WithPreviousResult(prevResult flow.ExecutionResult) func(*flow.ExecutionResult) { return func(result *flow.ExecutionResult) { - result.PreviousResultID = resultID + result.PreviousResultID = prevResult.ID() + finalState, ok := prevResult.FinalStateCommitment() + if !ok { + panic("missing final state commitment") + } + result.Chunks[0].StartState = finalState } } From b25a0c8601da823dc6f43193af7ec36511e08119 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 02:52:41 -0700 Subject: [PATCH 048/105] fixed builder tests --- module/builder/consensus/builder_test.go | 12 ++++++++---- utils/unittest/fixtures.go | 2 ++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 22c86922310..d009f525671 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -101,7 +101,7 @@ func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block) *flow.Bloc // block, and add it to the payload. The corresponding IncorporatedResult // will be use to seal the parentBlock block, and to create an // IncorporatedResultSeal for the seal mempool. - var incorporatedResult *flow.IncorporatedResult + var incorporatedResultForPrevBlock *flow.IncorporatedResult if parentBlock != nil { previousResult, found := bs.resultForBlock[parentBlock.ID()] if !found { @@ -110,7 +110,7 @@ func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block) *flow.Bloc receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(previousResult)) block.Payload.Receipts = append(block.Payload.Receipts, receipt) - incorporatedResultForPrevBlock := unittest.IncorporatedResult.Fixture( + incorporatedResultForPrevBlock = unittest.IncorporatedResult.Fixture( unittest.IncorporatedResult.WithResult(previousResult), unittest.IncorporatedResult.WithIncorporatedBlockID(block.ID()), ) @@ -130,7 +130,7 @@ func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block) *flow.Bloc // seal the parentBlock block with the result included in this block. Do not // seal the first block because it is assumed that it is already sealed. 
if parentBlock != nil && parentBlock.ID() != bs.firstID { - bs.chainSeal(incorporatedResult) + bs.chainSeal(incorporatedResultForPrevBlock) } return &block @@ -200,10 +200,14 @@ func (bs *BuilderSuite) SetupTest() { first := bs.createAndRecordBlock(nil) bs.firstID = first.ID() firstResult := unittest.ExecutionResultFixture(unittest.WithBlock(first)) + firstSealedState, ok := firstResult.FinalStateCommitment() + if !ok { + panic("missing first execution result's final state commitment") + } bs.lastSeal = &flow.Seal{ BlockID: first.ID(), ResultID: firstResult.ID(), - FinalState: unittest.StateCommitmentFixture(), + FinalState: firstSealedState, } bs.resultForBlock[firstResult.BlockID] = firstResult diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 07166013602..1bb92e050ce 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -454,9 +454,11 @@ func WithPreviousResult(prevResult flow.ExecutionResult) func(*flow.ExecutionRes func WithBlock(block *flow.Block) func(*flow.ExecutionResult) { return func(result *flow.ExecutionResult) { + startState := result.Chunks[0].StartState updatedResult := *ResultForBlockFixture(block) result.BlockID = updatedResult.BlockID result.Chunks = updatedResult.Chunks + result.Chunks[0].StartState = startState } } From 068bbe89f17521e786b3ed239b15e820d1b2fbe4 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 02:57:27 -0700 Subject: [PATCH 049/105] test fix --- engine/consensus/matching/engine_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 770fc39dfba..f5bb1f3b5dc 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -1026,7 +1026,7 @@ func (ms *MatchingSuite) validSubgraphFixture() subgraphFixture { previousResult := unittest.ExecutionResultFixture(unittest.WithBlock(&parentBlock)) result := unittest.ExecutionResultFixture( unittest.WithBlock(&block), - unittest.WithPreviousResult(previousResult), + unittest.WithPreviousResult(*previousResult), ) // Exec Receipt for block with valid subgraph From b662b73da1daf6c1221ebcede5203b880cac6ac8 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 03:27:47 -0700 Subject: [PATCH 050/105] added some more sanity checks to avoid sealing empty state commitment --- engine/consensus/matching/engine.go | 2 +- module/builder/consensus/builder.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 5fa1d4ffd1c..0c83fc3a88c 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -207,7 +207,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece Logger() resultFinalState, ok := receipt.ExecutionResult.FinalStateCommitment() - if !ok { + if !ok || len(resultFinalState) < 1 { log.Error().Msg("execution receipt without FinalStateCommit received") return engine.NewInvalidInputErrorf("execution receipt without FinalStateCommit: %x", receipt.ID()) } diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 94f331344bb..43b74c921bc 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -241,6 +241,10 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er if len(irSeal.IncorporatedResult.Result.Chunks) < 1 { return nil, 
fmt.Errorf("ExecutionResult without chunks: %v", irSeal.IncorporatedResult.Result.ID()) } + if len(irSeal.Seal.FinalState) < 1 { + // respective Execution Result should have been rejected by matching engine + return nil, fmt.Errorf("seal with empty state commitment: %v", irSeal.ID()) + } if irSeal2, found := byBlock[irSeal.Seal.BlockID]; found { sc1json, err := json.Marshal(irSeal) if err != nil { From 436c7b8c3be24885db8935ed3cc9435cb3e7adcc Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 03:43:26 -0700 Subject: [PATCH 051/105] linted code --- engine/consensus/matching/engine_test.go | 7 ------- module/builder/consensus/builder.go | 1 - module/mempool/stdmap/approvals_test.go | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index f5bb1f3b5dc..ff9dfe3cc0a 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -975,13 +975,6 @@ func approvalFor(result *flow.ExecutionResult, chunkIdx uint64, approverID flow. ) } -func expectedID(expectedID flow.Identifier) interface{} { - return mock.MatchedBy( - func(actualID flow.Identifier) bool { - return expectedID == actualID - }) -} - func entityWithID(expectedID flow.Identifier) interface{} { return mock.MatchedBy( func(entity flow.Entity) bool { diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 43b74c921bc..30db458a5d0 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -371,7 +371,6 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er seals = append(seals, next.Seal) sealCount++ delete(byBlock, pendingID) - last = next.Seal } b.tracer.FinishSpan(parentID, trace.CONBuildOnCreatePayloadSeals) diff --git a/module/mempool/stdmap/approvals_test.go b/module/mempool/stdmap/approvals_test.go index 51c1fdfbd87..a1493dd456e 100644 --- a/module/mempool/stdmap/approvals_test.go +++ b/module/mempool/stdmap/approvals_test.go @@ -3,10 +3,10 @@ package stdmap import ( "testing" - "github.com/onflow/flow-go/model/flow" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) From 7a523f32d67972044b9515dd4ee632f24e9e5597 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 26 Oct 2020 07:50:32 -0700 Subject: [PATCH 052/105] exclude the last executed block from the execution queues --- engine/execution/ingestion/engine.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 40a18967c83..e00a4247785 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -209,12 +209,19 @@ func (e *Engine) reloadFinalizedUnexecutedBlocks() error { return fmt.Errorf("could not get finalized block: %w", err) } - // find the last executed and finalized block - lastExecutedHeight := final.Height - for ; ; lastExecutedHeight-- { - header, err := e.state.AtHeight(lastExecutedHeight).Head() + // find the first unexecuted and finalized block + // we iterate from the last finalized, check if it has been executed, + // if not, keep going to the lower height, until we find an executed + // block, and then the next height is the first unexecuted. + // if there is only one finalized, and it's executed (i.e. 
genesis), + // then the firstUnexecuted is a unfinalized block, which is ok, + // because the next loop will ensure it only iterate through finalized + // block. + firstUnexecuted := final.Height + for ; ; firstUnexecuted-- { + header, err := e.state.AtHeight(firstUnexecuted).Head() if err != nil { - return fmt.Errorf("could not get header at height: %v, %w", lastExecutedHeight, err) + return fmt.Errorf("could not get header at height: %v, %w", firstUnexecuted, err) } executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) @@ -223,15 +230,16 @@ func (e *Engine) reloadFinalizedUnexecutedBlocks() error { } if executed { + firstUnexecuted++ break } } - e.log.Info().Msgf("last finalized and executed height: %v", lastExecutedHeight) + e.log.Info().Msgf("last finalized and executed height: %v", firstUnexecuted) // starting from the last executed block, go through each unexecuted and finalized block // reload its block to execution queues - for height := lastExecutedHeight; height <= final.Height; height++ { + for height := firstUnexecuted; height <= final.Height; height++ { header, err := e.state.AtHeight(height).Head() if err != nil { return fmt.Errorf("could not get header at height: %v, %w", height, err) From b79dc3a86322f5362adc8e1b52eea4842de2c374 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 26 Oct 2020 14:32:09 -0700 Subject: [PATCH 053/105] update comment --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index e00a4247785..ed4146a2d62 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -237,7 +237,7 @@ func (e *Engine) reloadFinalizedUnexecutedBlocks() error { e.log.Info().Msgf("last finalized and executed height: %v", firstUnexecuted) - // starting from the last executed block, go through each unexecuted and finalized block + // starting from the first unexecuted block, go through each unexecuted and finalized block // reload its block to execution queues for height := firstUnexecuted; height <= final.Height; height++ { header, err := e.state.AtHeight(height).Head() From d19b61a2bae0f09b698c548ab23ab60d74dc19a0 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 19:55:45 -0700 Subject: [PATCH 054/105] Apply suggestions from code review Co-authored-by: Jordan Schalm --- module/builder/consensus/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 30db458a5d0..ffb27f7d108 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -315,7 +315,7 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } - nextErToBeSealed := next.IncorporatedResult.Result + nextResultToBeSealed := next.IncorporatedResult.Result if len(nextErToBeSealed.Chunks) < 1 { return nil, fmt.Errorf("ExecutionResult without chunks: %v", nextErToBeSealed.ID()) } From 66ea1fe812b6c6135273015de7b624b304358a98 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 21:36:04 -0700 Subject: [PATCH 055/105] code decluttering; extended goDoc and method comments --- engine/consensus/matching/engine.go | 23 +++++++-- model/flow/executionResult.go | 26 ++++++++-- module/builder/consensus/builder.go | 9 ++-- module/mempool/stdmap/approvals.go | 35 ++++---------- module/mempool/stdmap/approvals_test.go | 
1 - module/mempool/stdmap/incorporated_results.go | 47 +++++-------------- 6 files changed, 67 insertions(+), 74 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 0c83fc3a88c..51d6a454150 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -214,6 +214,21 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece log = log.With().Hex("final_state", resultFinalState).Logger() log.Info().Msg("execution receipt received") + // CAUTION INCOMPLETE + // For many other messages, we check that the message's origin (as established by the + // networking layer) is equal to the message's creator as reported by the message itself. + // Thereby we rely on the networking layer for enforcing message integrity via the + // networking key. + // Unfortunately, this shortcut is _not_ applicable here for the following reason. + // Execution Nodes sync state between each other and have the ability so skip computing + // blocks. They could build on top of other nodes' execution results. When an Execution + // Node receives a request for a block it hasn't itself computed, it will forward + // receipts from other nodes (which it potentially used to continue its own computation). + // Therefore, message origin and message creator are not necessarily the same + // for Execution Receipts (in case an Exec Node forwards a receipt from a different node). + + // TODO: check the ExecutionReceipt's cryptographic integrity using the staking key + // if the receipt is for an unknown block, skip it. It will be re-requested later. head, err := e.state.AtBlockID(receipt.ExecutionResult.BlockID).Head() if err != nil { @@ -237,9 +252,8 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return fmt.Errorf("failed to process execution receipt: %w", err) } - // TODO: check the approval's cryptographic integrityt. + // TODO: check the approval's cryptographic integrity. // if !errors.Is(err - // store the result to make it persistent for later result := &receipt.ExecutionResult err = e.resultsDB.Store(result) // internally de-duplicates @@ -282,7 +296,10 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro Logger() log.Info().Msg("result approval received") - // check approver matches the origin ID + // Check that the message's origin (as established by the networking layer) is + // equal to the message's creator as reported by the message itself. Thereby, + // we rely on the networking layer for enforcing message integrity via the + // networking key. if approval.Body.ApproverID != originID { return engine.NewInvalidInputErrorf("invalid origin for approval: %x", originID) } diff --git a/model/flow/executionResult.go b/model/flow/executionResult.go index cbb02faea91..01010369101 100644 --- a/model/flow/executionResult.go +++ b/model/flow/executionResult.go @@ -27,12 +27,32 @@ func (er ExecutionResult) Checksum() Identifier { return MakeID(er) } -// FinalStateCommitment gets the final state of the result and returns false -// if the number of chunks is 0 (used as a sanity check) +// FinalStateCommitment returns the Execution Result's commitment to the final +// execution state of the block, i.e. the last chunk's output state. +// +// By protocol definition, each ExecutionReceipt must contain at least one +// chunk (system chunk). Convention: publishing an ExecutionReceipt without a +// final state commitment is a slashable protocol violation. 
+// TODO: change bool to error return with a sentinel error func (er ExecutionResult) FinalStateCommitment() (StateCommitment, bool) { if er.Chunks.Len() == 0 { return nil, false } + s := er.Chunks[er.Chunks.Len()-1].EndState + return s, len(s) > 0 // empty state commitment -> second return value is false +} - return er.Chunks[er.Chunks.Len()-1].EndState, true +// InitialStateCommit returns a commitment to the execution state used as input +// for computing the block the block, i.e. the leading chunk's input state. +// +// By protocol definition, each ExecutionReceipt must contain at least one +// chunk (system chunk). Convention: publishing an ExecutionReceipt without an +// initial state commitment is a slashable protocol violation. +// TODO: change bool to error return with a sentinel error +func (er ExecutionResult) InitialStateCommit() (StateCommitment, bool) { + if er.Chunks.Len() == 0 { + return nil, false + } + s := er.Chunks[0].StartState + return s, len(s) > 0 // empty state commitment -> second return value is false } diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index ffb27f7d108..5d06286e4b3 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -315,13 +315,14 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } + // enforce that execution results form chain nextResultToBeSealed := next.IncorporatedResult.Result - if len(nextErToBeSealed.Chunks) < 1 { - return nil, fmt.Errorf("ExecutionResult without chunks: %v", nextErToBeSealed.ID()) + initialState, ok := nextResultToBeSealed.InitialStateCommit() + if !ok { + return nil, fmt.Errorf("missing initial state commitment in execution result %v", nextResultToBeSealed.ID()) } - initialState := nextErToBeSealed.Chunks[0].StartState if !bytes.Equal(initialState, last.FinalState) { - return nil, fmt.Errorf("seal execution states do not connect in finalized sub-chain") + return nil, fmt.Errorf("execution results do not form chain") } seals = append(seals, next.Seal) diff --git a/module/mempool/stdmap/approvals.go b/module/mempool/stdmap/approvals.go index 8d47b067910..de16174be45 100644 --- a/module/mempool/stdmap/approvals.go +++ b/module/mempool/stdmap/approvals.go @@ -176,26 +176,17 @@ func (a *Approvals) ByChunk(resultID flow.Identifier, chunkIndex uint64) map[flo // To guarantee concurrency safety, we need to copy the map via a locked operation in the backend. // Otherwise, another routine might concurrently modify the map stored for the same resultID. approvals := make(map[flow.Identifier]*flow.ResultApproval) - err := a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + _ = a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { entity, exists := backdata[chunkKey] if !exists { return nil } - approvalMapEntity, ok := entity.(model.ApprovalMapEntity) - if !ok { - return fmt.Errorf("unexpected entity type %T", entity) - } - for i, app := range approvalMapEntity.Approvals { + // uncaught type assertion; should never panic as the mempool only stores ApprovalMapEntity: + for i, app := range entity.(model.ApprovalMapEntity).Approvals { approvals[i] = app } return nil - }) - if err != nil { - // The current implementation never reaches this path, as it only stores - // ApprovalMapEntity as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. 
- panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) - } + }) // error return impossible return approvals } @@ -204,25 +195,15 @@ func (a *Approvals) ByChunk(resultID flow.Identifier, chunkIndex uint64) map[flo func (a *Approvals) All() []*flow.ResultApproval { res := make([]*flow.ResultApproval, 0) - err := a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + _ = a.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { for _, entity := range backdata { - approvalMapEntity, ok := entity.(model.ApprovalMapEntity) - if !ok { - // should never happen: as the mempool only stores ApprovalMapEntity - return fmt.Errorf("unexpected entity type %T", entity) - } - for _, approval := range approvalMapEntity.Approvals { + // uncaught type assertion; should never panic as the mempool only stores ApprovalMapEntity: + for _, approval := range entity.(model.ApprovalMapEntity).Approvals { res = append(res, approval) } } return nil - }) - if err != nil { - // The current implementation never reaches this path, as it only stores - // ApprovalMapEntity as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. - panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) - } + }) // error return impossible return res } diff --git a/module/mempool/stdmap/approvals_test.go b/module/mempool/stdmap/approvals_test.go index a1493dd456e..989775f480d 100644 --- a/module/mempool/stdmap/approvals_test.go +++ b/module/mempool/stdmap/approvals_test.go @@ -22,7 +22,6 @@ func TestApprovals(t *testing.T) { ok, err := approvalPL.Add(approval1) require.True(t, ok) require.NoError(t, err) - println("Foo") // checks the existence of approval for key approvals := approvalPL.ByChunk(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex) diff --git a/module/mempool/stdmap/incorporated_results.go b/module/mempool/stdmap/incorporated_results.go index d0576131f5c..644524b1f7d 100644 --- a/module/mempool/stdmap/incorporated_results.go +++ b/module/mempool/stdmap/incorporated_results.go @@ -78,25 +78,15 @@ func (ir *IncorporatedResults) All() []*flow.IncorporatedResult { // To guarantee concurrency safety, we need to copy the map via a locked operation in the backend. // Otherwise, another routine might concurrently modify the maps stored as mempool entities. res := make([]*flow.IncorporatedResult, 0) - err := ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + _ = ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { for _, entity := range backdata { - irMap, ok := entity.(model.IncorporatedResultMap) - if !ok { - // should never happen: as the mempool only stores IncorporatedResultMap - return fmt.Errorf("unexpected entity type %T", entity) - } - for _, ir := range irMap.IncorporatedResults { + // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultMap: + for _, ir := range entity.(model.IncorporatedResultMap).IncorporatedResults { res = append(res, ir) } } return nil - }) - if err != nil { - // The current implementation never reaches this path, as it only stores - // IncorporatedResultMap as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. 
- panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) - } + }) // error return impossible return res } @@ -113,11 +103,8 @@ func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.Execu if !exists { return storage.ErrNotFound } - irMap, ok := entity.(model.IncorporatedResultMap) - if !ok { - // should never happen: as the mempoo - return fmt.Errorf("unexpected entity type %T", entity) - } + // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultMap: + irMap := entity.(model.IncorporatedResultMap) result = irMap.ExecutionResult for i, res := range irMap.IncorporatedResults { incResults[i] = res @@ -127,9 +114,7 @@ func (ir *IncorporatedResults) ByResultID(resultID flow.Identifier) (*flow.Execu if errors.Is(err, storage.ErrNotFound) { return nil, nil, false } else if err != nil { - // The current implementation never reaches this path, as it only stores - // IncorporatedResultMap as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. + // The current implementation never reaches this path panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) } @@ -141,7 +126,7 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) key := incorporatedResult.Result.ID() removed := false - err := ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { + _ = ir.backend.Run(func(backdata map[flow.Identifier]flow.Entity) error { var incResults map[flow.Identifier]*flow.IncorporatedResult entity, ok := backdata[key] @@ -149,12 +134,8 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) // there are no items for this result return nil } - incorporatedResultMap, ok := entity.(model.IncorporatedResultMap) - if !ok { - return fmt.Errorf("unexpected entity type %T", entity) - } - - incResults = incorporatedResultMap.IncorporatedResults + // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultMap: + incResults = entity.(model.IncorporatedResultMap).IncorporatedResults if _, ok := incResults[incorporatedResult.IncorporatedBlockID]; !ok { // there are no items for this IncorporatedBlockID return nil @@ -171,13 +152,7 @@ func (ir *IncorporatedResults) Rem(incorporatedResult *flow.IncorporatedResult) removed = true *ir.size-- return nil - }) - if err != nil { - // The current implementation never reaches this path, as it only stores - // IncorporatedResultMap as entities in the mempool. Reaching this error - // condition implies this code was inconsistently modified. 
- panic("unexpected internal error in IncorporatedResults mempool: " + err.Error()) - } + }) // error return impossible return removed } From 69f4fd8fc8402bec589cf061ef09a6c493b28dbd Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Mon, 26 Oct 2020 21:58:26 -0700 Subject: [PATCH 056/105] fixed camel cases in go code files --- engine/consensus/matching/engine.go | 5 ++--- .../{executedTransaction.go => executed_transaction.go} | 0 model/flow/{executionReceipt.go => execution_receipt.go} | 0 model/flow/{executionResult.go => execution_result.go} | 0 module/mempool/stdmap/incorporated_result_seals.go | 8 +++----- 5 files changed, 5 insertions(+), 8 deletions(-) rename model/flow/{executedTransaction.go => executed_transaction.go} (100%) rename model/flow/{executionReceipt.go => execution_receipt.go} (100%) rename model/flow/{executionResult.go => execution_result.go} (100%) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 51d6a454150..b42291ce94f 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -207,7 +207,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece Logger() resultFinalState, ok := receipt.ExecutionResult.FinalStateCommitment() - if !ok || len(resultFinalState) < 1 { + if !ok { // return log.Error().Msg("execution receipt without FinalStateCommit received") return engine.NewInvalidInputErrorf("execution receipt without FinalStateCommit: %x", receipt.ID()) } @@ -252,8 +252,6 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return fmt.Errorf("failed to process execution receipt: %w", err) } - // TODO: check the approval's cryptographic integrity. - // if !errors.Is(err // store the result to make it persistent for later result := &receipt.ExecutionResult err = e.resultsDB.Store(result) // internally de-duplicates @@ -697,6 +695,7 @@ func (e *Engine) sealResult(incorporatedResult *flow.IncorporatedResult) error { // get final state of execution result finalState, ok := incorporatedResult.Result.FinalStateCommitment() if !ok { + // message correctness should have been checked before: failure here is an internal implementation bug return fmt.Errorf("failed to get final state commitment from Execution Result") } diff --git a/model/flow/executedTransaction.go b/model/flow/executed_transaction.go similarity index 100% rename from model/flow/executedTransaction.go rename to model/flow/executed_transaction.go diff --git a/model/flow/executionReceipt.go b/model/flow/execution_receipt.go similarity index 100% rename from model/flow/executionReceipt.go rename to model/flow/execution_receipt.go diff --git a/model/flow/executionResult.go b/model/flow/execution_result.go similarity index 100% rename from model/flow/executionResult.go rename to model/flow/execution_result.go diff --git a/module/mempool/stdmap/incorporated_result_seals.go b/module/mempool/stdmap/incorporated_result_seals.go index 6a8fdb589d8..f6abdd6b1df 100644 --- a/module/mempool/stdmap/incorporated_result_seals.go +++ b/module/mempool/stdmap/incorporated_result_seals.go @@ -25,6 +25,7 @@ func (ir *IncorporatedResultSeals) All() []*flow.IncorporatedResultSeal { entities := ir.Backend.All() res := make([]*flow.IncorporatedResultSeal, 0, len(ir.entities)) for _, entity := range entities { + // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultSeal: res = append(res, entity.(*flow.IncorporatedResultSeal)) } return res @@ -36,11 +37,8 
@@ func (ir *IncorporatedResultSeals) ByID(id flow.Identifier) (*flow.IncorporatedR if !ok { return nil, false } - res, ok := entity.(*flow.IncorporatedResultSeal) - if !ok { - return nil, false - } - return res, true + // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultSeal: + return entity.(*flow.IncorporatedResultSeal), true } // Rem removes an IncorporatedResultSeal from the mempool From 9f044b1cf1ed90f1279100546ab3a0819c0a85f4 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 27 Oct 2020 09:58:35 -0700 Subject: [PATCH 057/105] log queue head, and refactor with IsBlockExecuted --- engine/execution/ingestion/engine.go | 68 +++++++++++++++------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index ed4146a2d62..2a0b991b18b 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -338,16 +338,14 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { blockID := block.ID() log := e.log.With().Hex("block_id", blockID[:]).Logger() - _, err := e.execState.StateCommitmentByBlockID(ctx, blockID) - if err == nil { - // a statecommitment being stored indicates the block - // has been executed - log.Debug().Msg("block has been executed already") - return nil + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) + if err != nil { + return fmt.Errorf("could not check whether block is executed: %w", err) } - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not query state commitment for block: %w", err) + if executed { + log.Debug().Msg("block has been executed already") + return nil } // unexecuted block @@ -385,10 +383,10 @@ func (e *Engine) enqueueBlockAndCheckExecutable(block *flow.Block, checkStateSyn return nil } + firstUnexecutedHeight := queue.Head.Item.Height() if checkStateSync { // whenever the queue grows, we need to check whether the state sync should be // triggered. - firstUnexecutedHeight := queue.Head.Item.Height() e.unit.Launch(func() { e.checkStateSyncStart(firstUnexecutedHeight) }) @@ -428,7 +426,14 @@ func (e *Engine) enqueueBlockAndCheckExecutable(block *flow.Block, checkStateSyn } // execute the block if the block is ready to be executed - e.executeBlockIfComplete(executableBlock) + completed := e.executeBlockIfComplete(executableBlock) + + log.Info(). + // if the execution is halt, but the queue keeps growing, we could check which block + // hasn't been executed. + Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). + Bool("completed", completed). 
+ Msg("block is enqueued") return nil }, @@ -1404,25 +1409,26 @@ func (e *Engine) deltaRange(ctx context.Context, fromHeight uint64, toHeight uin } blockID := header.ID() - _, err = e.execState.StateCommitmentByBlockID(ctx, blockID) - - if err == nil { - // this block has been executed, we will send the delta - delta, err := e.execState.RetrieveStateDelta(ctx, blockID) - if err != nil { - return fmt.Errorf("could not retrieve state delta for block %v, %w", blockID, err) - } - onDelta(delta) + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) + if err != nil { + return fmt.Errorf("could not check whether block is executed: %w", err) + } - } else if errors.Is(err, storage.ErrNotFound) { + if !executed { // this block has not been executed, - // it parent block hasn't been executed, the higher block won't be - // executed either, so we stop iterating through the heights + // we could stop iterating through the heights, because + // if a parent block is not executed, its children won't be executed break - } else { - return fmt.Errorf("could not query statecommitment for height %v: %w", height, err) } + + // this block has been executed, we will send the delta + delta, err := e.execState.RetrieveStateDelta(ctx, blockID) + if err != nil { + return fmt.Errorf("could not retrieve state delta for block %v, %w", blockID, err) + } + + onDelta(delta) } return nil @@ -1450,19 +1456,17 @@ func (e *Engine) handleStateDeltaResponse(executionNodeID flow.Identifier, delta // check if the block has been executed already // delta ID is block ID blockID := delta.ID() - _, err = e.execState.StateCommitmentByBlockID(e.unit.Ctx(), blockID) + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) + if err != nil { + return fmt.Errorf("could not check whether block is executed: %w", err) + } - if err == nil { - // the block has been executed, ignore + if executed { + // if the block has been executed, we don't need the delta, exit here e.log.Info().Hex("block", logging.Entity(delta)).Msg("ignore executed state delta") return nil } - // exception - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not get know block was executed or not: %w", err) - } - // block not executed yet, check if the block has been sealed lastSealed, err := e.state.Sealed().Head() if err != nil { From 626e807c763fe8dbb63739ef16ebf6c79762415b Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Tue, 27 Oct 2020 10:38:05 -0700 Subject: [PATCH 058/105] use Restart=always in example systemd deploy files This is what we use for internally run nodes and what we recommend to others running Flow nodes. 
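
For context on the directive change below: with Restart=always, systemd restarts the service after any exit (clean exit, crash, or termination by signal) once the RestartSec delay has elapsed (5s in these unit files), whereas Restart=no leaves a stopped node down until it is started again manually.
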
--- deploy/systemd-docker/flow-access.service | 2 +- deploy/systemd-docker/flow-collection.service | 2 +- deploy/systemd-docker/flow-consensus.service | 2 +- deploy/systemd-docker/flow-execution.service | 2 +- deploy/systemd-docker/flow-verification.service | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/systemd-docker/flow-access.service b/deploy/systemd-docker/flow-access.service index cfea6c913d2..3d6960ed4b4 100644 --- a/deploy/systemd-docker/flow-access.service +++ b/deploy/systemd-docker/flow-access.service @@ -12,7 +12,7 @@ Type=simple TimeoutStopSec=1m RestartSec=5s -Restart=no +Restart=always StandardOutput=journal diff --git a/deploy/systemd-docker/flow-collection.service b/deploy/systemd-docker/flow-collection.service index 27ff6f006d2..07e5df19870 100644 --- a/deploy/systemd-docker/flow-collection.service +++ b/deploy/systemd-docker/flow-collection.service @@ -12,7 +12,7 @@ Type=simple TimeoutStopSec=1m RestartSec=5s -Restart=no +Restart=always StandardOutput=journal diff --git a/deploy/systemd-docker/flow-consensus.service b/deploy/systemd-docker/flow-consensus.service index 5f629043722..1c5b21273f3 100644 --- a/deploy/systemd-docker/flow-consensus.service +++ b/deploy/systemd-docker/flow-consensus.service @@ -12,7 +12,7 @@ Type=simple TimeoutStopSec=1m RestartSec=5s -Restart=no +Restart=always StandardOutput=journal diff --git a/deploy/systemd-docker/flow-execution.service b/deploy/systemd-docker/flow-execution.service index 32a7fe18c4f..ce385aee3d1 100644 --- a/deploy/systemd-docker/flow-execution.service +++ b/deploy/systemd-docker/flow-execution.service @@ -12,7 +12,7 @@ Type=simple TimeoutStopSec=1m RestartSec=5s -Restart=no +Restart=always StandardOutput=journal diff --git a/deploy/systemd-docker/flow-verification.service b/deploy/systemd-docker/flow-verification.service index 73a6a5d15cf..fdc6a698596 100644 --- a/deploy/systemd-docker/flow-verification.service +++ b/deploy/systemd-docker/flow-verification.service @@ -12,7 +12,7 @@ Type=simple TimeoutStopSec=1m RestartSec=5s -Restart=no +Restart=always StandardOutput=journal From 83c490ed326da563634e1d0e9da2f398517e33f0 Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Tue, 27 Oct 2020 11:27:21 -0700 Subject: [PATCH 059/105] Fix merge --- engine/execution/ingestion/engine.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2af2a4a2a69..4c3fae19f09 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -746,6 +746,20 @@ func (e *Engine) handleComputationResult( return nil, fmt.Errorf("could not generate execution receipt: %w", err) } + err = func() error { + span, _ := e.tracer.StartSpanFromContext(ctx, trace.EXESaveExecutionReceipt) + defer span.Finish() + + err = e.execState.PersistExecutionReceipt(ctx, receipt) + if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { + return fmt.Errorf("could not persist execution receipt: %w", err) + } + return nil + }() + if err != nil { + return nil, err + } + err = e.providerEngine.BroadcastExecutionReceipt(ctx, receipt) if err != nil { return nil, fmt.Errorf("could not send broadcast order: %w", err) @@ -881,20 +895,6 @@ func (e *Engine) saveExecutionResults( return nil, err } - err = func() error { - span, _ := e.tracer.StartSpanFromContext(childCtx, trace.EXESaveExecutionReceipt) - defer span.Finish() - - err = 
e.execState.PersistExecutionReceipt(ctx, receipt) - if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { - return fmt.Errorf("could not persist execution receipt: %w", err) - } - return nil - }() - if err != nil { - return nil, err - } - e.log.Debug(). Hex("block_id", logging.Entity(executableBlock)). Hex("start_state", originalState). From e7982f46fe78954c09d7b333fd66414c18d912ce Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 27 Oct 2020 13:58:40 -0700 Subject: [PATCH 060/105] add safety check and crash the node if unable to reload blocks --- engine/execution/ingestion/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 2a0b991b18b..c49ceb27fad 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -135,7 +135,7 @@ func New( func (e *Engine) Ready() <-chan struct{} { err := e.reloadUnexecutedBlocks() if err != nil { - e.log.Error().Err(err).Msg("failed to load all unexecuted blocks") + e.log.Fatal().Err(err).Msg("failed to load all unexecuted blocks") } return e.unit.Ready() @@ -218,7 +218,7 @@ func (e *Engine) reloadFinalizedUnexecutedBlocks() error { // because the next loop will ensure it only iterate through finalized // block. firstUnexecuted := final.Height - for ; ; firstUnexecuted-- { + for ; firstUnexecuted >= 0; firstUnexecuted-- { header, err := e.state.AtHeight(firstUnexecuted).Head() if err != nil { return fmt.Errorf("could not get header at height: %v, %w", firstUnexecuted, err) From 6fdda8d2778df3550b0f076cc3e029368a559a70 Mon Sep 17 00:00:00 2001 From: Ramtin Mehdizadeh Seraj Date: Wed, 28 Oct 2020 11:02:02 -0700 Subject: [PATCH 061/105] Adding tx byte size limit for access/collection nodes (#90) --- access/errors.go | 10 ++++++++ access/validator.go | 17 ++++++++++++++ engine/access/rpc/backend/backend.go | 1 + engine/collection/ingest/engine.go | 1 + model/flow/constants.go | 3 +++ model/flow/transaction.go | 34 ++++++++++++++++++++++++++++ 6 files changed, 66 insertions(+) diff --git a/access/errors.go b/access/errors.go index 601b0feee63..b65aef42d65 100644 --- a/access/errors.go +++ b/access/errors.go @@ -48,3 +48,13 @@ type InvalidGasLimitError struct { func (e InvalidGasLimitError) Error() string { return fmt.Sprintf("transaction gas limit (%d) exceeds the maximum gas limit (%d)", e.Actual, e.Maximum) } + +// InvalidTxByteSizeError indicates that a transaction byte size exceeds the maximum. 
+type InvalidTxByteSizeError struct { + Maximum uint64 + Actual uint64 +} + +func (e InvalidTxByteSizeError) Error() string { + return fmt.Sprintf("transaction byte size (%d) exceeds the maximum byte size allowed for a transaction (%d)", e.Actual, e.Maximum) +} diff --git a/access/validator.go b/access/validator.go index 1b800462479..7a3172d1d0d 100644 --- a/access/validator.go +++ b/access/validator.go @@ -48,6 +48,7 @@ type TransactionValidationOptions struct { AllowUnknownReferenceBlockID bool MaxGasLimit uint64 CheckScriptsParse bool + MaxTxSizeLimit uint64 } type TransactionValidator struct { @@ -66,6 +67,11 @@ func NewTransactionValidator( } func (v *TransactionValidator) Validate(tx *flow.TransactionBody) (err error) { + err = v.checkTxSizeLimit(tx) + if err != nil { + return err + } + err = v.checkMissingFields(tx) if err != nil { return err @@ -91,6 +97,17 @@ func (v *TransactionValidator) Validate(tx *flow.TransactionBody) (err error) { return nil } +func (v *TransactionValidator) checkTxSizeLimit(tx *flow.TransactionBody) error { + txSize := uint64(tx.ByteSize()) + if txSize > v.options.MaxTxSizeLimit { + return InvalidTxByteSizeError{ + Actual: txSize, + Maximum: v.options.MaxTxSizeLimit, + } + } + return nil +} + func (v *TransactionValidator) checkMissingFields(tx *flow.TransactionBody) error { missingFields := tx.MissingFields() diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 274e9e1a268..3b745aa874c 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -122,6 +122,7 @@ func configureTransactionValidator(state protocol.State) *access.TransactionVali AllowUnknownReferenceBlockID: false, MaxGasLimit: flow.DefaultMaxGasLimit, CheckScriptsParse: true, + MaxTxSizeLimit: flow.DefaultMaxTxSizeLimit, }, ) } diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index 51584952d36..b3beb0fa067 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -57,6 +57,7 @@ func New( AllowUnknownReferenceBlockID: config.AllowUnknownReference, MaxGasLimit: flow.DefaultMaxGasLimit, CheckScriptsParse: config.CheckScriptsParse, + MaxTxSizeLimit: flow.DefaultMaxTxSizeLimit, }, ) diff --git a/model/flow/constants.go b/model/flow/constants.go index ecbf2bd00d7..9c51100383d 100644 --- a/model/flow/constants.go +++ b/model/flow/constants.go @@ -19,6 +19,9 @@ const DefaultTransactionExpiryBuffer = 30 // DefaultMaxGasLimit is the default maximum value for the transaction gas limit. const DefaultMaxGasLimit = 9999 +// DefaultMaxTxSizeLimit is the default maximum transaction byte size. (64KB) +const DefaultMaxTxSizeLimit = 64000 + // DefaultAuctionWindow defines the length of the auction window at the beginning of // an epoch, during which nodes can bid for seats in the committee. Valid epoch events // such as setup and commit can only be submitted after this window has passed. 
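
For illustration, here is a minimal, self-contained sketch of the size-limit check added above. The constant and error message mirror DefaultMaxTxSizeLimit and InvalidTxByteSizeError, but the function and variable names are illustrative stand-ins rather than the flow-go API:

package main

import "fmt"

// mirrors flow.DefaultMaxTxSizeLimit (64000 bytes)
const defaultMaxTxSizeLimit uint64 = 64000

// checkTxSize is an illustrative stand-in for the validator's checkTxSizeLimit:
// a transaction whose encoded byte size exceeds the maximum is rejected before
// any further validation runs.
func checkTxSize(txByteSize uint64) error {
	if txByteSize > defaultMaxTxSizeLimit {
		return fmt.Errorf("transaction byte size (%d) exceeds the maximum byte size allowed for a transaction (%d)",
			txByteSize, defaultMaxTxSizeLimit)
	}
	return nil
}

func main() {
	fmt.Println(checkTxSize(1500))  // <nil>: within the limit
	fmt.Println(checkTxSize(70000)) // error: exceeds the 64000-byte limit
}
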
diff --git a/model/flow/transaction.go b/model/flow/transaction.go index a481569c3cf..674b441d096 100644 --- a/model/flow/transaction.go +++ b/model/flow/transaction.go @@ -64,6 +64,26 @@ func (tb TransactionBody) Fingerprint() []byte { }) } +func (tb TransactionBody) ByteSize() int { + size := 0 + size += len(tb.ReferenceBlockID) + size += len(tb.Script) + for _, arg := range tb.Arguments { + size += len(arg) + } + size += 8 // gas size + size += tb.ProposalKey.ByteSize() + size += AddressLength // payer address + size += len(tb.Authorizers) * AddressLength // Authorizers + for _, s := range tb.PayloadSignatures { + size += s.ByteSize() + } + for _, s := range tb.EnvelopeSignatures { + size += s.ByteSize() + } + return size +} + func (tb TransactionBody) ID() Identifier { return MakeID(tb) } @@ -386,6 +406,13 @@ type ProposalKey struct { SequenceNumber uint64 } +// ByteSize returns the byte size of the proposal key +func (p ProposalKey) ByteSize() int { + keyIDLen := 8 + sequenceNumberLen := 8 + return len(p.Address) + keyIDLen + sequenceNumberLen +} + // A TransactionSignature is a signature associated with a specific account key. type TransactionSignature struct { Address Address @@ -394,6 +421,13 @@ type TransactionSignature struct { Signature []byte } +// ByteSize returns the byte size of the transaction signature +func (s TransactionSignature) ByteSize() int { + signerIndexLen := 8 + keyIDLen := 8 + return len(s.Address) + signerIndexLen + keyIDLen + len(s.Signature) +} + func (s TransactionSignature) Fingerprint() []byte { return fingerprint.Fingerprint(s.canonicalForm()) } From 445c268312a879401dfa4cd05bd1928c9ff6407c Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 11:23:46 -0700 Subject: [PATCH 062/105] reverted minor change in ExecutionResult: an empty state commitment is now considered valid. 
Proper fix will be part of a downstream PR --- model/flow/execution_result.go | 8 +++++-- utils/unittest/fixtures.go | 39 +++++++++++----------------------- 2 files changed, 18 insertions(+), 29 deletions(-) diff --git a/model/flow/execution_result.go b/model/flow/execution_result.go index 01010369101..54867657d45 100644 --- a/model/flow/execution_result.go +++ b/model/flow/execution_result.go @@ -39,7 +39,9 @@ func (er ExecutionResult) FinalStateCommitment() (StateCommitment, bool) { return nil, false } s := er.Chunks[er.Chunks.Len()-1].EndState - return s, len(s) > 0 // empty state commitment -> second return value is false + // TODO: empty state commitment -> second return value is false: + // return s, len(s) > 0 + return s, true } // InitialStateCommit returns a commitment to the execution state used as input @@ -54,5 +56,7 @@ func (er ExecutionResult) InitialStateCommit() (StateCommitment, bool) { return nil, false } s := er.Chunks[0].StartState - return s, len(s) > 0 // empty state commitment -> second return value is false + // TODO: empty state commitment -> second return value is false: + // return s, len(s) > 0 + return s, true } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 1bb92e050ce..736bd4c67e7 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -398,23 +398,6 @@ func ExecutableBlockFixtureWithParent(collectionsSignerIDs [][]flow.Identifier, return executableBlock } -func ResultForBlockFixture(block *flow.Block) *flow.ExecutionResult { - chunks := 0 - if block.Payload != nil { - // +1 for system chunk - chunks = len(block.Payload.Guarantees) + 1 - } - - return &flow.ExecutionResult{ - ExecutionResultBody: flow.ExecutionResultBody{ - PreviousResultID: IdentifierFixture(), - BlockID: block.ID(), - Chunks: ChunksFixture(uint(chunks), block.ID()), - }, - Signatures: SignaturesFixture(6), - } -} - func WithExecutorID(id flow.Identifier) func(*flow.ExecutionReceipt) { return func(receipt *flow.ExecutionReceipt) { receipt.ExecutorID = id @@ -453,12 +436,17 @@ func WithPreviousResult(prevResult flow.ExecutionResult) func(*flow.ExecutionRes } func WithBlock(block *flow.Block) func(*flow.ExecutionResult) { + chunks := 1 // tailing chunk is always system chunk + if block.Payload != nil { + chunks += len(block.Payload.Guarantees) + } + blockID := block.ID() + return func(result *flow.ExecutionResult) { - startState := result.Chunks[0].StartState - updatedResult := *ResultForBlockFixture(block) - result.BlockID = updatedResult.BlockID - result.Chunks = updatedResult.Chunks - result.Chunks[0].StartState = startState + startState := result.Chunks[0].StartState // retain previous start state in case it was user-defined + result.BlockID = blockID + result.Chunks = ChunksFixture(uint(chunks), block.ID()) + result.Chunks[0].StartState = startState // set start state to value before update } } @@ -468,10 +456,7 @@ func ExecutionResultFixture(opts ...func(*flow.ExecutionResult)) *flow.Execution ExecutionResultBody: flow.ExecutionResultBody{ PreviousResultID: IdentifierFixture(), BlockID: IdentifierFixture(), - Chunks: flow.ChunkList{ - ChunkFixture(blockID), - ChunkFixture(blockID), - }, + Chunks: ChunksFixture(2, blockID), }, Signatures: SignaturesFixture(6), } @@ -496,7 +481,7 @@ func IncorporatedResultFixture(opts ...func(*flow.IncorporatedResult)) *flow.Inc // TODO replace by usage unittest.IncorporatedResult func IncorporatedResultForBlockFixture(block *flow.Block) *flow.IncorporatedResult { - result := ResultForBlockFixture(block) + 
result := ExecutionResultFixture(WithBlock(block)) incorporatedBlockID := IdentifierFixture() return flow.NewIncorporatedResult(incorporatedBlockID, result) } From 1219a5e149bd2015c6c904fa31b54857e29d9765 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Wed, 28 Oct 2020 11:29:49 -0700 Subject: [PATCH 063/105] add address validity check --- access/errors.go | 12 ++++++++++++ access/validator.go | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/access/errors.go b/access/errors.go index 601b0feee63..55ac43e3b5d 100644 --- a/access/errors.go +++ b/access/errors.go @@ -3,6 +3,8 @@ package access import ( "errors" "fmt" + + "github.com/onflow/flow-go/model/flow" ) // ErrUnknownReferenceBlock indicates that a transaction references an unknown block. @@ -48,3 +50,13 @@ type InvalidGasLimitError struct { func (e InvalidGasLimitError) Error() string { return fmt.Sprintf("transaction gas limit (%d) exceeds the maximum gas limit (%d)", e.Actual, e.Maximum) } + +// InvalidAddressError indicates that a transaction references an invalid flow Address +// in either the Authorizers or Payer field. +type InvalidAddressError struct { + Address flow.Address +} + +func (e InvalidAddressError) Error() string { + return fmt.Sprintf("invalid address: %s", e.Address) +} diff --git a/access/validator.go b/access/validator.go index 1b800462479..8b9d77b2d12 100644 --- a/access/validator.go +++ b/access/validator.go @@ -48,15 +48,21 @@ type TransactionValidationOptions struct { AllowUnknownReferenceBlockID bool MaxGasLimit uint64 CheckScriptsParse bool + // MaxAddressIndex is a simple spam prevention measure. It rejects any + // transactions referencing an address with index newer than the specified + // maximum. A zero value indicates no address checking. 
+ MaxAddressIndex uint64 } type TransactionValidator struct { - blocks Blocks + blocks Blocks // for looking up blocks to check transaction expiry + chain flow.Chain // for checking validity of addresses options TransactionValidationOptions } func NewTransactionValidator( blocks Blocks, + chain flow.Chain, options TransactionValidationOptions, ) *TransactionValidator { return &TransactionValidator{ @@ -175,6 +181,33 @@ func (v *TransactionValidator) checkCanBeParsed(tx *flow.TransactionBody) error return nil } +func (v *TransactionValidator) checkAddresses(tx *flow.TransactionBody) error { + + for _, address := range append(tx.Authorizers, tx.Payer) { + // first we check objective validity, essentially whether or not this + // is a valid output of the address generator + if !v.chain.IsValid(address) { + return InvalidAddressError{Address: address} + } + + // skip second check if not configured + if v.options.MaxAddressIndex == 0 { + continue + } + + // next we check subjective validity based on the configured maximum index + index, err := v.chain.IndexFromAddress(address) + if err != nil { + return fmt.Errorf("could not get index for address (%s): %w", address, err) + } + if index > v.options.MaxAddressIndex { + return InvalidAddressError{Address: address} + } + } + + return nil +} + func remove(s []string, r string) []string { for i, v := range s { if v == r { From 1574b8d7aad5cad1a349091a1c8c42c537b719de Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 11:35:48 -0700 Subject: [PATCH 064/105] fixed TODO comment --- cmd/consensus/main.go | 2 +- engine/consensus/matching/engine.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index 82ee67cf3d7..0ee9d0702c2 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -188,7 +188,7 @@ func main() { assigner, requireOneApproval, ) - requesterEng.WithHandle(match.HandleReceipt) // TODO: use match.Process here (?) to parallelize engines in terms of threading + requesterEng.WithHandle(match.HandleReceipt) return match, err }). Component("provider engine", func(node *cmd.FlowNodeBuilder) (module.ReadyDoneAware, error) { diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index f2fcb17e39f..915cdf2f1ea 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -164,13 +164,14 @@ func (e *Engine) Process(originID flow.Identifier, event interface{}) error { }) } -// HandleReceipt pipes explicitely requested receipts to the process function. +// HandleReceipt pipes explicitly requested receipts to the process function. // Receipts can come from this function or the receipt provider setup in the // engine constructor. func (e *Engine) HandleReceipt(originID flow.Identifier, receipt flow.Entity) { - e.log.Debug().Msg("received receipt from requester engine") + // TODO: wrap following call to e.process into e.unit.Launch (?) 
+ // to parallelize engines in terms of threading err := e.process(originID, receipt) if err != nil { e.log.Error().Err(err).Msg("could not process receipt") From e8898eb6424f8f7f1d3e193fb91f8ced24e55af6 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 11:46:30 -0700 Subject: [PATCH 065/105] Apply suggestions from code review Co-authored-by: Leo Zhang --- engine/consensus/matching/engine.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 915cdf2f1ea..33113f5f4ac 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -224,7 +224,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // Thereby we rely on the networking layer for enforcing message integrity via the // networking key. // Unfortunately, this shortcut is _not_ applicable here for the following reason. - // Execution Nodes sync state between each other and have the ability so skip computing + // Execution Nodes sync state between each other and have the ability to skip computing // blocks. They could build on top of other nodes' execution results. When an Execution // Node receives a request for a block it hasn't itself computed, it will forward // receipts from other nodes (which it potentially used to continue its own computation). @@ -266,7 +266,8 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // resultsDB is persistent storage while Mempools are in-memory only. // After a crash, the replica still needs to be able to generate a seal // for an Result even if it had stored the Result before the crash. - // Otherwise, liveness of sealing is undermined. + // Otherwise, a stored result might never get sealed, and + // liveness of sealing is undermined. // store the result belonging to the receipt in the memory pool // TODO: This is a temporary step. 
In future, the incorporated results From 8e414360e444db983fcd0321dd3396e7fa8b1d34 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 12:12:50 -0700 Subject: [PATCH 066/105] improved logging; renamed matching.Engine.maxUnsealedResults to matching.Engine.maxResultsToRequest for clarity --- engine/consensus/matching/engine.go | 50 +++++++++++++---------------- model/flow/execution_result.go | 4 +-- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 33113f5f4ac..7a23838baa9 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -9,7 +9,6 @@ import ( "github.com/opentracing/opentracing-go" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "go.uber.org/atomic" "github.com/onflow/flow-go/engine" @@ -51,7 +50,7 @@ type Engine struct { assigner module.ChunkAssigner // chunk assignment object checkingSealing *atomic.Bool // used to rate limit the checksealing call requestReceiptThreshold uint // how many blocks between sealed/finalized before we request execution receipts - maxUnsealedResults int // how many unsealed results to check when check sealing + maxResultsToRequest int // max number of finalized blocks for which we request execution results requireApprovals bool // flag to disable verifying chunk approvals } @@ -96,7 +95,7 @@ func New( missing: make(map[flow.Identifier]uint), checkingSealing: atomic.NewBool(false), requestReceiptThreshold: 10, - maxUnsealedResults: 200, + maxResultsToRequest: 200, assigner: assigner, requireApprovals: requireApprovals, } @@ -266,7 +265,7 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // resultsDB is persistent storage while Mempools are in-memory only. // After a crash, the replica still needs to be able to generate a seal // for an Result even if it had stored the Result before the crash. - // Otherwise, a stored result might never get sealed, and + // Otherwise, a stored result might never get sealed, and // liveness of sealing is undermined. // store the result belonging to the receipt in the memory pool @@ -276,14 +275,14 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece // that contains a receipt committing to this result. 
added, err := e.incorporatedResults.Add(flow.NewIncorporatedResult(result.BlockID, result)) if err != nil { - e.log.Err(err).Msg("error inserting incorporated result in mempool") + log.Err(err).Msg("error inserting incorporated result in mempool") } if !added { - e.log.Debug().Msg("skipping result already in mempool") + log.Debug().Msg("skipping result already in mempool") return nil } e.mempool.MempoolEntries(metrics.ResourceResult, e.incorporatedResults.Size()) - e.log.Info().Msg("execution result added to mempool") + log.Info().Msg("execution result added to mempool") // kick off a check for potential seal formation e.unit.Launch(e.checkSealing) @@ -341,7 +340,7 @@ func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultAppro return err } if !added { - e.log.Debug().Msg("skipping approval already in mempool") + log.Debug().Msg("skipping approval already in mempool") return nil } e.mempool.MempoolEntries(metrics.ResourceApproval, e.approvals.Size()) @@ -504,7 +503,7 @@ RES_LOOP: previousID := incorporatedResult.Result.PreviousResultID previous, err := e.resultsDB.ByID(previousID) if errors.Is(err, storage.ErrNotFound) { - log.Debug().Msg("skipping sealable result with unknown previous result") + e.log.Debug().Msg("skipping sealable result with unknown previous result") continue } if err != nil { @@ -514,7 +513,7 @@ RES_LOOP: // check sub-graph if block.ParentID != previous.BlockID { _ = e.incorporatedResults.Rem(incorporatedResult) - log.Warn(). + e.log.Warn(). Str("block_parent_id", block.ParentID.String()). Str("previous_result_block_id", previous.BlockID.String()). Msg("removing result with invalid sub-graph") @@ -540,7 +539,7 @@ RES_LOOP: if incorporatedResult.Result.Chunks.Len() != requiredChunks { _ = e.incorporatedResults.Rem(incorporatedResult) - log.Warn(). + e.log.Warn(). Int("result_chunks", len(incorporatedResult.Result.Chunks)). Int("required_chunks", requiredChunks). Msg("removing result with invalid number of chunks") @@ -568,7 +567,7 @@ RES_LOOP: // get chunk at position i chunk, ok := incorporatedResult.Result.Chunks.ByIndex(uint64(i)) if !ok { - log.Warn().Msgf("chunk out of range requested: %d", i) + e.log.Warn().Msgf("chunk out of range requested: %d", i) _ = e.incorporatedResults.Rem(incorporatedResult) continue RES_LOOP } @@ -576,7 +575,7 @@ RES_LOOP: // Check if chunk index matches its position. This ensures that the // result contains all chunks and no duplicates. if chunk.Index != uint64(i) { - log.Warn().Msgf("chunk out of place: pos = %d, index = %d", i, chunk.Index) + e.log.Warn().Msgf("chunk out of place: pos = %d, index = %d", i, chunk.Index) _ = e.incorporatedResults.Rem(incorporatedResult) continue RES_LOOP } @@ -822,7 +821,7 @@ func (e *Engine) requestPending() error { // right order. The right order gives the priority to the execution result // of lower height blocks to be requested first, since a gap in the sealing // heights would stop the sealing. - missingBlocksOrderedByHeight := make([]flow.Identifier, 0, e.maxUnsealedResults) + missingBlocksOrderedByHeight := make([]flow.Identifier, 0, e.maxResultsToRequest) // turn mempool into Lookup table: BlockID -> Result knownResultForBlock := make(map[flow.Identifier]struct{}) @@ -838,7 +837,7 @@ func (e *Engine) requestPending() error { // order to request them. 
for height := sealed.Height + 1; height <= final.Height; height++ { // add at most number of results - if len(missingBlocksOrderedByHeight) >= e.maxUnsealedResults { + if len(missingBlocksOrderedByHeight) >= e.maxResultsToRequest { break } @@ -856,23 +855,20 @@ func (e *Engine) requestPending() error { } } - e.log.Info(). - Uint64("final", final.Height). - Uint64("sealed", sealed.Height). - Uint("request_receipt_threshold", e.requestReceiptThreshold). - Int("missing", len(missingBlocksOrderedByHeight)). - Msg("check missing receipts") - // request missing execution results, if sealed height is low enough + log := e.log.With(). + Uint64("finalized_height", final.Height). + Uint64("sealed_height", sealed.Height). + Uint("request_receipt_threshold", e.requestReceiptThreshold). + Int("finalized_blocks_without_result", len(missingBlocksOrderedByHeight)). + Logger() if uint(final.Height-sealed.Height) >= e.requestReceiptThreshold { - requestedCount := 0 for _, blockID := range missingBlocksOrderedByHeight { e.requester.EntityByID(blockID, filter.Any) - requestedCount++ } - e.log.Info(). - Int("count", requestedCount). - Msg("requested missing results") + log.Info().Msg("requesting receipts") + } else { + log.Debug().Msg("skip requesting receipts as difference between sealed and finalized height does not exceed threshold") } return nil diff --git a/model/flow/execution_result.go b/model/flow/execution_result.go index 54867657d45..3f18c995c39 100644 --- a/model/flow/execution_result.go +++ b/model/flow/execution_result.go @@ -39,7 +39,7 @@ func (er ExecutionResult) FinalStateCommitment() (StateCommitment, bool) { return nil, false } s := er.Chunks[er.Chunks.Len()-1].EndState - // TODO: empty state commitment -> second return value is false: + // TODO: empty state commitment should not be considered valid // return s, len(s) > 0 return s, true } @@ -56,7 +56,7 @@ func (er ExecutionResult) InitialStateCommit() (StateCommitment, bool) { return nil, false } s := er.Chunks[0].StartState - // TODO: empty state commitment -> second return value is false: + // TODO: empty state commitment should not be considered valid // return s, len(s) > 0 return s, true } From 469c50cec08b397bdd5092bd5e16a1149fdf2fa5 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 12:24:33 -0700 Subject: [PATCH 067/105] fixed broken test after var renaming :facepalm: --- engine/consensus/matching/engine_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index d4e30c825b2..a489e807ec7 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -328,7 +328,7 @@ func (ms *MatchingSuite) SetupTest() { seals: ms.sealsPL, checkingSealing: atomic.NewBool(false), requestReceiptThreshold: 10, - maxUnsealedResults: 200, + maxResultsToRequest: 200, assigner: ms.assigner, requireApprovals: true, } From 4eb858e58e5ef0e194818c2e09e13bbc24d7f3b0 Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Wed, 28 Oct 2020 13:22:15 -0700 Subject: [PATCH 068/105] update configs where ingest engine is instantiated --- access/validator.go | 1 + cmd/collection/main.go | 1 + engine/access/rpc/backend/backend.go | 5 +++-- engine/collection/ingest/config.go | 3 +++ engine/collection/ingest/engine.go | 3 +++ engine/collection/ingest/engine_test.go | 3 ++- engine/testutil/nodes.go | 2 +- 7 files changed, 14 insertions(+), 4 deletions(-) diff --git a/access/validator.go b/access/validator.go 
index 8b9d77b2d12..2abf0c6ec96 100644 --- a/access/validator.go +++ b/access/validator.go @@ -67,6 +67,7 @@ func NewTransactionValidator( ) *TransactionValidator { return &TransactionValidator{ blocks: blocks, + chain: chain, options: options, } } diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 49897427722..b8b132e5055 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -231,6 +231,7 @@ func main() { node.Metrics.Engine, colMetrics, node.Me, + node.RootChainID.Chain(), pools, ingestConf, ) diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 274e9e1a268..6135013bcd1 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -79,7 +79,7 @@ func New( collections: collections, blocks: blocks, transactions: transactions, - transactionValidator: configureTransactionValidator(state), + transactionValidator: configureTransactionValidator(state, chainID), transactionMetrics: transactionMetrics, retry: retry, collectionGRPCPort: collectionGRPCPort, @@ -112,9 +112,10 @@ func New( return b } -func configureTransactionValidator(state protocol.State) *access.TransactionValidator { +func configureTransactionValidator(state protocol.State, chainID flow.ChainID) *access.TransactionValidator { return access.NewTransactionValidator( access.NewProtocolStateBlocks(state), + chainID.Chain(), access.TransactionValidationOptions{ Expiry: flow.DefaultTransactionExpiry, ExpiryBuffer: flow.DefaultTransactionExpiryBuffer, diff --git a/engine/collection/ingest/config.go b/engine/collection/ingest/config.go index 839a11c0e53..86d03068e5b 100644 --- a/engine/collection/ingest/config.go +++ b/engine/collection/ingest/config.go @@ -16,6 +16,8 @@ type Config struct { AllowUnknownReference bool // whether or not we check that transaction scripts are parse-able CheckScriptsParse bool + // the maximum address index we accept + MaxAddressIndex uint64 // how many extra nodes in the responsible cluster we propagate transactions to // (we always send to at least one) PropagationRedundancy uint @@ -27,6 +29,7 @@ func DefaultConfig() Config { MaxGasLimit: flow.DefaultMaxGasLimit, AllowUnknownReference: false, CheckScriptsParse: true, + MaxAddressIndex: 1_000_000, PropagationRedundancy: 2, } } diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index 51584952d36..2adae13c033 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -43,6 +43,7 @@ func New( engMetrics module.EngineMetrics, colMetrics module.CollectionMetrics, me module.Local, + chain flow.Chain, pools *epochs.TransactionPools, config Config, ) (*Engine, error) { @@ -51,11 +52,13 @@ func New( transactionValidator := access.NewTransactionValidator( access.NewProtocolStateBlocks(state), + chain, access.TransactionValidationOptions{ Expiry: flow.DefaultTransactionExpiry, ExpiryBuffer: config.ExpiryBuffer, AllowUnknownReferenceBlockID: config.AllowUnknownReference, MaxGasLimit: flow.DefaultMaxGasLimit, + MaxAddressIndex: config.MaxAddressIndex, CheckScriptsParse: config.CheckScriptsParse, }, ) diff --git a/engine/collection/ingest/engine_test.go b/engine/collection/ingest/engine_test.go index 949ddcdf077..1432aa839d7 100644 --- a/engine/collection/ingest/engine_test.go +++ b/engine/collection/ingest/engine_test.go @@ -117,7 +117,8 @@ func (suite *Suite) SetupTest() { suite.epochQuery = mocks.NewEpochQuery(suite.T(), 1, epoch) suite.conf = DefaultConfig() - suite.engine, err = New(log, net, 
suite.state, metrics, metrics, suite.me, suite.pools, suite.conf) + chain := flow.Testnet.Chain() + suite.engine, err = New(log, net, suite.state, metrics, metrics, suite.me, chain, suite.pools, suite.conf) suite.Require().Nil(err) } diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 7cc3f0d56eb..62eedf549ee 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -146,7 +146,7 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identi transactions := storage.NewTransactions(node.Metrics, node.DB) collections := storage.NewCollections(node.DB, transactions) - ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, pools, collectioningest.DefaultConfig()) + ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, chainID.Chain(), pools, collectioningest.DefaultConfig()) require.NoError(t, err) selector := filter.HasRole(flow.RoleAccess, flow.RoleVerification) From 52ad49d39d4c0471b76fcfe65a2883b63821f5cc Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Wed, 28 Oct 2020 13:50:44 -0700 Subject: [PATCH 069/105] add tests for objective/subjective validity checks --- access/validator.go | 5 +++++ engine/collection/ingest/engine_test.go | 25 +++++++++++++++++++++++++ utils/unittest/fixtures.go | 9 +++++++++ 3 files changed, 39 insertions(+) diff --git a/access/validator.go b/access/validator.go index 2abf0c6ec96..36bb0201ec3 100644 --- a/access/validator.go +++ b/access/validator.go @@ -95,6 +95,11 @@ func (v *TransactionValidator) Validate(tx *flow.TransactionBody) (err error) { // TODO check account/payer signatures + err = v.checkAddresses(tx) + if err != nil { + return err + } + return nil } diff --git a/engine/collection/ingest/engine_test.go b/engine/collection/ingest/engine_test.go index 1432aa839d7..4e1888b688d 100644 --- a/engine/collection/ingest/engine_test.go +++ b/engine/collection/ingest/engine_test.go @@ -167,6 +167,31 @@ func (suite *Suite) TestInvalidTransaction() { suite.T().Skip() }) + suite.Run("invalid address", func() { + suite.Run("objective check", func() { + invalid := unittest.InvalidAddressFixture() + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.root.ID() + tx.Payer = invalid + + err := suite.engine.ProcessLocal(&tx) + suite.Assert().Error(err) + suite.Assert().True(errors.As(err, &access.InvalidAddressError{})) + }) + + suite.Run("subjective check with max index", func() { + invalid, err := flow.Testnet.Chain().AddressAtIndex(suite.conf.MaxAddressIndex + 1) + suite.Require().Nil(err) + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.root.ID() + tx.Payer = invalid + + err = suite.engine.ProcessLocal(&tx) + suite.Assert().Error(err) + suite.Assert().True(errors.As(err, &access.InvalidAddressError{})) + }) + }) + suite.Run("expired reference block ID", func() { // "finalize" a sufficiently high block that root block is expired final := unittest.BlockFixture() diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index ea86dfb6366..38a9259542f 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -37,6 +37,15 @@ func RandomAddressFixture() flow.Address { return addr } +func InvalidAddressFixture() flow.Address { + addr := AddressFixture() + addr[0] ^= 1 // alter one bit to obtain an invalid address + if flow.Testnet.Chain().IsValid(addr) { + panic("invalid address fixture generated valid address") + } + return addr 
+} + func TransactionSignatureFixture() flow.TransactionSignature { return flow.TransactionSignature{ Address: AddressFixture(), From acba54abffda0fa576e572bda5c7b2ddb611541c Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Wed, 28 Oct 2020 13:53:54 -0700 Subject: [PATCH 070/105] add flag for max index --- cmd/collection/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/collection/main.go b/cmd/collection/main.go index b8b132e5055..b51ea45d4c7 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -86,6 +86,8 @@ func main() { "expiry buffer for inbound transactions") flags.UintVar(&ingestConf.PropagationRedundancy, "ingest-tx-propagation-redundancy", 10, "how many additional cluster members we propagate transactions to") + flags.Uint64Var(&ingestConf.MaxAddressIndex, "ingest-max-address-index", 1_000_000, + "the maximum address index allowed in transactions") flags.UintVar(&builderExpiryBuffer, "builder-expiry-buffer", 25, "expiry buffer for transactions in proposed collections") flags.Float64Var(&builderPayerRateLimit, "builder-rate-limit", 0, // no rate limiting From 45905b8af1bb00a8cdb514b41e081d791553652e Mon Sep 17 00:00:00 2001 From: Ramtin Mehdizadeh Seraj Date: Wed, 28 Oct 2020 15:26:44 -0700 Subject: [PATCH 071/105] Adding gas and byte size limits to collections (#91) Co-authored-by: Jordan Schalm --- cmd/collection/main.go | 8 +++++ engine/collection/ingest/engine.go | 14 ++++++--- model/flow/transaction.go | 4 +-- module/builder/collection/builder.go | 30 ++++++++++++++++++ module/builder/collection/builder_test.go | 37 +++++++++++++++++++++++ module/builder/collection/config.go | 20 ++++++++++++ 6 files changed, 106 insertions(+), 7 deletions(-) diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 49897427722..9a19906e733 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -46,6 +46,8 @@ func main() { var ( txLimit uint maxCollectionSize uint + maxCollectionByteSize uint64 + maxCollectionTotalGas uint64 builderExpiryBuffer uint builderPayerRateLimit float64 builderUnlimitedPayers []string @@ -94,6 +96,10 @@ func main() { "set of payer addresses which are omitted from rate limiting") flags.UintVar(&maxCollectionSize, "builder-max-collection-size", 200, "maximum number of transactions in proposed collections") + flags.Uint64Var(&maxCollectionByteSize, "builder-max-collection-byte-size", 1000000, + "maximum byte size of the proposed collection") + flags.Uint64Var(&maxCollectionTotalGas, "builder-max-collection-total-gas", 1000000, + "maximum total amount of maxgas of transactions in proposed collections") flags.DurationVar(&hotstuffTimeout, "hotstuff-timeout", 60*time.Second, "the initial timeout for the hotstuff pacemaker") flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 2500*time.Millisecond, @@ -287,6 +293,8 @@ func main() { colMetrics, push, builder.WithMaxCollectionSize(maxCollectionSize), + builder.WithMaxCollectionByteSize(maxCollectionByteSize), + builder.WithMaxCollectionTotalGas(maxCollectionTotalGas), builder.WithExpiryBuffer(builderExpiryBuffer), builder.WithMaxPayerTransactionRate(builderPayerRateLimit), builder.WithUnlimitedPayers(unlimitedPayers...), diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index b3beb0fa067..afa6744aa5e 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -143,9 +143,11 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { // from outside the system or routed from 
another collection node. func (e *Engine) onTransaction(originID flow.Identifier, tx *flow.TransactionBody) error { + txID := tx.ID() + log := e.log.With(). Hex("origin_id", originID[:]). - Hex("tx_id", logging.Entity(tx)). + Hex("tx_id", txID[:]). Hex("ref_block_id", tx.ReferenceBlockID[:]). Logger() @@ -180,7 +182,6 @@ func (e *Engine) onTransaction(originID flow.Identifier, tx *flow.TransactionBod pool := e.pools.ForEpoch(counter) // short-circuit if we have already stored the transaction - txID := tx.ID() if pool.Has(txID) { e.log.Debug().Msg("received dupe transaction") return nil @@ -203,13 +204,16 @@ func (e *Engine) onTransaction(originID flow.Identifier, tx *flow.TransactionBod return fmt.Errorf("node is not assigned to any cluster in this epoch: %d", counter) } + localClusterFingerPrint := localCluster.Fingerprint() + txClusterFingerPrint := txCluster.Fingerprint() + log = log.With(). - Hex("local_cluster", logging.ID(localCluster.Fingerprint())). - Hex("tx_cluster", logging.ID(txCluster.Fingerprint())). + Hex("local_cluster", logging.ID(localClusterFingerPrint)). + Hex("tx_cluster", logging.ID(txClusterFingerPrint)). Logger() // if our cluster is responsible for the transaction, add it to the mempool - if localCluster.Fingerprint() == txCluster.Fingerprint() { + if localClusterFingerPrint == txClusterFingerPrint { _ = pool.Add(tx) e.colMetrics.TransactionIngested(txID) log.Debug().Msg("added transaction to pool") diff --git a/model/flow/transaction.go b/model/flow/transaction.go index 674b441d096..d4588b9f2a5 100644 --- a/model/flow/transaction.go +++ b/model/flow/transaction.go @@ -64,7 +64,7 @@ func (tb TransactionBody) Fingerprint() []byte { }) } -func (tb TransactionBody) ByteSize() int { +func (tb TransactionBody) ByteSize() uint { size := 0 size += len(tb.ReferenceBlockID) size += len(tb.Script) @@ -81,7 +81,7 @@ func (tb TransactionBody) ByteSize() int { for _, s := range tb.EnvelopeSignatures { size += s.ByteSize() } - return size + return uint(size) } func (tb TransactionBody) ID() Identifier { diff --git a/module/builder/collection/builder.go b/module/builder/collection/builder.go index 4b4eca4e919..ff922f945f4 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -195,6 +195,8 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er minRefID := refChainFinalizedID var transactions []*flow.TransactionBody + var totalByteSize uint64 + var totalGas uint64 for _, tx := range b.transactions.All() { // if we have reached maximum number of transactions, stop @@ -202,6 +204,32 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er break } + txByteSize := uint64(tx.ByteSize()) + // ignore transactions with tx byte size bigger that the max amount per collection + // this case shouldn't happen ever since we keep a limit on tx byte size but in case + // we keep this condition + if txByteSize > b.config.MaxCollectionByteSize { + continue + } + + // because the max byte size per tx is way smaller than the max collection byte size, we can stop here and not continue. 
+ // to make it more effective in the future we can continue adding smaller ones + if totalByteSize+txByteSize > b.config.MaxCollectionByteSize { + break + } + + // ignore transactions with max gas bigger that the max total gas per collection + // this case shouldn't happen ever but in case we keep this condition + if tx.GasLimit > b.config.MaxCollectionTotalGas { + continue + } + + // cause the max gas limit per tx is way smaller than the total max gas per collection, we can stop here and not continue. + // to make it more effective in the future we can continue adding smaller ones + if totalGas+tx.GasLimit > b.config.MaxCollectionTotalGas { + break + } + // retrieve the main chain header that was used as reference refHeader, err := b.mainHeaders.ByBlockID(tx.ReferenceBlockID) if errors.Is(err, storage.ErrNotFound) { @@ -251,6 +279,8 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er limiter.transactionIncluded(tx) transactions = append(transactions, tx) + totalByteSize += txByteSize + totalGas += tx.GasLimit } // STEP FOUR: we have a set of transactions that are valid to include diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go index 6841a7bf720..a974e9a2d50 100644 --- a/module/builder/collection/builder_test.go +++ b/module/builder/collection/builder_test.go @@ -111,6 +111,7 @@ func (suite *BuilderSuite) Bootstrap() { transaction := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ReferenceBlockID = root.ID() tx.ProposalKey.SequenceNumber = uint64(i) + tx.GasLimit = uint64(9999) }) added := suite.pool.Add(&transaction) suite.Assert().True(added) @@ -459,6 +460,42 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() { suite.Assert().Equal(builtCollection.Len(), 1) } +func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() { + // set the max collection byte size to 600 (each tx is about 273 bytes) + suite.builder = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.headers, suite.headers, suite.payloads, suite.pool, builder.WithMaxCollectionByteSize(600)) + + // build a block + header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter) + suite.Require().Nil(err) + + // retrieve the built block from storage + var built model.Block + err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + suite.Require().Nil(err) + builtCollection := built.Payload.Collection + + // should be only 2 transactions in the collection, since each tx is ~273 bytes and the limit is 600 bytes + suite.Assert().Equal(builtCollection.Len(), 2) +} + +func (suite *BuilderSuite) TestBuildOn_MaxCollectionTotalGas() { + // set the max gas to 20,000 + suite.builder = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.headers, suite.headers, suite.payloads, suite.pool, builder.WithMaxCollectionTotalGas(20000)) + + // build a block + header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter) + suite.Require().Nil(err) + + // retrieve the built block from storage + var built model.Block + err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + suite.Require().Nil(err) + builtCollection := built.Payload.Collection + + // should be only 2 transactions in collection, since each transaction has gas limit of 9,999 and collection limit is set to 20,000 + suite.Assert().Equal(builtCollection.Len(), 2) +} + func (suite *BuilderSuite) TestBuildOn_ExpiredTransaction() { // create enough main-chain blocks that an expired transaction is possible diff 
--git a/module/builder/collection/config.go index 89c3c84c12d..404b63dc363 100644 --- a/module/builder/collection/config.go +++ b/module/builder/collection/config.go @@ -28,6 +28,12 @@ type Config struct { // UnlimitedPayer is a set of addresses which are not affected by per-payer // rate limiting. UnlimitedPayers map[flow.Address]struct{} + + // MaxCollectionByteSize is the maximum byte size of a collection. + MaxCollectionByteSize uint64 + + // MaxCollectionTotalGas is the maximum total gas per collection (sum of maxGasLimit over transactions) + MaxCollectionTotalGas uint64 } func DefaultConfig() Config { @@ -36,6 +42,8 @@ func DefaultConfig() Config { ExpiryBuffer: 15, // 15 blocks for collections to be included MaxPayerTransactionRate: 0, // no rate limiting UnlimitedPayers: make(map[flow.Address]struct{}), // no unlimited payers + MaxCollectionByteSize: uint64(1000000), // ~1MB + MaxCollectionTotalGas: uint64(1000000), // 1M } } @@ -71,3 +79,15 @@ func WithUnlimitedPayers(payers ...flow.Address) Opt { c.UnlimitedPayers = lookup } } + +func WithMaxCollectionByteSize(limit uint64) Opt { + return func(c *Config) { + c.MaxCollectionByteSize = limit + } +} + +func WithMaxCollectionTotalGas(limit uint64) Opt { + return func(c *Config) { + c.MaxCollectionTotalGas = limit + } +} From 858a4a4eae2201b34e4e876b5107bc0bb988085a Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 28 Oct 2020 17:02:15 -0700 Subject: [PATCH 072/105] adding test cases for verifying unexecuted blocks --- engine/execution/ingestion/engine.go | 109 ++++---- engine/execution/ingestion/engine_test.go | 302 ++++++++++++++++++++++ utils/unittest/equals.go | 21 ++ utils/unittest/fixtures.go | 27 ++ utils/unittest/mocks/execution_state.go | 56 ++++ utils/unittest/mocks/protocol_state.go | 187 ++++++++++++++ 6 files changed, 653 insertions(+), 49 deletions(-) create mode 100644 utils/unittest/equals.go create mode 100644 utils/unittest/mocks/execution_state.go create mode 100644 utils/unittest/mocks/protocol_state.go diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index c49ceb27fad..aee58043ed4 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -186,27 +186,11 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } -// on nodes startup, we need to load all the unexecuted blocks to the execution queues. -// blocks have to be loaded in the way that the parent has been loaded before loading its children -func (e *Engine) reloadUnexecutedBlocks() error { - err := e.reloadFinalizedUnexecutedBlocks() - if err != nil { - return fmt.Errorf("could not reload finalized unexecuted blocks") - } - - err = e.reloadPendingUnexecutedBlocks() - if err != nil { - return fmt.Errorf("could not reload pending unexecuted blocks") - } - - return nil -} - -func (e *Engine) reloadFinalizedUnexecutedBlocks() error { +func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { // get finalized height final, err := e.state.Final().Head() if err != nil { - return fmt.Errorf("could not get finalized block: %w", err) + return nil, fmt.Errorf("could not get finalized block: %w", err) } // find the first unexecuted and finalized block @@ -217,82 +201,109 @@ func (e *Engine) reloadFinalizedUnexecutedBlocks() error { // then the firstUnexecuted is a unfinalized block, which is ok, // because the next loop will ensure it only iterate through finalized // block.
- firstUnexecuted := final.Height - for ; firstUnexecuted >= 0; firstUnexecuted-- { - header, err := e.state.AtHeight(firstUnexecuted).Head() + lastExecuted := final.Height + + for ; lastExecuted > 0; lastExecuted-- { + header, err := e.state.AtHeight(lastExecuted).Head() if err != nil { - return fmt.Errorf("could not get header at height: %v, %w", firstUnexecuted, err) + return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) } executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) if err != nil { - return fmt.Errorf("could not check whether block is executed: %w", err) + return nil, fmt.Errorf("could not check whether block is executed: %w", err) } if executed { - firstUnexecuted++ break } } + firstUnexecuted := lastExecuted + 1 + e.log.Info().Msgf("last finalized and executed height: %v", firstUnexecuted) + unexecuted := make([]flow.Identifier, 0) + // starting from the first unexecuted block, go through each unexecuted and finalized block // reload its block to execution queues for height := firstUnexecuted; height <= final.Height; height++ { header, err := e.state.AtHeight(height).Head() if err != nil { - return fmt.Errorf("could not get header at height: %v, %w", height, err) + return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) } - err = e.reloadBlock(header.ID()) - if err != nil { - return fmt.Errorf("could not reload block %v, %w", height, err) - } - - e.log.Info().Msgf("reloaded block at height: %v", height) - + unexecuted = append(unexecuted, header.ID()) } - return nil + return unexecuted, nil } -func (e *Engine) reloadPendingUnexecutedBlocks() error { +func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { + unexecuted := make([]flow.Identifier, 0) + pendings, err := e.state.Final().Pending() if err != nil { - return fmt.Errorf("could not get pending blocks: %w", err) + return nil, fmt.Errorf("could not get pending blocks: %w", err) } for _, pending := range pendings { - reloaded, err := e.reloadBlockIfNotExecuted(pending) + executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, pending) if err != nil { - return fmt.Errorf("could not reload block for block %w", err) + return nil, fmt.Errorf("could not check block executed or not: %w", err) } - e.log.Info().Bool("reloaded", reloaded).Msgf("reloaded block %v", pending) + if !executed { + unexecuted = append(unexecuted, pending) + } } - return nil + return unexecuted, nil } -// reload the block to execution queues if has not been executed. -// return whether the block was reloaded. -func (e *Engine) reloadBlockIfNotExecuted(blockID flow.Identifier) (bool, error) { - executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) +func (e *Engine) unexecutedBlocks() (finalized []flow.Identifier, pending []flow.Identifier, err error) { + finalized, err = e.finalizedUnexecutedBlocks() if err != nil { - return false, fmt.Errorf("could not check block executed or not: %w", err) + return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") } - if executed { - return false, nil + pending, err = e.pendingUnexecutedBlocks() + if err != nil { + return nil, nil, fmt.Errorf("could not read pending unexecuted blocks") } - err = e.reloadBlock(blockID) + return finalized, pending, nil +} + +// on nodes startup, we need to load all the unexecuted blocks to the execution queues. 
+// blocks have to be loaded in the way that the parent has been loaded before loading its children +func (e *Engine) reloadUnexecutedBlocks() error { + finalized, pending, err := e.unexecutedBlocks() if err != nil { - return false, fmt.Errorf("could not reload block: %w", err) + return fmt.Errorf("could not reload unexecuted blocks: %w", err) } - return true, nil + unexecuted := append(finalized, pending...) + + log := e.log.With(). + Int("total", len(unexecuted)). + Int("finalized", len(finalized)). + Int("pending", len(pending)).Logger() + + log.Info().Msg("reloading unexecuted blocks") + + for _, blockID := range unexecuted { + err := e.reloadBlock(blockID) + if err != nil { + return fmt.Errorf("could not reload block: %v, %w", blockID, err) + } + + e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block") + } + + log.Info().Msg("all unexecuted have been successfully reloaded") + + return nil } func (e *Engine) reloadBlock(blockID flow.Identifier) error { diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 91583ebf752..5255dd9e56d 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -32,6 +32,7 @@ import ( storageerr "github.com/onflow/flow-go/storage" storage "github.com/onflow/flow-go/storage/mocks" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" ) var ( @@ -554,3 +555,304 @@ func TestShouldTriggerStateSync(t *testing.T) { // reached the threshold 10, so should trigger require.True(t, shouldTriggerStateSync(20, 29, 10)) } + +func newIngestionEngine(t *testing.T, ps *mocks.PS, es *mocks.ES) *Engine { + log := unittest.Logger() + metrics := metrics.NewNoopCollector() + tracer, err := trace.NewTracer(log, "test") + require.NoError(t, err) + ctrl := gomock.NewController(t) + net := module.NewMockNetwork(ctrl) + request := module.NewMockRequester(ctrl) + syncConduit := network.NewMockConduit(ctrl) + var engine *Engine + net.EXPECT().Register(gomock.Eq(engineCommon.SyncExecution), gomock.AssignableToTypeOf(engine)).Return(syncConduit, nil) + + // generates signing identity including staking key for signing + seed := make([]byte, crypto.KeyGenSeedMinLenBLSBLS12381) + n, err := rand.Read(seed) + require.Equal(t, n, crypto.KeyGenSeedMinLenBLSBLS12381) + require.NoError(t, err) + sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed) + require.NoError(t, err) + myIdentity.StakingPubKey = sk.PublicKey() + me := mocklocal.NewMockLocal(sk, myIdentity.ID(), t) + + blocks := storage.NewMockBlocks(ctrl) + collections := storage.NewMockCollections(ctrl) + events := storage.NewMockEvents(ctrl) + txResults := storage.NewMockTransactionResults(ctrl) + + computationManager := new(computation.ComputationManager) + providerEngine := new(provider.ProviderEngine) + + deltas, err := NewDeltas(10) + require.NoError(t, err) + + engine, err = New( + log, + net, + me, + request, + ps, + blocks, + collections, + events, + txResults, + computationManager, + providerEngine, + es, + metrics, + tracer, + false, + filter.Any, + deltas, + 10, + false, + ) + + require.NoError(t, err) + return engine +} + +func logChain(chain []*flow.Block) { + log := unittest.Logger() + for i, block := range chain { + log.Info().Msgf("block %v, height: %v, ID: %v", i, block.Header.Height, block.ID()) + } +} + +func TestLoadingUnexecutedBlocks(t *testing.T) { + t.Run("only genesis", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(0) 
+ genesis := chain[0] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{}, pending) + }) + + t.Run("no finalized, nor pending unexected", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockD)) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{blockA.ID(), blockB.ID(), blockC.ID(), blockD.ID()}, pending) + }) + + t.Run("no finalized, some pending executed", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockD)) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + mocks.ExecuteBlock(t, es, blockA) + mocks.ExecuteBlock(t, es, blockB) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{blockC.ID(), blockD.ID()}, pending) + }) + + t.Run("all finalized have been executed, and no pending executed", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockD)) + + require.NoError(t, ps.Mutate().Finalize(blockA.ID())) + require.NoError(t, ps.Mutate().Finalize(blockB.ID())) + require.NoError(t, ps.Mutate().Finalize(blockC.ID())) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + mocks.ExecuteBlock(t, es, blockA) + mocks.ExecuteBlock(t, es, blockB) + mocks.ExecuteBlock(t, es, blockC) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) + }) + + t.Run("some finalized are executed and conflicting are executed", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, 
result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockD)) + + require.NoError(t, ps.Mutate().Finalize(blockC.ID())) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + mocks.ExecuteBlock(t, es, blockA) + mocks.ExecuteBlock(t, es, blockB) + mocks.ExecuteBlock(t, es, blockC) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) + }) + + t.Run("all pending executed", func(t *testing.T) { + ps := mocks.NewPS() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockD)) + require.NoError(t, ps.Mutate().Finalize(blockA.ID())) + + es := mocks.NewES(seal) + engine := newIngestionEngine(t, ps, es) + + mocks.ExecuteBlock(t, es, blockA) + mocks.ExecuteBlock(t, es, blockB) + mocks.ExecuteBlock(t, es, blockC) + mocks.ExecuteBlock(t, es, blockD) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{}, pending) + }) + + t.Run("some fork is executed", func(t *testing.T) { + ps := mocks.NewPS() + + // Genesis <- A <- B <- C (finalized) <- D <- E <- F + // ^--- G <- H + // ^-- I + // ^--- J <- K + chain, result, seal := unittest.ChainFixture(6) + genesis, blockA, blockB, blockC, blockD, blockE, blockF := + chain[0], chain[1], chain[2], chain[3], chain[4], chain[5], chain[6] + + fork1 := unittest.ChainFixtureFrom(2, blockD.Header) + blockG, blockH := fork1[0], fork1[1] + + fork2 := unittest.ChainFixtureFrom(1, blockC.Header) + blockI := fork2[0] + + fork3 := unittest.ChainFixtureFrom(2, blockB.Header) + blockJ, blockK := fork3[0], fork3[1] + + logChain(chain) + logChain(fork1) + logChain(fork2) + logChain(fork3) + + require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Mutate().Extend(blockA)) + require.NoError(t, ps.Mutate().Extend(blockB)) + require.NoError(t, ps.Mutate().Extend(blockC)) + require.NoError(t, ps.Mutate().Extend(blockI)) + require.NoError(t, ps.Mutate().Extend(blockJ)) + require.NoError(t, ps.Mutate().Extend(blockK)) + require.NoError(t, ps.Mutate().Extend(blockD)) + require.NoError(t, ps.Mutate().Extend(blockE)) + require.NoError(t, ps.Mutate().Extend(blockF)) + require.NoError(t, ps.Mutate().Extend(blockG)) + require.NoError(t, ps.Mutate().Extend(blockH)) + + require.NoError(t, ps.Mutate().Finalize(blockC.ID())) + + es := mocks.NewES(seal) + + engine := newIngestionEngine(t, ps, es) + + mocks.ExecuteBlock(t, es, blockA) + mocks.ExecuteBlock(t, es, blockB) + mocks.ExecuteBlock(t, es, blockC) + mocks.ExecuteBlock(t, es, blockD) + mocks.ExecuteBlock(t, es, blockG) + mocks.ExecuteBlock(t, es, blockJ) + + finalized, pending, err := engine.unexecutedBlocks() + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, finalized) + unittest.IDsEqual(t, []flow.Identifier{ + blockI.ID(), // I is still pending, and unexecuted + blockE.ID(), + 
blockF.ID(), + // note K is not a pending block, but a conflicting block, even if it's not executed, + // it won't included + blockH.ID()}, + pending) + }) +} diff --git a/utils/unittest/equals.go b/utils/unittest/equals.go new file mode 100644 index 00000000000..78eb707acc7 --- /dev/null +++ b/utils/unittest/equals.go @@ -0,0 +1,21 @@ +package unittest + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" +) + +func toHex(ids []flow.Identifier) []string { + hex := make([]string, 0, len(ids)) + for _, id := range ids { + hex = append(hex, id.String()) + } + return hex +} + +func IDsEqual(t *testing.T, id1, id2 []flow.Identifier) { + require.Equal(t, toHex(id1), toHex(id2)) +} diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index ea86dfb6366..a6165ff43cb 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -1006,3 +1006,30 @@ func BootstrapFixture(participants flow.IdentityList, opts ...func(*flow.Block)) seal := SealFixture(SealFromResult(result), WithServiceEvents(setup.ServiceEvent(), commit.ServiceEvent())) return root, result, seal } + +// ChainFixture creates a list of blocks that forms a chain +func ChainFixture(nonGenesisCount int) ([]*flow.Block, *flow.ExecutionResult, *flow.Seal) { + chain := make([]*flow.Block, 0, nonGenesisCount+1) + + participants := IdentityListFixture(5, WithAllRoles()) + genesis, result, seal := BootstrapFixture(participants) + chain = append(chain, genesis) + + children := ChainFixtureFrom(nonGenesisCount, genesis.Header) + chain = append(chain, children...) + return chain, result, seal +} + +// ChainFixtureFrom creates a chain of blocks starting from a given parent block, +// the total number of blocks in the chain is specified by the given count +func ChainFixtureFrom(count int, parent *flow.Header) []*flow.Block { + blocks := make([]*flow.Block, 0, count) + + for i := 0; i < count; i++ { + block := BlockWithParentFixture(parent) + blocks = append(blocks, &block) + parent = block.Header + } + + return blocks +} diff --git a/utils/unittest/mocks/execution_state.go b/utils/unittest/mocks/execution_state.go new file mode 100644 index 00000000000..eb1952a0866 --- /dev/null +++ b/utils/unittest/mocks/execution_state.go @@ -0,0 +1,56 @@ +package mocks + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + state "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +// ES is a mocked version of execution state that +// simulates some of its behavior for testing purpose +type ES struct { + sync.Mutex + state.ExecutionState + commits map[flow.Identifier]flow.StateCommitment +} + +func NewES(seal *flow.Seal) *ES { + commits := make(map[flow.Identifier]flow.StateCommitment) + commits[seal.BlockID] = seal.FinalState + return &ES{ + commits: commits, + } +} + +func (es *ES) PersistStateCommitment(ctx context.Context, blockID flow.Identifier, commit flow.StateCommitment) error { + es.Lock() + defer es.Unlock() + es.commits[blockID] = commit + return nil +} + +func (es *ES) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identifier) (flow.StateCommitment, error) { + commit, ok := es.commits[blockID] + if !ok { + return nil, storage.ErrNotFound + } + + return commit, nil +} + +func ExecuteBlock(t *testing.T, es *ES, block *flow.Block) { + _, ok := es.commits[block.Header.ParentID] + require.True(t, 
ok, "parent block not executed") + require.NoError(t, + es.PersistStateCommitment( + context.Background(), + block.ID(), + unittest.StateCommitmentFixture())) +} diff --git a/utils/unittest/mocks/protocol_state.go b/utils/unittest/mocks/protocol_state.go new file mode 100644 index 00000000000..29b98c1a2a2 --- /dev/null +++ b/utils/unittest/mocks/protocol_state.go @@ -0,0 +1,187 @@ +package mocks + +import ( + "fmt" + "sync" + + "github.com/stretchr/testify/mock" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" +) + +// PS is a mocked version of protocol state, which +// has very close behavior to the real implementation +// but for testing purpose. +// If you are testing a module that depends on protocol state's +// behavior, but you don't want to mock up the methods and its return +// value, then just use this module +type PS struct { + sync.Mutex + protocol.State + blocks map[flow.Identifier]*flow.Block + children map[flow.Identifier][]flow.Identifier + heights map[uint64]*flow.Block + finalized uint64 + result *flow.ExecutionResult + seal *flow.Seal +} + +func NewPS() *PS { + return &PS{ + blocks: make(map[flow.Identifier]*flow.Block), + children: make(map[flow.Identifier][]flow.Identifier), + heights: make(map[uint64]*flow.Block), + } +} + +type PSMutator struct { + protocolmock.Mutator + ps *PS +} + +func (ps *PS) AtBlockID(blockID flow.Identifier) protocol.Snapshot { + ps.Lock() + defer ps.Unlock() + + snapshot := new(protocolmock.Snapshot) + block, ok := ps.blocks[blockID] + if ok { + snapshot.On("Head").Return(block.Header, nil) + } else { + snapshot.On("Head").Return(nil, storage.ErrNotFound) + } + return snapshot +} + +func (ps *PS) AtHeight(height uint64) protocol.Snapshot { + ps.Lock() + defer ps.Unlock() + + snapshot := new(protocolmock.Snapshot) + block, ok := ps.heights[height] + if ok { + snapshot.On("Head").Return(block.Header, nil) + } else { + snapshot.On("Head").Return(nil, storage.ErrNotFound) + } + return snapshot +} + +func (ps *PS) Final() protocol.Snapshot { + ps.Lock() + defer ps.Unlock() + + final, ok := ps.heights[ps.finalized] + if !ok { + return nil + } + + snapshot := new(protocolmock.Snapshot) + snapshot.On("Head").Return(final.Header, nil) + finalID := final.ID() + mocked := snapshot.On("Pending") + mocked.RunFn = func(args mock.Arguments) { + // not concurrent safe + pendings := pending(ps, finalID) + mocked.ReturnArguments = mock.Arguments{pendings, nil} + } + return snapshot +} + +func pending(ps *PS, blockID flow.Identifier) []flow.Identifier { + var pendingIDs []flow.Identifier + pendingIDs, ok := ps.children[blockID] + + if !ok { + return pendingIDs + } + + for _, pendingID := range pendingIDs { + additionalIDs := pending(ps, pendingID) + pendingIDs = append(pendingIDs, additionalIDs...) 
+ } + + return pendingIDs +} + +func (ps *PS) Mutate() protocol.Mutator { + return &PSMutator{ + protocolmock.Mutator{}, + ps, + } +} + +func (m *PSMutator) Bootstrap(root *flow.Block, result *flow.ExecutionResult, seal *flow.Seal) error { + m.ps.Lock() + defer m.ps.Unlock() + + if _, ok := m.ps.blocks[root.ID()]; ok { + return storage.ErrAlreadyExists + } + + m.ps.blocks[root.ID()] = root + m.ps.result = result + m.ps.seal = seal + m.ps.heights[root.Header.Height] = root + m.ps.finalized = root.Header.Height + return nil +} + +func (m *PSMutator) Extend(block *flow.Block) error { + m.ps.Lock() + defer m.ps.Unlock() + + id := block.ID() + if _, ok := m.ps.blocks[id]; ok { + return storage.ErrAlreadyExists + } + + if _, ok := m.ps.blocks[block.Header.ParentID]; !ok { + return fmt.Errorf("could not retrieve parent") + } + + m.ps.blocks[id] = block + + // index children + children, ok := m.ps.children[block.Header.ParentID] + if !ok { + children = make([]flow.Identifier, 0) + } + + children = append(children, id) + m.ps.children[block.Header.ParentID] = children + + return nil +} + +func (m *PSMutator) Finalize(blockID flow.Identifier) error { + m.ps.Lock() + defer m.ps.Unlock() + + block, ok := m.ps.blocks[blockID] + if !ok { + return fmt.Errorf("could not retrieve final header") + } + + if block.Header.Height <= m.ps.finalized { + return fmt.Errorf("could not finalize old blocks") + } + + // update heights + cur := block + for height := cur.Header.Height; height > m.ps.finalized; height-- { + parent, ok := m.ps.blocks[cur.Header.ParentID] + if !ok { + return fmt.Errorf("parent does not exist for block at height: %v, parentID: %v", cur.Header.Height, cur.Header.ParentID) + } + m.ps.heights[height] = cur + cur = parent + } + + m.ps.finalized = block.Header.Height + + return nil +} From d24dea178c302dca86c2a1667f511b61202bed94 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 28 Oct 2020 17:11:34 -0700 Subject: [PATCH 073/105] update comment --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 5ffd5824ccf..03e6cbc05db 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -221,7 +221,7 @@ func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { firstUnexecuted := lastExecuted + 1 - e.log.Info().Msgf("last finalized and executed height: %v", firstUnexecuted) + e.log.Info().Msgf("last finalized and executed height: %v", lastExecuted) unexecuted := make([]flow.Identifier, 0) From f47430b2413ae6b50aa2fb2999301db90ff076c1 Mon Sep 17 00:00:00 2001 From: Ramtin Mehdizadeh Seraj Date: Wed, 28 Oct 2020 21:31:34 -0700 Subject: [PATCH 074/105] Ramtin/4844 adding more tx types to the load (#68) --- integration/loader/main.go | 2 + integration/utils/batchLoadGenerator.go | 39 ++-- integration/utils/contLoadGenerator.go | 243 ++++++++++++++++++------ integration/utils/scriptCreator.go | 92 --------- integration/utils/scripts.go | 236 +++++++++++++++++++++++ 5 files changed, 450 insertions(+), 162 deletions(-) delete mode 100644 integration/utils/scriptCreator.go create mode 100644 integration/utils/scripts.go diff --git a/integration/loader/main.go b/integration/loader/main.go index 00e2947d787..f6128de4402 100644 --- a/integration/loader/main.go +++ b/integration/loader/main.go @@ -28,6 +28,7 @@ type LoadCase struct { func main() { sleep := flag.Duration("sleep", 0, "duration to sleep before 
benchmarking starts") + loadTypeFlag := flag.String("load-type", "token-transfer", "type of loads (\"token-transfer\", \"add-keys\", \"computation-heavy\", \"event-heavy\", \"ledger-heavy\")") tpsFlag := flag.String("tps", "1", "transactions per second (TPS) to send, accepts a comma separated list of values if used in conjunction with `tps-durations`") tpsDurationsFlag := flag.String("tps-durations", "0", "duration that each load test will run, accepts a comma separted list that will be applied to multiple values of the `tps` flag (defaults to infinite if not provided, meaning only the first tps case will be tested; additional values will be ignored)") chainIDStr := flag.String("chain", string(flowsdk.Testnet), "chain ID") @@ -119,6 +120,7 @@ func main() { &fungibleTokenAddress, &flowTokenAddress, c.tps, + utils.LoadType(*loadTypeFlag), ) if err != nil { log.Fatal().Err(err).Msgf("unable to create new cont load generator") diff --git a/integration/utils/batchLoadGenerator.go b/integration/utils/batchLoadGenerator.go index d15c366a5b2..f882e98b331 100644 --- a/integration/utils/batchLoadGenerator.go +++ b/integration/utils/batchLoadGenerator.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/onflow/cadence" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/templates" "github.com/rs/zerolog" @@ -25,6 +26,17 @@ type flowAccount struct { signerLock sync.Mutex } +func (acc *flowAccount) signTx(tx *flowsdk.Transaction, keyID int) error { + acc.signerLock.Lock() + defer acc.signerLock.Unlock() + err := tx.SignEnvelope(*acc.address, keyID, acc.signer) + if err != nil { + return err + } + acc.seqNumber++ + return nil +} + func newFlowAccount(i int, address *flowsdk.Address, accountKey *flowsdk.AccountKey, signer crypto.InMemorySigner) *flowAccount { return &flowAccount{ i: i, @@ -46,7 +58,6 @@ type BatchLoadGenerator struct { fungibleTokenAddress *flowsdk.Address accounts []*flowAccount step int - scriptCreator *ScriptCreator txTracker *TxTracker statsTracker *TxStatsTracker } @@ -74,11 +85,6 @@ func NewBatchLoadGenerator(fclient *client.Client, return nil, err } - scriptCreator, err := NewScriptCreator() - if err != nil { - return nil, err - } - lGen := &BatchLoadGenerator{ numberOfAccounts: numberOfAccounts, flowClient: fclient, @@ -89,7 +95,6 @@ func NewBatchLoadGenerator(fclient *client.Client, step: 0, txTracker: txTracker, statsTracker: stTracker, - scriptCreator: scriptCreator, } return lGen, nil } @@ -144,21 +149,29 @@ func (lg *BatchLoadGenerator) setupServiceAccountKeys() error { return err } - keys := make([]*flowsdk.AccountKey, 0) + cadenceKeys := make([]cadence.Value, lg.numberOfAccounts) for i := 0; i < lg.numberOfAccounts; i++ { - keys = append(keys, lg.serviceAccount.accountKey) + cadenceKeys[i] = bytesToCadenceArray(lg.serviceAccount.accountKey.Encode()) } + cadenceKeysArray := cadence.NewArray(cadenceKeys) - addKeysTx, err := lg.scriptCreator.AddKeysToAccountTransaction(*lg.serviceAccount.address, keys) + addKeysScript, err := AddKeyToAccountScript() if err != nil { return err } - addKeysTx. + addKeysTx := flowsdk.NewTransaction(). + SetScript(addKeysScript). + AddAuthorizer(*lg.serviceAccount.address). SetReferenceBlockID(blockRef). SetProposalKey(*lg.serviceAccount.address, lg.serviceAccount.accountKey.ID, lg.serviceAccount.accountKey.SequenceNumber). 
SetPayer(*lg.serviceAccount.address) + err = addKeysTx.AddArgument(cadenceKeysArray) + if err != nil { + return err + } + lg.serviceAccount.signerLock.Lock() defer lg.serviceAccount.signerLock.Unlock() @@ -286,7 +299,7 @@ func (lg *BatchLoadGenerator) distributeInitialTokens() error { for i := 0; i < len(lg.accounts); i++ { // Transfer 10000 tokens - transferScript, err := lg.scriptCreator.TokenTransferScript( + transferScript, err := TokenTransferScript( lg.fungibleTokenAddress, lg.flowTokenAddress, lg.accounts[i].address, @@ -339,7 +352,7 @@ func (lg *BatchLoadGenerator) rotateTokens() error { for i := 0; i < len(lg.accounts); i++ { j := (i + 1) % len(lg.accounts) - transferScript, err := lg.scriptCreator.TokenTransferScript( + transferScript, err := TokenTransferScript( lg.fungibleTokenAddress, lg.accounts[i].address, lg.accounts[j].address, diff --git a/integration/utils/contLoadGenerator.go b/integration/utils/contLoadGenerator.go index 6b90c6325ee..826a3713f8c 100644 --- a/integration/utils/contLoadGenerator.go +++ b/integration/utils/contLoadGenerator.go @@ -16,6 +16,16 @@ import ( "github.com/onflow/flow-go-sdk/crypto" ) +type LoadType string + +const ( + TokenTransferLoadType LoadType = "token-transfer" + TokenAddKeysLoadType LoadType = "add-keys" + CompHeavyLoadType LoadType = "computation-heavy" + EventHeavyLoadType LoadType = "event-heavy" + LedgerHeavyLoadType LoadType = "ledger-heavy" +) + const accountCreationBatchSize = 100 const tokensPerTransfer = 0.01 // flow testnets only have 10e6 total supply, so we choose a small amount here @@ -33,15 +43,16 @@ type ContLoadGenerator struct { serviceAccount *flowAccount flowTokenAddress *flowsdk.Address fungibleTokenAddress *flowsdk.Address + favContractAddress *flowsdk.Address accounts []*flowAccount availableAccounts chan *flowAccount // queue with accounts that are available for workers - scriptCreator *ScriptCreator txTracker *TxTracker txStatsTracker *TxStatsTracker workerStatsTracker *WorkerStatsTracker workers []*Worker blockRef BlockRef stopped bool + loadType LoadType } // NewContLoadGenerator returns a new ContLoadGenerator @@ -56,6 +67,7 @@ func NewContLoadGenerator( fungibleTokenAddress *flowsdk.Address, flowTokenAddress *flowsdk.Address, tps int, + loadType LoadType, ) (*ContLoadGenerator, error) { numberOfAccounts := tps * 10 // 1 second per block, factor 10 for delays to prevent sequence number collisions @@ -72,11 +84,6 @@ func NewContLoadGenerator( return nil, err } - scriptCreator, err := NewScriptCreator() - if err != nil { - return nil, err - } - lGen := &ContLoadGenerator{ log: log, loaderMetrics: loaderMetrics, @@ -94,8 +101,8 @@ func NewContLoadGenerator( txTracker: txTracker, txStatsTracker: txStatsTracker, workerStatsTracker: NewWorkerStatsTracker(), - scriptCreator: scriptCreator, blockRef: NewBlockRef(supervisorClient), + loadType: loadType, } return lGen, nil @@ -116,6 +123,48 @@ func (lg *ContLoadGenerator) Init() error { return err } } + lg.SetupFavContract() + return nil +} + +func (lg *ContLoadGenerator) SetupFavContract() error { + // take one of the accounts + if len(lg.accounts) == 0 { + return fmt.Errorf("can't setup fav contract, zero accounts available") + } + + acc := lg.accounts[0] + + blockRef, err := lg.blockRef.Get() + if err != nil { + lg.log.Error().Err(err).Msgf("error getting reference block") + return err + } + + lg.log.Trace().Msgf("creating fav contract deployment script") + deployScript := DeployingMyFavContractScript() + if err != nil { + lg.log.Error().Err(err).Msgf("error creating 
fav contract deployment script") + return err + } + + lg.log.Trace().Msgf("creating fav contract deployment transaction") + deploymentTx := flowsdk.NewTransaction(). + SetReferenceBlockID(blockRef). + SetScript(deployScript). + SetProposalKey(*acc.address, 0, acc.seqNumber). + SetPayer(*acc.address). + AddAuthorizer(*acc.address) + + lg.log.Trace().Msgf("signing transaction") + acc.signTx(deploymentTx, 0) + if err != nil { + lg.log.Error().Err(err).Msgf("error signing transaction") + return err + } + + lg.sendTx(deploymentTx) + lg.favContractAddress = acc.address return nil } @@ -123,7 +172,18 @@ func (lg *ContLoadGenerator) Init() error { func (lg *ContLoadGenerator) Start() { // spawn workers for i := 0; i < lg.tps; i++ { - worker := NewWorker(i, 1*time.Second, lg.sendTx) + var worker Worker + + switch lg.loadType { + case TokenTransferLoadType: + worker = NewWorker(i, 1*time.Second, lg.sendTokenTransferTx) + case TokenAddKeysLoadType: + worker = NewWorker(i, 1*time.Second, lg.sendAddKeyTx) + // other types + default: + worker = NewWorker(i, 1*time.Second, lg.sendFavContractTx) + } + worker.Start() lg.workerStatsTracker.AddWorker() @@ -142,35 +202,6 @@ func (lg *ContLoadGenerator) Stop() { lg.workerStatsTracker.StopPrinting() } -const createAccountsTransactionTemplate = ` -import FungibleToken from 0x%s -import FlowToken from 0x%s - -transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { - prepare(signer: AuthAccount) { - let vault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) - ?? panic("Could not borrow reference to the owner's Vault") - - var i = 0 - while i < count { - let account = AuthAccount(payer: signer) - account.addPublicKey(publicKey) - - let receiver = account.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() - ?? panic("Could not borrow receiver reference to the recipient's Vault") - - receiver.deposit(from: <-vault.withdraw(amount: initialTokenAmount)) - - i = i + 1 - } - } -} -` - -func createAccountsTransaction(fungibleToken, flowToken flowsdk.Address) []byte { - return []byte(fmt.Sprintf(createAccountsTransactionTemplate, fungibleToken, flowToken)) -} - func (lg *ContLoadGenerator) createAccounts(num int) error { lg.log.Info().Msgf("creating and funding %d accounts...", num) @@ -189,7 +220,7 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { // Generate an account creation script createAccountTx := flowsdk.NewTransaction(). - SetScript(createAccountsTransaction(*lg.fungibleTokenAddress, *lg.flowTokenAddress)). + SetScript(CreateAccountsScript(*lg.fungibleTokenAddress, *lg.flowTokenAddress)). SetReferenceBlockID(blockRef). 
SetProposalKey( *lg.serviceAccount.address, @@ -226,7 +257,6 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { } lg.serviceAccount.signerLock.Lock() - err = createAccountTx.SignEnvelope( *lg.serviceAccount.address, lg.serviceAccount.accountKey.ID, @@ -235,9 +265,7 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { if err != nil { return err } - lg.serviceAccount.accountKey.SequenceNumber++ - lg.serviceAccount.signerLock.Unlock() err = lg.flowClient.SendTransaction(context.Background(), *createAccountTx) @@ -314,7 +342,9 @@ func (lg *ContLoadGenerator) createAccounts(num int) error { return nil } -func (lg *ContLoadGenerator) sendTx(workerID int) { +func (lg *ContLoadGenerator) sendAddKeyTx(workerID int) { + // TODO move this as a configurable parameter + numberOfKeysToAdd := 40 blockRef, err := lg.blockRef.Get() if err != nil { lg.log.Error().Err(err).Msgf("error getting reference block") @@ -326,21 +356,76 @@ func (lg *ContLoadGenerator) sendTx(workerID int) { acc := <-lg.availableAccounts defer func() { lg.availableAccounts <- acc }() + lg.log.Trace().Msgf("creating add proposer key script") + cadenceKeys := make([]cadence.Value, numberOfKeysToAdd) + for i := 0; i < numberOfKeysToAdd; i++ { + cadenceKeys[i] = bytesToCadenceArray(lg.serviceAccount.accountKey.Encode()) + } + cadenceKeysArray := cadence.NewArray(cadenceKeys) + + addKeysScript, err := AddKeyToAccountScript() + if err != nil { + lg.log.Error().Err(err).Msgf("error getting add key to account script") + return + } + + addKeysTx := flowsdk.NewTransaction(). + SetScript(addKeysScript). + AddAuthorizer(*acc.address). + SetReferenceBlockID(blockRef). + SetProposalKey(*lg.serviceAccount.address, lg.serviceAccount.accountKey.ID, lg.serviceAccount.accountKey.SequenceNumber). + SetPayer(*lg.serviceAccount.address) + + err = addKeysTx.AddArgument(cadenceKeysArray) + if err != nil { + lg.log.Error().Err(err).Msgf("error constructing add keys to account transaction") + return + } + + lg.log.Trace().Msgf("creating transaction") + + addKeysTx.SetReferenceBlockID(blockRef). + SetProposalKey(*acc.address, 0, acc.seqNumber). + SetPayer(*acc.address). + AddAuthorizer(*acc.address) + + lg.log.Trace().Msgf("signing transaction") + acc.signTx(addKeysTx, 0) + if err != nil { + lg.log.Error().Err(err).Msgf("error signing transaction") + return + } + + lg.sendTx(addKeysTx) +} + +func (lg *ContLoadGenerator) sendTokenTransferTx(workerID int) { + + blockRef, err := lg.blockRef.Get() + if err != nil { + lg.log.Error().Err(err).Msgf("error getting reference block") + return + } + + lg.log.Trace().Msgf("getting next available account") + acc := <-lg.availableAccounts + defer func() { lg.availableAccounts <- acc }() + lg.log.Trace().Msgf("getting next account") nextAcc := lg.accounts[(acc.i+1)%len(lg.accounts)] lg.log.Trace().Msgf("creating transfer script") - transferScript, err := lg.scriptCreator.TokenTransferScript( + transferScript, err := TokenTransferScript( lg.fungibleTokenAddress, acc.address, nextAcc.address, tokensPerTransfer) if err != nil { - lg.log.Error().Err(err).Msgf("error creating token trasferscript") + lg.log.Error().Err(err).Msgf("error creating token transfer script") return } - lg.log.Trace().Msgf("creating transaction") + lg.log.Trace().Msgf("creating token transfer transaction") transferTx := flowsdk.NewTransaction(). SetReferenceBlockID(blockRef). SetScript(transferScript). 
@@ -349,18 +434,62 @@ func (lg *ContLoadGenerator) sendTx(workerID int) { AddAuthorizer(*acc.address) lg.log.Trace().Msgf("signing transaction") - acc.signerLock.Lock() - err = transferTx.SignEnvelope(*acc.address, 0, acc.signer) + acc.signTx(transferTx, 0) + if err != nil { + lg.log.Error().Err(err).Msgf("error signing transaction") + return + } + + lg.sendTx(transferTx) +} + +// TODO update this to include loadtype +func (lg *ContLoadGenerator) sendFavContractTx(workerID int) { + + blockRef, err := lg.blockRef.Get() + if err != nil { + lg.log.Error().Err(err).Msgf("error getting reference block") + return + } + + lg.log.Trace().Msgf("getting next available account") + + acc := <-lg.availableAccounts + defer func() { lg.availableAccounts <- acc }() + var txScript []byte + + switch lg.loadType { + case CompHeavyLoadType: + txScript = ComputationHeavyScript(*lg.favContractAddress) + case EventHeavyLoadType: + txScript = EventHeavyScript(*lg.favContractAddress) + case LedgerHeavyLoadType: + txScript = LedgerHeavyScript(*lg.favContractAddress) + } + + lg.log.Trace().Msgf("creating transaction") + tx := flowsdk.NewTransaction(). + SetReferenceBlockID(blockRef). + SetScript(txScript). + SetProposalKey(*acc.address, 0, acc.seqNumber). + SetPayer(*acc.address). + AddAuthorizer(*acc.address) + + lg.log.Trace().Msgf("signing transaction") + acc.signTx(tx, 0) if err != nil { - acc.signerLock.Unlock() lg.log.Error().Err(err).Msgf("error signing transaction") return } - acc.seqNumber++ - acc.signerLock.Unlock() + + lg.sendTx(tx) +} + +func (lg *ContLoadGenerator) sendTx(tx *flowsdk.Transaction) { + // TODO move this as a configurable parameter lg.log.Trace().Msgf("sending transaction") - err = lg.flowClient.SendTransaction(context.Background(), *transferTx) + err := lg.flowClient.SendTransaction(context.Background(), *tx) if err != nil { lg.log.Error().Err(err).Msgf("error sending transaction") return @@ -373,34 +502,34 @@ func (lg *ContLoadGenerator) sendTx(workerID int) { if lg.trackTxs { stopped := false wg := sync.WaitGroup{} - lg.txTracker.AddTx(transferTx.ID(), + lg.txTracker.AddTx(tx.ID(), nil, func(_ flowsdk.Identifier, res *flowsdk.TransactionResult) { - lg.log.Trace().Str("tx_id", transferTx.ID().String()).Msgf("finalized tx") + lg.log.Trace().Str("tx_id", tx.ID().String()).Msgf("finalized tx") if !stopped { stopped = true wg.Done() } }, // on finalized func(_ flowsdk.Identifier, _ *flowsdk.TransactionResult) { - lg.log.Trace().Str("tx_id", transferTx.ID().String()).Msgf("sealed tx") + lg.log.Trace().Str("tx_id", tx.ID().String()).Msgf("sealed tx") }, // on sealed func(_ flowsdk.Identifier) { - lg.log.Warn().Str("tx_id", transferTx.ID().String()).Msgf("tx expired") + lg.log.Warn().Str("tx_id", tx.ID().String()).Msgf("tx expired") if !stopped { stopped = true wg.Done() } }, // on expired func(_ flowsdk.Identifier) { - lg.log.Warn().Str("tx_id", transferTx.ID().String()).Msgf("tx timed out") + lg.log.Warn().Str("tx_id", tx.ID().String()).Msgf("tx timed out") if !stopped { stopped = true wg.Done() } }, // on timout func(_ flowsdk.Identifier, err error) { - lg.log.Error().Err(err).Str("tx_id", transferTx.ID().String()).Msgf("tx error") + lg.log.Error().Err(err).Str("tx_id", tx.ID().String()).Msgf("tx error") if !stopped { stopped = true wg.Done() diff --git a/integration/utils/scriptCreator.go b/integration/utils/scriptCreator.go deleted file mode 100644 index c71ccb25c3a..00000000000 --- a/integration/utils/scriptCreator.go +++ /dev/null @@ -1,92 +0,0 @@ -package utils - -import ( - "fmt" - 
"io/ioutil" - "net/http" - "strings" - - "github.com/onflow/cadence" - flowsdk "github.com/onflow/flow-go-sdk" -) - -const ( - fungibleTokenTransactionsBaseURL = "https://raw.githubusercontent.com/onflow/flow-ft/0e8024a483ce85c06eb165c2d4c9a5795ba167a1/src/transactions/" - transferTokens = "transfer_tokens.cdc" -) - -// ScriptCreator creates transaction scripts -type ScriptCreator struct { - tokenTransferTemplate []byte -} - -// NewScriptCreator returns a new instance of ScriptCreator -func NewScriptCreator() (*ScriptCreator, error) { - ttt, err := getTokenTransferTemplate() - if err != nil { - return nil, err - } - return &ScriptCreator{tokenTransferTemplate: ttt}, nil -} - -// TokenTransferScript returns a transaction script for transfering `amount` flow tokens to `toAddr` address -func (sc *ScriptCreator) TokenTransferScript(ftAddr, flowToken, toAddr *flowsdk.Address, amount float64) ([]byte, error) { - withFTAddr := strings.ReplaceAll(string(sc.tokenTransferTemplate), "0x02", "0x"+ftAddr.Hex()) - withFlowTokenAddr := strings.Replace(string(withFTAddr), "0x03", "0x"+flowToken.Hex(), 1) - withToAddr := strings.Replace(string(withFlowTokenAddr), "0x04", "0x"+toAddr.Hex(), 1) - withAmount := strings.Replace(string(withToAddr), fmt.Sprintf("%f", amount), "0.01", 1) - return []byte(withAmount), nil -} - -var addKeysScript = []byte(` -transaction(keys: [[UInt8]]) { - prepare(signer: AuthAccount) { - for key in keys { - signer.addPublicKey(key) - } - } -} -`) - -// AddKeysToAccountTransaction returns a transaction for adding keys to an already existing account -func (sc *ScriptCreator) AddKeysToAccountTransaction( - address flowsdk.Address, - keys []*flowsdk.AccountKey, -) (*flowsdk.Transaction, error) { - cadenceKeys := make([]cadence.Value, len(keys)) - - for i, key := range keys { - cadenceKeys[i] = bytesToCadenceArray(key.Encode()) - } - - cadenceKeysArray := cadence.NewArray(cadenceKeys) - - tx := flowsdk.NewTransaction(). - SetScript(addKeysScript). - AddAuthorizer(address) - - err := tx.AddArgument(cadenceKeysArray) - if err != nil { - return nil, err - } - - return tx, err -} - -func getTokenTransferTemplate() ([]byte, error) { - resp, err := http.Get(fungibleTokenTransactionsBaseURL + transferTokens) - if err != nil { - return nil, err - } - defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) -} - -func bytesToCadenceArray(l []byte) cadence.Array { - values := make([]cadence.Value, len(l)) - for i, b := range l { - values[i] = cadence.NewUInt8(b) - } - - return cadence.NewArray(values) -} diff --git a/integration/utils/scripts.go b/integration/utils/scripts.go new file mode 100644 index 00000000000..00453dc8053 --- /dev/null +++ b/integration/utils/scripts.go @@ -0,0 +1,236 @@ +package utils + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/onflow/cadence" + flowsdk "github.com/onflow/flow-go-sdk" +) + +const tokenTransferTransactionTemplate = ` +import FungibleToken from 0x02 +import FlowToken from 0x03 + +transaction { + let sentVault: @FungibleToken.Vault + prepare(signer: AuthAccount) { + let storedVault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Unable to borrow a reference to the sender's Vault") + self.sentVault <- storedVault.withdraw(amount: 10.0) + } + execute { + let recipient = getAccount(0x04) + let receiver = recipient + .getCapability(/public/flowTokenReceiver)! + .borrow<&FlowToken.Vault{FungibleToken.Receiver}>() + ?? 
panic("Unable to borrow receiver reference for recipient") + receiver.deposit(from: <-self.sentVault) + } +} +` + +// TokenTransferScript returns a transaction script for transfering `amount` flow tokens to `toAddr` address +func TokenTransferScript(ftAddr, flowToken, toAddr *flowsdk.Address, amount float64) ([]byte, error) { + withFTAddr := strings.ReplaceAll(string(tokenTransferTransactionTemplate), "0x02", "0x"+ftAddr.Hex()) + withFlowTokenAddr := strings.Replace(string(withFTAddr), "0x03", "0x"+flowToken.Hex(), 1) + withToAddr := strings.Replace(string(withFlowTokenAddr), "0x04", "0x"+toAddr.Hex(), 1) + withAmount := strings.Replace(string(withToAddr), fmt.Sprintf("%f", amount), "0.01", 1) + return []byte(withAmount), nil +} + +// AddKeyToAccountScript returns a transaction script to add keys to an account +func AddKeyToAccountScript() ([]byte, error) { + return []byte(` + transaction(keys: [[UInt8]]) { + prepare(signer: AuthAccount) { + for key in keys { + signer.addPublicKey(key) + } + } + } + `), nil +} + +const createAccountsScriptTemplate = ` +import FungibleToken from 0x%s +import FlowToken from 0x%s + +transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { + prepare(signer: AuthAccount) { + let vault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Could not borrow reference to the owner's Vault") + + var i = 0 + while i < count { + let account = AuthAccount(payer: signer) + account.addPublicKey(publicKey) + + let receiver = account.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() + ?? panic("Could not borrow receiver reference to the recipient's Vault") + + receiver.deposit(from: <-vault.withdraw(amount: initialTokenAmount)) + + i = i + 1 + } + } +} +` + +// CreateAccountsScript returns a transaction script for creating an account +func CreateAccountsScript(fungibleToken, flowToken flowsdk.Address) []byte { + return []byte(fmt.Sprintf(createAccountsScriptTemplate, fungibleToken, flowToken)) +} + +const myFavContract = ` +access(all) contract MyFavContract { + + init() { + self.itemCounter = UInt32(0) + self.items = [] + } + + // items + access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) + + access(self) var itemCounter: UInt32 + + access(all) struct Item { + + pub let itemID: UInt32 + + pub let metadata: {String: String} + + init(_ metadata: {String: String}) { + self.itemID = MyFavContract.itemCounter + self.metadata = metadata + + // inc the counter + MyFavContract.itemCounter = MyFavContract.itemCounter + UInt32(1) + + // emit event + emit NewItemAddedEvent(id: self.itemID, metadata: self.metadata) + } + } + + access(self) var items: [Item] + + access(all) fun AddItem(_ metadata: {String: String}){ + let item = Item(metadata) + self.items.append(item) + } + + access(all) fun AddManyRandomItems(_ n: Int){ + var i = 0 + while i < n { + MyFavContract.AddItem({"data": "ABCDEFGHIJKLMNOP"}) + i = i + 1 + } + } + + // heavy operations + // computation heavy function + access(all) fun ComputationHeavy() { + var s: Int256 = 1024102410241024 + var i = 0 + var a = Int256(7) + var b = Int256(5) + var c = Int256(2) + while i < 15000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + log(i) + } + + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + + // event heavy function + access(all) fun EventHeavy() { + var s: Int256 = 1024102410241024 + var i = 0 + + while i < 220 { + emit LargeEvent(value: s, str: s.toString(), list:[], 
dic:{s.toString():s.toString()}) + i = i + 1 + } + log(i) + } + + access(all) fun LedgerInteractionHeavy() { + MyFavContract.AddManyRandomItems(800) + } +} +` + +const deployingMyFavContractScriptTemplate = ` +transaction { + prepare(signer: AuthAccount) { + signer.setCode("%s".decodeHex()) + } +} +` + +func DeployingMyFavContractScript() []byte { + return []byte(fmt.Sprintf(deployingMyFavContractScriptTemplate, hex.EncodeToString([]byte(myFavContract)))) + +} + +const eventHeavyScriptTemplate = ` +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.EventHeavy() + } +} +` + +func EventHeavyScript(favContractAddress flowsdk.Address) []byte { + return []byte(fmt.Sprintf(eventHeavyScriptTemplate, favContractAddress)) +} + +const compHeavyScriptTemplate = ` +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.ComputationHeavy() + } +} +` + +func ComputationHeavyScript(favContractAddress flowsdk.Address) []byte { + return []byte(fmt.Sprintf(compHeavyScriptTemplate, favContractAddress)) +} + +const ledgerHeavyScriptTemplate = ` +import MyFavContract from 0x%s + +transaction { + prepare(acct: AuthAccount) {} + execute { + MyFavContract.LedgerInteractionHeavy() + } +} +` + +func LedgerHeavyScript(favContractAddress flowsdk.Address) []byte { + return []byte(fmt.Sprintf(ledgerHeavyScriptTemplate, favContractAddress)) +} + +func bytesToCadenceArray(l []byte) cadence.Array { + values := make([]cadence.Value, len(l)) + for i, b := range l { + values[i] = cadence.NewUInt8(b) + } + + return cadence.NewArray(values) +} + +// TODO add tx size heavy similar to add keys From 992d287600e0f70e7a50fcf693b7d9610d9b11df Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Wed, 28 Oct 2020 22:13:13 -0700 Subject: [PATCH 075/105] bug fixed: VoteAggregator now handles block equivocation without error --- .../voteaggregator/vote_aggregator.go | 27 +++++++--- .../voteaggregator/vote_aggregator_test.go | 50 ++++++++++++++++--- 2 files changed, 64 insertions(+), 13 deletions(-) diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator.go b/consensus/hotstuff/voteaggregator/vote_aggregator.go index d8585fa0b69..1d626b7aaef 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator.go +++ b/consensus/hotstuff/voteaggregator/vote_aggregator.go @@ -256,27 +256,42 @@ func (va *VoteAggregator) convertPendingVotes(pendingVotes []*model.Vote, block return nil } -// storeIncorporatedVote stores incorporated votes and accumulate stakes -// it drops invalid votes and duplicate votes +// storeIncorporatedVote stores incorporated votes and accumulates weight +// It drops invalid votes. +// +// Handling of DOUBLE VOTES (equivocation): +// Including double votes in building the QC for their respective block does _not_ +// reduce the security. The main deterrent is the slashing for equivocation, which only +// requires detection of double voting. Therefore, we take the simpler approach and do +// not discard votes from equivocating nodes. When encountering vote equivocation: +// * notify consumer +// * do not error, as a replica must handle this case is part of the "normal operation" +// * Provided the vote is valid by itself, treat it as valid. The validity of a vote +// needs to be objective and not depend on whether the replica has knowledge of a +// conflicting vote. 
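
As a rough illustration of the detection side of this policy, here is a standalone sketch that records the first vote seen per (view, voter) and flags any later, conflicting vote as equivocation while still accepting it. The Vote type and notify callback are simplified stand-ins, not the real model.Vote or notifier interfaces:

    package main

    import "fmt"

    // Vote is a simplified stand-in for the real vote type.
    type Vote struct {
        View    uint64
        VoterID string
        BlockID string
    }

    type key struct {
        view  uint64
        voter string
    }

    // detector remembers the first vote per (view, voter) and reports equivocation.
    type detector struct {
        seen   map[key]Vote
        notify func(first, second Vote)
    }

    // add stores the vote and returns true; if the voter already voted for a
    // different block in this view, the notify callback fires, but the vote
    // is still treated as valid.
    func (d *detector) add(v Vote) bool {
        k := key{v.View, v.VoterID}
        if first, ok := d.seen[k]; ok && first.BlockID != v.BlockID {
            d.notify(first, v) // equivocation detected: notify, do not error
        } else if !ok {
            d.seen[k] = v
        }
        return true
    }

    func main() {
        d := &detector{
            seen:   make(map[key]Vote),
            notify: func(a, b Vote) { fmt.Printf("double vote by %s: %s vs %s\n", a.VoterID, a.BlockID, b.BlockID) },
        }
        d.add(Vote{View: 5, VoterID: "node1", BlockID: "blockA"})
        d.add(Vote{View: 5, VoterID: "node1", BlockID: "blockB"}) // prints the double-vote notice
    }
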
+// +// Note that treating a valid vote as invalid would create the following additional edge case: +// * consider a primary that is proposing two conflicting blocks (block equivocation) +// * Assume the case where we would treat the proposer's vote for its second block +// (embedded in the block) as invalid +// * then, we would arrive at the conclusion that the second block itself is invalid +// This would violate objective validity of blocks. func (va *VoteAggregator) validateAndStoreIncorporatedVote(vote *model.Vote, block *model.Block) (bool, error) { // validate the vote voter, err := va.voteValidator.ValidateVote(vote, block) - if model.IsInvalidVoteError(err) { // does not report invalid vote as an error, notify consumers instead va.notifier.OnInvalidVoteDetected(vote) return false, nil } - if err != nil { return false, fmt.Errorf("could not validate incorporated vote: %w", err) } - // does not report double vote as an error, notify consumers instead + // check for double vote: firstVote, detected := va.detectDoubleVote(vote) if detected { va.notifier.OnDoubleVotingDetected(firstVote, vote) - return false, nil } // update existing voting status or create a new one diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go index 605a250cc6c..4276177e4b2 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go +++ b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/onflow/flow-go/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -18,7 +19,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/validator" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state" @@ -48,7 +48,7 @@ func (as *AggregatorSuite) SetupTest() { // seed the RNG rand.Seed(time.Now().UnixNano()) - // generate the validator set with qualified majority threshold of 5 + // generate the validator set with super-majority threshold of 5 as.participants = unittest.IdentityListFixture(7, unittest.WithRole(flow.RoleConsensus)) // create a mocked snapshot @@ -575,6 +575,45 @@ func (as *AggregatorSuite) TestDuplicateVotesBeforeBlock() { require.NoError(as.T(), err) } +// UNHAPPY PATH +// TestEquivocation_DoubleProposal tests that VoteAggregator handles a double +// proposal equivocation correctly. There are two ways, we can feed a double +// proposal into the VoteAggregator: +// * VoteAggregator.BuildQCOnReceivedBlock +// * VoteAggregator.StoreVoteAndBuildQC +// We test that both handle the double proposal gracefully (without error). +func (as *AggregatorSuite) TestEquivocation_DoubleProposal() { + testView := uint64(5) + bp1 := newMockBlock(as, testView, as.participants[0].NodeID) + bp2 := newMockBlock(as, testView, as.participants[0].NodeID) + + // Each of the blocks contain the proposer's signature. 
Hence, the proposer + // votes for both of its conflicting blocks => expect double vote notification + as.notifier.On("OnDoubleVotingDetected", bp1.ProposerVote(), bp2.ProposerVote()).Return() + + as.aggregator.StoreProposerVote(bp1.ProposerVote()) + qc, built, err := as.aggregator.BuildQCOnReceivedBlock(bp1.Block) + require.Nil(as.T(), qc) + require.False(as.T(), built) + require.NoError(as.T(), err) + + // Feed double proposal into VoteAggregator.BuildQCOnReceivedBlock + as.aggregator.StoreProposerVote(bp2.ProposerVote()) + qc, built, err = as.aggregator.BuildQCOnReceivedBlock(bp2.Block) + require.NoError(as.T(), err) + require.False(as.T(), built) + require.Nil(as.T(), qc) + + // Feed double proposal into VoteAggregator.StoreVoteAndBuildQC + vote2 := as.newMockVote(testView, bp2.Block.BlockID, as.participants[2].NodeID) + qc, built, err = as.aggregator.StoreVoteAndBuildQC(vote2, bp2.Block) + require.NoError(as.T(), err) + require.False(as.T(), built) + require.Nil(as.T(), qc) + + as.notifier.AssertExpectations(as.T()) +} + // ORDER // receive 5 votes, and the block, the QC should contain leader's vote, and the first 4 votes. func (as *AggregatorSuite) TestVoteOrderAfterBlock() { @@ -880,17 +919,14 @@ func (as *AggregatorSuite) TestSufficientRBSig() { } func newMockBlock(as *AggregatorSuite, view uint64, proposerID flow.Identifier) *model.Proposal { - blockHeader := unittest.BlockHeaderFixture() - blockHeader.View = view block := &model.Block{ View: view, - BlockID: blockHeader.ID(), + BlockID: unittest.IdentifierFixture(), ProposerID: proposerID, } - sig := crypto.Signature{} bp := &model.Proposal{ Block: block, - SigData: sig, + SigData: crypto.Signature{}, } as.RegisterProposal(bp) return bp From d1dc1ded99cd68909e4987538656f5cee5a26929 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 12:59:16 -0700 Subject: [PATCH 076/105] executed the last executed on startup block the BlockProcessable until reloading is done --- engine/execution/ingestion/engine.go | 221 ++++++++++++---------- engine/execution/ingestion/engine_test.go | 23 ++- utils/unittest/equals.go | 4 + 3 files changed, 143 insertions(+), 105 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 03e6cbc05db..f6930d9a1bc 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -186,11 +186,11 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } -func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { +func (e *Engine) finalizedUnexecutedBlocks() (flow.Identifier, []flow.Identifier, error) { // get finalized height final, err := e.state.Final().Head() if err != nil { - return nil, fmt.Errorf("could not get finalized block: %w", err) + return flow.ZeroID, nil, fmt.Errorf("could not get finalized block: %w", err) } // find the first unexecuted and finalized block @@ -202,19 +202,21 @@ func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { // because the next loop will ensure it only iterate through finalized // block. 
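
The loop that follows walks down from the finalized height until it hits an executed block; everything above that point up to the finalized height is the unexecuted range. A simplified sketch of that search over heights, using a plain executed-set in place of the real protocol-state and execution-state lookups (and ignoring the root-block edge case):

    package main

    import "fmt"

    // unexecutedRange walks down from final until it finds an executed height,
    // then returns that height and every height above it up to final.
    func unexecutedRange(final uint64, executed map[uint64]bool) (lastExecuted uint64, unexecuted []uint64) {
        lastExecuted = final
        for ; lastExecuted > 0; lastExecuted-- {
            if executed[lastExecuted] {
                break
            }
        }
        for h := lastExecuted + 1; h <= final; h++ {
            unexecuted = append(unexecuted, h)
        }
        return lastExecuted, unexecuted
    }

    func main() {
        executed := map[uint64]bool{0: true, 1: true, 2: true}
        last, todo := unexecutedRange(5, executed)
        fmt.Println(last, todo) // 2 [3 4 5]
    }
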
lastExecuted := final.Height + lastExecutedID := final.ID() for ; lastExecuted > 0; lastExecuted-- { header, err := e.state.AtHeight(lastExecuted).Head() if err != nil { - return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) + return flow.ZeroID, nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) } executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) if err != nil { - return nil, fmt.Errorf("could not check whether block is executed: %w", err) + return flow.ZeroID, nil, fmt.Errorf("could not check whether block is executed: %w", err) } if executed { + lastExecutedID = header.ID() break } } @@ -230,13 +232,13 @@ func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { for height := firstUnexecuted; height <= final.Height; height++ { header, err := e.state.AtHeight(height).Head() if err != nil { - return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + return flow.ZeroID, nil, fmt.Errorf("could not get header at height: %v, %w", height, err) } unexecuted = append(unexecuted, header.ID()) } - return unexecuted, nil + return lastExecutedID, unexecuted, nil } func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { @@ -261,58 +263,82 @@ func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { return unexecuted, nil } -func (e *Engine) unexecutedBlocks() (finalized []flow.Identifier, pending []flow.Identifier, err error) { - finalized, err = e.finalizedUnexecutedBlocks() +func (e *Engine) unexecutedBlocks() (lastExecutedFinal flow.Identifier, finalized []flow.Identifier, pending []flow.Identifier, err error) { + lastExecutedFinal, finalized, err = e.finalizedUnexecutedBlocks() if err != nil { - return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") + return flow.ZeroID, nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") } pending, err = e.pendingUnexecutedBlocks() if err != nil { - return nil, nil, fmt.Errorf("could not read pending unexecuted blocks") + return flow.ZeroID, nil, nil, fmt.Errorf("could not read pending unexecuted blocks") } - return finalized, pending, nil + return lastExecutedFinal, finalized, pending, nil } // on nodes startup, we need to load all the unexecuted blocks to the execution queues. // blocks have to be loaded in the way that the parent has been loaded before loading its children func (e *Engine) reloadUnexecutedBlocks() error { - finalized, pending, err := e.unexecutedBlocks() - if err != nil { - return fmt.Errorf("could not reload unexecuted blocks: %w", err) - } + // it's possible the BlockProcessable is called during the reloading, as the follower engine + // will receive blocks before ingestion engine is ready. + // The problem with that is, since the reloading hasn't finished yet, enqueuing the new block from + // the BlockProcessable callback will fail, because its parent block might have not been reloaded + // to the queues yet. + // So one solution here is to lock the execution queues during reloading, so that if BlockProcessable + // is called before reloading is finished, it will be blocked, which will avoid that edge case. + return e.mempool.Run(func( + blockByCollection *stdmap.BlockByCollectionBackdata, + executionQueues *stdmap.QueuesBackdata) error { + + lastExecutedFinal, finalized, pending, err := e.unexecutedBlocks() + if err != nil { + return fmt.Errorf("could not reload unexecuted blocks: %w", err) + } - unexecuted := append(finalized, pending...) 
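
The reload is deliberately wrapped in mempool.Run so that any BlockProcessable callback that also needs the queues blocks until reloading has finished. A minimal sketch of that run-under-lock pattern, with a trivial guarded queue standing in for the real mempool backdata:

    package main

    import (
        "fmt"
        "sync"
    )

    // guarded serializes access to its queue: every caller goes through Run,
    // so a long-running reload blocks later enqueue attempts until it is done.
    type guarded struct {
        mu    sync.Mutex
        queue []string
    }

    func (g *guarded) Run(f func(queue *[]string) error) error {
        g.mu.Lock()
        defer g.mu.Unlock()
        return f(&g.queue)
    }

    func main() {
        g := &guarded{}
        var wg sync.WaitGroup

        // a "reload" that enqueues several blocks while holding the lock
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = g.Run(func(queue *[]string) error {
                *queue = append(*queue, "reloaded-1", "reloaded-2")
                return nil
            })
        }()

        // a concurrent "BlockProcessable"-style enqueue that must wait its turn
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = g.Run(func(queue *[]string) error {
                *queue = append(*queue, "new-block")
                return nil
            })
        }()

        wg.Wait()
        fmt.Println(g.queue) // both writers ran, in whichever order won the lock
    }
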
+ unexecuted := append(finalized, pending...) - log := e.log.With(). - Int("total", len(unexecuted)). - Int("finalized", len(finalized)). - Int("pending", len(pending)).Logger() + log := e.log.With(). + Int("total", len(unexecuted)). + Int("finalized", len(finalized)). + Int("pending", len(pending)).Logger() - log.Info().Msg("reloading unexecuted blocks") + log.Info().Msg("reloading unexecuted blocks") - for _, blockID := range unexecuted { - err := e.reloadBlock(blockID) + // saving an executed block is currently not transactional, so it's possible + // the block is marked as executed but the receipt is not saved during a crash + // in order to mitigate the problem, we always re-execute the last executed and finalized + // block + err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedFinal) if err != nil { - return fmt.Errorf("could not reload block: %v, %w", blockID, err) + return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedFinal, err) } - e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block") - } + for _, blockID := range unexecuted { + err := e.reloadBlock(blockByCollection, executionQueues, blockID) + if err != nil { + return fmt.Errorf("could not reload block: %v, %w", blockID, err) + } - log.Info().Msg("all unexecuted have been successfully reloaded") + e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block") + } - return nil + log.Info().Msg("all unexecuted have been successfully reloaded") + + return nil + }) } -func (e *Engine) reloadBlock(blockID flow.Identifier) error { +func (e *Engine) reloadBlock( + blockByCollection *stdmap.BlockByCollectionBackdata, + executionQueues *stdmap.QueuesBackdata, + blockID flow.Identifier) error { block, err := e.blocks.ByID(blockID) if err != nil { return fmt.Errorf("could not get block by ID: %v %w", blockID, err) } - err = e.enqueueBlockAndCheckExecutable(block, false) + err = e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, false) if err != nil { return fmt.Errorf("could not enqueue block on reloading: %w", err) @@ -362,7 +388,14 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { // unexecuted block e.metrics.StartBlockReceivedToExecuted(blockID) - err = e.enqueueBlockAndCheckExecutable(block, true) + // acquiring the lock so that there is only one process modifying the queue + err = e.mempool.Run(func( + blockByCollection *stdmap.BlockByCollectionBackdata, + executionQueues *stdmap.QueuesBackdata, + ) error { + return e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, true) + }) + if err != nil { return fmt.Errorf("could not enqueue block: %w", err) } @@ -370,7 +403,11 @@ func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { return nil } -func (e *Engine) enqueueBlockAndCheckExecutable(block *flow.Block, checkStateSync bool) error { +func (e *Engine) enqueueBlockAndCheckExecutable( + blockByCollection *stdmap.BlockByCollectionBackdata, + executionQueues *stdmap.QueuesBackdata, + block *flow.Block, + checkStateSync bool) error { executableBlock := &entity.ExecutableBlock{ Block: block, CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), @@ -378,77 +415,69 @@ func (e *Engine) enqueueBlockAndCheckExecutable(block *flow.Block, checkStateSyn blockID := executableBlock.ID() - // acquiring the lock so that there is only one process modifying the queue - return e.mempool.Run( - func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues 
*stdmap.QueuesBackdata, - ) error { - // adding the block to the queue, - queue, added := enqueue(executableBlock, executionQueues) + // adding the block to the queue, + queue, added := enqueue(executableBlock, executionQueues) - // if it's not added, it means the block is not a new block, it already - // exists in the queue, then bail - if !added { - log.Debug().Msg("block already exists in the execution queue") - return nil - } + // if it's not added, it means the block is not a new block, it already + // exists in the queue, then bail + if !added { + log.Debug().Msg("block already exists in the execution queue") + return nil + } - firstUnexecutedHeight := queue.Head.Item.Height() - if checkStateSync { - // whenever the queue grows, we need to check whether the state sync should be - // triggered. - e.unit.Launch(func() { - e.checkStateSyncStart(firstUnexecutedHeight) - }) - } + firstUnexecutedHeight := queue.Head.Item.Height() + if checkStateSync { + // whenever the queue grows, we need to check whether the state sync should be + // triggered. + e.unit.Launch(func() { + e.checkStateSyncStart(firstUnexecutedHeight) + }) + } - // check if a block is executable. - // a block is executable if the following conditions are all true - // 1) the parent state commitment is ready - // 2) the collections for the block payload are ready - // 3) the child block is ready for querying the randomness - - // check if the block's parent has been executed. (we can't execute the block if the parent has - // not been executed yet) - // check if there is a statecommitment for the parent block - parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) - - // if we found the statecommitment for the parent block, then add it to the executable block. - if err == nil { - executableBlock.StartState = parentCommitment - } else if errors.Is(err, storage.ErrNotFound) { - // the parent block is an unexecuted block. - // if the queue only has one block, and its parent doesn't - // exist in the queue, then we need to load the block from the storage. - _, ok := queue.Nodes[blockID] - if !ok { - log.Error().Msgf("an unexecuted parent block is missing in the queue") - } - } else { - // if there is exception, then crash - log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") - } + // check if a block is executable. + // a block is executable if the following conditions are all true + // 1) the parent state commitment is ready + // 2) the collections for the block payload are ready + // 3) the child block is ready for querying the randomness + + // check if the block's parent has been executed. (we can't execute the block if the parent has + // not been executed yet) + // check if there is a statecommitment for the parent block + parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) + + // if we found the statecommitment for the parent block, then add it to the executable block. + if err == nil { + executableBlock.StartState = parentCommitment + } else if errors.Is(err, storage.ErrNotFound) { + // the parent block is an unexecuted block. + // if the queue only has one block, and its parent doesn't + // exist in the queue, then we need to load the block from the storage. 
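
As a rough summary of the conditions listed above, a block is only handed to execution once its parent's state commitment is known and all of its payload collections have arrived. A simplified predicate over a hypothetical executable-block shape, loosely mirroring the completeness check the engine performs before executing a block:

    package main

    import "fmt"

    // executableBlock is a simplified stand-in for entity.ExecutableBlock.
    type executableBlock struct {
        startState  []byte          // parent state commitment, nil until known
        collections map[string]bool // guarantee ID -> collection received
        guarantees  []string        // collection guarantees listed in the payload
    }

    // isComplete returns true once the parent's state commitment is set and
    // every payload collection has been received.
    func (b *executableBlock) isComplete() bool {
        if b.startState == nil {
            return false
        }
        for _, g := range b.guarantees {
            if !b.collections[g] {
                return false
            }
        }
        return true
    }

    func main() {
        b := &executableBlock{
            startState:  []byte{0x01},
            collections: map[string]bool{"col-1": true},
            guarantees:  []string{"col-1", "col-2"},
        }
        fmt.Println(b.isComplete()) // false: col-2 has not arrived yet
        b.collections["col-2"] = true
        fmt.Println(b.isComplete()) // true
    }
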
+ _, ok := queue.Nodes[blockID] + if !ok { + log.Error().Msgf("an unexecuted parent block is missing in the queue") + } + } else { + // if there is exception, then crash + log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") + } - // check if we have all the collections for the block, and request them if there is missing. - err = e.matchOrRequestCollections(executableBlock, blockByCollection) - if err != nil { - return fmt.Errorf("cannot send collection requests: %w", err) - } + // check if we have all the collections for the block, and request them if there is missing. + err = e.matchOrRequestCollections(executableBlock, blockByCollection) + if err != nil { + return fmt.Errorf("cannot send collection requests: %w", err) + } - // execute the block if the block is ready to be executed - completed := e.executeBlockIfComplete(executableBlock) + // execute the block if the block is ready to be executed + completed := e.executeBlockIfComplete(executableBlock) - log.Info(). - // if the execution is halt, but the queue keeps growing, we could check which block - // hasn't been executed. - Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). - Bool("completed", completed). - Msg("block is enqueued") + log.Info(). + // if the execution is halt, but the queue keeps growing, we could check which block + // hasn't been executed. + Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). + Bool("completed", completed). + Msg("block is enqueued") - return nil - }, - ) + return nil } // executeBlock will execute the block. diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index c8ef0bb2f81..0f7a9f7b7fd 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -647,9 +647,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { es := mocks.NewES(seal) engine := newIngestionEngine(t, ps, es) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{}, pending) }) @@ -672,9 +673,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { es := mocks.NewES(seal) engine := newIngestionEngine(t, ps, es) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockA.ID(), blockB.ID(), blockC.ID(), blockD.ID()}, pending) }) @@ -700,9 +702,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockA) mocks.ExecuteBlock(t, es, blockB) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockC.ID(), blockD.ID()}, pending) }) @@ -722,8 +725,6 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Extend(blockC)) require.NoError(t, ps.Mutate().Extend(blockD)) - require.NoError(t, ps.Mutate().Finalize(blockA.ID())) - require.NoError(t, ps.Mutate().Finalize(blockB.ID())) require.NoError(t, 
ps.Mutate().Finalize(blockC.ID())) es := mocks.NewES(seal) @@ -733,9 +734,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockB) mocks.ExecuteBlock(t, es, blockC) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) }) @@ -764,9 +766,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockB) mocks.ExecuteBlock(t, es, blockC) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) }) @@ -795,9 +798,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockC) mocks.ExecuteBlock(t, es, blockD) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, blockA.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{}, pending) }) @@ -853,9 +857,10 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockG) mocks.ExecuteBlock(t, es, blockJ) - finalized, pending, err := engine.unexecutedBlocks() + lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) + unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{ blockI.ID(), // I is still pending, and unexecuted diff --git a/utils/unittest/equals.go b/utils/unittest/equals.go index 78eb707acc7..dc02844e891 100644 --- a/utils/unittest/equals.go +++ b/utils/unittest/equals.go @@ -16,6 +16,10 @@ func toHex(ids []flow.Identifier) []string { return hex } +func IDEqual(t *testing.T, id1, id2 flow.Identifier) { + require.Equal(t, id1.String(), id2.String()) +} + func IDsEqual(t *testing.T, id1, id2 []flow.Identifier) { require.Equal(t, toHex(id1), toHex(id2)) } From 4c3b1fe87c686e9f965a2f96a434123e84ee3637 Mon Sep 17 00:00:00 2001 From: Alexander Hentschel Date: Thu, 29 Oct 2020 14:19:18 -0700 Subject: [PATCH 077/105] extending logging --- engine/consensus/matching/engine.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 7a23838baa9..d0afee6c8ad 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -215,7 +215,6 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return engine.NewInvalidInputErrorf("execution receipt without FinalStateCommit: %x", receipt.ID()) } log = log.With().Hex("final_state", resultFinalState).Logger() - log.Info().Msg("execution receipt received") // CAUTION INCOMPLETE // For many other messages, we check that the message's origin (as established by the @@ -239,6 +238,12 @@ func (e *Engine) onReceipt(originID flow.Identifier, receipt *flow.ExecutionRece return nil } + log = log.With(). + Uint64("block_view", head.View). + Uint64("block_height", head.Height). 
+ Logger() + log.Info().Msg("execution receipt received") + // if Execution Receipt is for block whose height is lower or equal to already sealed height // => drop Receipt sealed, err := e.state.Sealed().Head() From 0653782bed73fadbadc4821484d8aa74058f81c3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 14:30:28 -0700 Subject: [PATCH 078/105] skip the root block --- engine/execution/ingestion/engine.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index f6930d9a1bc..daf4f10a016 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -306,12 +306,24 @@ func (e *Engine) reloadUnexecutedBlocks() error { log.Info().Msg("reloading unexecuted blocks") // saving an executed block is currently not transactional, so it's possible - // the block is marked as executed but the receipt is not saved during a crash - // in order to mitigate the problem, we always re-execute the last executed and finalized + // the block is marked as executed but the receipt might not be saved during a crash. + // in order to mitigate this problem, we always re-execute the last executed and finalized // block - err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedFinal) + // there is an exception, if the last executed final is a root block, then don't execute it, + // because the root has already been executed during bootstrapping phase. And re-executing + // a root block will fail, because the root block doesn't have a parent block, and could not + // get the result of it + // TODO: remove this, when saving a executed block is transactional + last, err := e.state.AtBlockID(lastExecutedFinal).Head() if err != nil { - return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedFinal, err) + return fmt.Errorf("could not get last executed final by ID: %w") + } + + if last.ParentID != flow.ZeroID { + err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedFinal) + if err != nil { + return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedFinal, err) + } } for _, blockID := range unexecuted { @@ -559,7 +571,7 @@ func (e *Engine) onBlockExecuted(executed *entity.ExecutableBlock, finalState fl // when the block no longer exists in the queue, it means there was a race condition that // two onBlockExecuted was called for the same block, and one process has already removed the // block from the queue, so we will print an error here - return fmt.Errorf("block has been executed already, no long exists in the queue") + return fmt.Errorf("block has been executed already, no longer exists in the queue") } // dismount the executed block and all its children From 72ab7fc1633541c5c0df92efdb8a8fb721998d88 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 14:38:58 -0700 Subject: [PATCH 079/105] fix linting --- engine/execution/ingestion/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index daf4f10a016..693039fdced 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -316,7 +316,7 @@ func (e *Engine) reloadUnexecutedBlocks() error { // TODO: remove this, when saving a executed block is transactional last, err := e.state.AtBlockID(lastExecutedFinal).Head() if err != nil { - return 
fmt.Errorf("could not get last executed final by ID: %w") + return fmt.Errorf("could not get last executed final by ID: %w", err) } if last.ParentID != flow.ZeroID { From c7ed0cfa26cdc73e5c2dc2e28c9d18860ca186e3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 15:15:47 -0700 Subject: [PATCH 080/105] disable flakey tests --- network/gossip/libp2p/keyTranslator_test.go | 4 +++- network/gossip/libp2p/libp2pNode_test.go | 4 +++- network/gossip/libp2p/libp2pUtils_test.go | 4 +++- network/gossip/libp2p/peerManager_test.go | 4 +++- network/gossip/libp2p/pubsub_test.go | 4 +++- network/gossip/libp2p/sporking_test.go | 5 +++-- 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/network/gossip/libp2p/keyTranslator_test.go b/network/gossip/libp2p/keyTranslator_test.go index dbcd7d4509a..d292d289a4d 100644 --- a/network/gossip/libp2p/keyTranslator_test.go +++ b/network/gossip/libp2p/keyTranslator_test.go @@ -23,7 +23,9 @@ type KeyTranslatorTestSuite struct { // TestKeyTranslatorTestSuite runs all the test methods in this test suite func TestKeyTranslatorTestSuite(t *testing.T) { - suite.Run(t, new(KeyTranslatorTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = KeyTranslatorTestSuite{} + // suite.Run(t, new(KeyTranslatorTestSuite)) } // TestPrivateKeyConversion tests that Private keys are successfully converted from Flow to LibP2P representation diff --git a/network/gossip/libp2p/libp2pNode_test.go b/network/gossip/libp2p/libp2pNode_test.go index 05fd334417c..00aa6c7e7c5 100644 --- a/network/gossip/libp2p/libp2pNode_test.go +++ b/network/gossip/libp2p/libp2pNode_test.go @@ -44,7 +44,9 @@ type LibP2PNodeTestSuite struct { // TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestLibP2PNodesTestSuite(t *testing.T) { - suite.Run(t, new(LibP2PNodeTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = LibP2PNodeTestSuite{} + // suite.Run(t, new(LibP2PNodeTestSuite)) } // SetupTests initiates the test setups prior to each test diff --git a/network/gossip/libp2p/libp2pUtils_test.go b/network/gossip/libp2p/libp2pUtils_test.go index 23c1592c7ec..7ea110e71bc 100644 --- a/network/gossip/libp2p/libp2pUtils_test.go +++ b/network/gossip/libp2p/libp2pUtils_test.go @@ -20,7 +20,9 @@ type LibP2PUtilsTestSuite struct { } func TestLibP2PUtilsTestSuite(t *testing.T) { - suite.Run(t, new(LibP2PUtilsTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = LibP2PUtilsTestSuite{} + // suite.Run(t, new(LibP2PUtilsTestSuite)) } // TestPeerInfoFromID tests that PeerInfoFromID converts a flow.Identity to peer.AddrInfo correctly diff --git a/network/gossip/libp2p/peerManager_test.go b/network/gossip/libp2p/peerManager_test.go index 26d3a9106f0..451df354753 100644 --- a/network/gossip/libp2p/peerManager_test.go +++ b/network/gossip/libp2p/peerManager_test.go @@ -26,7 +26,9 @@ type PeerManagerTestSuite struct { } func TestPeerManagerTestSuite(t *testing.T) { - suite.Run(t, new(PeerManagerTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = PeerManagerTestSuite{} + // suite.Run(t, new(PeerManagerTestSuite)) } func (ts *PeerManagerTestSuite) SetupTest() { diff --git a/network/gossip/libp2p/pubsub_test.go b/network/gossip/libp2p/pubsub_test.go index 14860c01b77..9933e6f7e3a 100644 --- a/network/gossip/libp2p/pubsub_test.go +++ b/network/gossip/libp2p/pubsub_test.go @@ -30,7 +30,9 @@ type PubSubTestSuite struct { // 
TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestPubSubTestSuite(t *testing.T) { - suite.Run(t, new(PubSubTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = PubSubTestSuite{} + // suite.Run(t, new(PubSubTestSuite)) } // SetupTests initiates the test setups prior to each test diff --git a/network/gossip/libp2p/sporking_test.go b/network/gossip/libp2p/sporking_test.go index 1f0b1c613c6..54f46e8e343 100644 --- a/network/gossip/libp2p/sporking_test.go +++ b/network/gossip/libp2p/sporking_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/utils/unittest" ) @@ -23,7 +22,9 @@ type SporkingTestSuite struct { } func TestHardSpooningTestSuite(t *testing.T) { - suite.Run(t, new(SporkingTestSuite)) + t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") + _ = SporkingTestSuite{} + // suite.Run(t, new(SporkingTestSuite)) } // TestNetworkKeyChangedAfterHardSpoon tests that a node from the old chain cannot talk to a node in the new chain if it's From 210e56dafc4b6ed016042527edf285282febbcc0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 15:35:12 -0700 Subject: [PATCH 081/105] fix lint --- network/gossip/libp2p/keyTranslator_test.go | 3 +-- network/gossip/libp2p/libp2pNode_test.go | 3 +-- network/gossip/libp2p/libp2pUtils_test.go | 3 +-- network/gossip/libp2p/peerManager_test.go | 3 +-- network/gossip/libp2p/pubsub_test.go | 3 +-- network/gossip/libp2p/sporking_test.go | 4 ++-- 6 files changed, 7 insertions(+), 12 deletions(-) diff --git a/network/gossip/libp2p/keyTranslator_test.go b/network/gossip/libp2p/keyTranslator_test.go index d292d289a4d..76d11ae3938 100644 --- a/network/gossip/libp2p/keyTranslator_test.go +++ b/network/gossip/libp2p/keyTranslator_test.go @@ -24,8 +24,7 @@ type KeyTranslatorTestSuite struct { // TestKeyTranslatorTestSuite runs all the test methods in this test suite func TestKeyTranslatorTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = KeyTranslatorTestSuite{} - // suite.Run(t, new(KeyTranslatorTestSuite)) + suite.Run(t, new(KeyTranslatorTestSuite)) } // TestPrivateKeyConversion tests that Private keys are successfully converted from Flow to LibP2P representation diff --git a/network/gossip/libp2p/libp2pNode_test.go b/network/gossip/libp2p/libp2pNode_test.go index 00aa6c7e7c5..c0ed3e83eeb 100644 --- a/network/gossip/libp2p/libp2pNode_test.go +++ b/network/gossip/libp2p/libp2pNode_test.go @@ -45,8 +45,7 @@ type LibP2PNodeTestSuite struct { // TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestLibP2PNodesTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = LibP2PNodeTestSuite{} - // suite.Run(t, new(LibP2PNodeTestSuite)) + suite.Run(t, new(LibP2PNodeTestSuite)) } // SetupTests initiates the test setups prior to each test diff --git a/network/gossip/libp2p/libp2pUtils_test.go b/network/gossip/libp2p/libp2pUtils_test.go index 7ea110e71bc..4c0b72dcfb7 100644 --- a/network/gossip/libp2p/libp2pUtils_test.go +++ b/network/gossip/libp2p/libp2pUtils_test.go @@ -21,8 +21,7 @@ type LibP2PUtilsTestSuite struct { func TestLibP2PUtilsTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = LibP2PUtilsTestSuite{} - // suite.Run(t, new(LibP2PUtilsTestSuite)) + suite.Run(t, new(LibP2PUtilsTestSuite)) } // 
TestPeerInfoFromID tests that PeerInfoFromID converts a flow.Identity to peer.AddrInfo correctly diff --git a/network/gossip/libp2p/peerManager_test.go b/network/gossip/libp2p/peerManager_test.go index 451df354753..afe8871eaf0 100644 --- a/network/gossip/libp2p/peerManager_test.go +++ b/network/gossip/libp2p/peerManager_test.go @@ -27,8 +27,7 @@ type PeerManagerTestSuite struct { func TestPeerManagerTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = PeerManagerTestSuite{} - // suite.Run(t, new(PeerManagerTestSuite)) + suite.Run(t, new(PeerManagerTestSuite)) } func (ts *PeerManagerTestSuite) SetupTest() { diff --git a/network/gossip/libp2p/pubsub_test.go b/network/gossip/libp2p/pubsub_test.go index 9933e6f7e3a..9b334d09701 100644 --- a/network/gossip/libp2p/pubsub_test.go +++ b/network/gossip/libp2p/pubsub_test.go @@ -31,8 +31,7 @@ type PubSubTestSuite struct { // TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestPubSubTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = PubSubTestSuite{} - // suite.Run(t, new(PubSubTestSuite)) + suite.Run(t, new(PubSubTestSuite)) } // SetupTests initiates the test setups prior to each test diff --git a/network/gossip/libp2p/sporking_test.go b/network/gossip/libp2p/sporking_test.go index 54f46e8e343..ac206ca63e3 100644 --- a/network/gossip/libp2p/sporking_test.go +++ b/network/gossip/libp2p/sporking_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/utils/unittest" ) @@ -23,8 +24,7 @@ type SporkingTestSuite struct { func TestHardSpooningTestSuite(t *testing.T) { t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") - _ = SporkingTestSuite{} - // suite.Run(t, new(SporkingTestSuite)) + suite.Run(t, new(SporkingTestSuite)) } // TestNetworkKeyChangedAfterHardSpoon tests that a node from the old chain cannot talk to a node in the new chain if it's From e9eb8b78629cea64381f756b9b1759fa5a23d99e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 15:39:21 -0700 Subject: [PATCH 082/105] fix lint --- network/gossip/libp2p/keyTranslator_test.go | 1 - network/gossip/libp2p/libp2pNode_test.go | 1 - network/gossip/libp2p/libp2pUtils_test.go | 1 - network/gossip/libp2p/pubsub_test.go | 1 - network/gossip/libp2p/sporking_test.go | 1 - 5 files changed, 5 deletions(-) diff --git a/network/gossip/libp2p/keyTranslator_test.go b/network/gossip/libp2p/keyTranslator_test.go index 76d11ae3938..dbcd7d4509a 100644 --- a/network/gossip/libp2p/keyTranslator_test.go +++ b/network/gossip/libp2p/keyTranslator_test.go @@ -23,7 +23,6 @@ type KeyTranslatorTestSuite struct { // TestKeyTranslatorTestSuite runs all the test methods in this test suite func TestKeyTranslatorTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(KeyTranslatorTestSuite)) } diff --git a/network/gossip/libp2p/libp2pNode_test.go b/network/gossip/libp2p/libp2pNode_test.go index c0ed3e83eeb..05fd334417c 100644 --- a/network/gossip/libp2p/libp2pNode_test.go +++ b/network/gossip/libp2p/libp2pNode_test.go @@ -44,7 +44,6 @@ type LibP2PNodeTestSuite struct { // TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestLibP2PNodesTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(LibP2PNodeTestSuite)) } 
diff --git a/network/gossip/libp2p/libp2pUtils_test.go b/network/gossip/libp2p/libp2pUtils_test.go index 4c0b72dcfb7..23c1592c7ec 100644 --- a/network/gossip/libp2p/libp2pUtils_test.go +++ b/network/gossip/libp2p/libp2pUtils_test.go @@ -20,7 +20,6 @@ type LibP2PUtilsTestSuite struct { } func TestLibP2PUtilsTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(LibP2PUtilsTestSuite)) } diff --git a/network/gossip/libp2p/pubsub_test.go b/network/gossip/libp2p/pubsub_test.go index 9b334d09701..14860c01b77 100644 --- a/network/gossip/libp2p/pubsub_test.go +++ b/network/gossip/libp2p/pubsub_test.go @@ -30,7 +30,6 @@ type PubSubTestSuite struct { // TestLibP2PNodesTestSuite runs all the test methods in this test suit func TestPubSubTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(PubSubTestSuite)) } diff --git a/network/gossip/libp2p/sporking_test.go b/network/gossip/libp2p/sporking_test.go index ac206ca63e3..1f0b1c613c6 100644 --- a/network/gossip/libp2p/sporking_test.go +++ b/network/gossip/libp2p/sporking_test.go @@ -23,7 +23,6 @@ type SporkingTestSuite struct { } func TestHardSpooningTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(SporkingTestSuite)) } From bc9910a3dcb21f4656c7d5c431d1f73ca8a35e32 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 15:56:25 -0700 Subject: [PATCH 083/105] reload the last executed block instead of the last finalized executed block --- engine/execution/ingestion/engine.go | 70 +++++++++++++---------- engine/execution/ingestion/engine_test.go | 21 +++---- 2 files changed, 46 insertions(+), 45 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 693039fdced..959dc5c6e89 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -186,11 +186,11 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } -func (e *Engine) finalizedUnexecutedBlocks() (flow.Identifier, []flow.Identifier, error) { +func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { // get finalized height final, err := e.state.Final().Head() if err != nil { - return flow.ZeroID, nil, fmt.Errorf("could not get finalized block: %w", err) + return nil, fmt.Errorf("could not get finalized block: %w", err) } // find the first unexecuted and finalized block @@ -202,21 +202,19 @@ func (e *Engine) finalizedUnexecutedBlocks() (flow.Identifier, []flow.Identifier // because the next loop will ensure it only iterate through finalized // block. 
lastExecuted := final.Height - lastExecutedID := final.ID() for ; lastExecuted > 0; lastExecuted-- { header, err := e.state.AtHeight(lastExecuted).Head() if err != nil { - return flow.ZeroID, nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) + return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) } executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) if err != nil { - return flow.ZeroID, nil, fmt.Errorf("could not check whether block is executed: %w", err) + return nil, fmt.Errorf("could not check whether block is executed: %w", err) } if executed { - lastExecutedID = header.ID() break } } @@ -232,13 +230,13 @@ func (e *Engine) finalizedUnexecutedBlocks() (flow.Identifier, []flow.Identifier for height := firstUnexecuted; height <= final.Height; height++ { header, err := e.state.AtHeight(height).Head() if err != nil { - return flow.ZeroID, nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) } unexecuted = append(unexecuted, header.ID()) } - return lastExecutedID, unexecuted, nil + return unexecuted, nil } func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { @@ -263,18 +261,18 @@ func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { return unexecuted, nil } -func (e *Engine) unexecutedBlocks() (lastExecutedFinal flow.Identifier, finalized []flow.Identifier, pending []flow.Identifier, err error) { - lastExecutedFinal, finalized, err = e.finalizedUnexecutedBlocks() +func (e *Engine) unexecutedBlocks() (finalized []flow.Identifier, pending []flow.Identifier, err error) { + finalized, err = e.finalizedUnexecutedBlocks() if err != nil { - return flow.ZeroID, nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") + return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") } pending, err = e.pendingUnexecutedBlocks() if err != nil { - return flow.ZeroID, nil, nil, fmt.Errorf("could not read pending unexecuted blocks") + return nil, nil, fmt.Errorf("could not read pending unexecuted blocks") } - return lastExecutedFinal, finalized, pending, nil + return finalized, pending, nil } // on nodes startup, we need to load all the unexecuted blocks to the execution queues. @@ -291,20 +289,6 @@ func (e *Engine) reloadUnexecutedBlocks() error { blockByCollection *stdmap.BlockByCollectionBackdata, executionQueues *stdmap.QueuesBackdata) error { - lastExecutedFinal, finalized, pending, err := e.unexecutedBlocks() - if err != nil { - return fmt.Errorf("could not reload unexecuted blocks: %w", err) - } - - unexecuted := append(finalized, pending...) - - log := e.log.With(). - Int("total", len(unexecuted)). - Int("finalized", len(finalized)). - Int("pending", len(pending)).Logger() - - log.Info().Msg("reloading unexecuted blocks") - // saving an executed block is currently not transactional, so it's possible // the block is marked as executed but the receipt might not be saved during a crash. 
// in order to mitigate this problem, we always re-execute the last executed and finalized @@ -314,18 +298,42 @@ func (e *Engine) reloadUnexecutedBlocks() error { // a root block will fail, because the root block doesn't have a parent block, and could not // get the result of it // TODO: remove this, when saving a executed block is transactional - last, err := e.state.AtBlockID(lastExecutedFinal).Head() + lastExecutedHeight, lastExecutedID, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) + if err != nil { + return fmt.Errorf("could not get last executed: %w", err) + } + + last, err := e.state.AtBlockID(lastExecutedID).Head() if err != nil { return fmt.Errorf("could not get last executed final by ID: %w", err) } - if last.ParentID != flow.ZeroID { - err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedFinal) + // don't reload root block + isRoot := last.ParentID == flow.ZeroID + if !isRoot { + err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedID) if err != nil { - return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedFinal, err) + return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedID, err) } } + finalized, pending, err := e.unexecutedBlocks() + if err != nil { + return fmt.Errorf("could not reload unexecuted blocks: %w", err) + } + + unexecuted := append(finalized, pending...) + + log := e.log.With(). + Int("total", len(unexecuted)). + Int("finalized", len(finalized)). + Int("pending", len(pending)). + Uint64("last_executed", lastExecutedHeight). + Hex("last_executed_id", lastExecutedID[:]). + Logger() + + log.Info().Msg("reloading unexecuted blocks") + for _, blockID := range unexecuted { err := e.reloadBlock(blockByCollection, executionQueues, blockID) if err != nil { diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 0f7a9f7b7fd..053e7dd633c 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -647,10 +647,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { es := mocks.NewES(seal) engine := newIngestionEngine(t, ps, es) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{}, pending) }) @@ -673,10 +672,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { es := mocks.NewES(seal) engine := newIngestionEngine(t, ps, es) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockA.ID(), blockB.ID(), blockC.ID(), blockD.ID()}, pending) }) @@ -702,10 +700,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockA) mocks.ExecuteBlock(t, es, blockB) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, genesis.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockC.ID(), blockD.ID()}, pending) }) @@ -734,10 +731,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { 
mocks.ExecuteBlock(t, es, blockB) mocks.ExecuteBlock(t, es, blockC) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) }) @@ -766,10 +762,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockB) mocks.ExecuteBlock(t, es, blockC) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) }) @@ -798,10 +793,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockC) mocks.ExecuteBlock(t, es, blockD) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, blockA.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{}, pending) }) @@ -857,10 +851,9 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { mocks.ExecuteBlock(t, es, blockG) mocks.ExecuteBlock(t, es, blockJ) - lastExecutedFinal, finalized, pending, err := engine.unexecutedBlocks() + finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) - unittest.IDEqual(t, blockC.ID(), lastExecutedFinal) unittest.IDsEqual(t, []flow.Identifier{}, finalized) unittest.IDsEqual(t, []flow.Identifier{ blockI.ID(), // I is still pending, and unexecuted From 70917c70bc5616aee3d5260cccaf91cd74ec1118 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 16:12:59 -0700 Subject: [PATCH 084/105] refactor mocks --- engine/execution/ingestion/engine_test.go | 66 +++++++++++------------ utils/unittest/mocks/execution_state.go | 14 ++--- utils/unittest/mocks/protocol_state.go | 30 +++++------ 3 files changed, 55 insertions(+), 55 deletions(-) diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 053e7dd633c..352bad000c1 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -567,7 +567,7 @@ func TestShouldTriggerStateSync(t *testing.T) { require.True(t, shouldTriggerStateSync(20, 29, 10)) } -func newIngestionEngine(t *testing.T, ps *mocks.PS, es *mocks.ES) *Engine { +func newIngestionEngine(t *testing.T, ps *mocks.ProtocolState, es *mocks.ExecutionState) *Engine { log := unittest.Logger() metrics := metrics.NewNoopCollector() tracer, err := trace.NewTracer(log, "test") @@ -635,7 +635,7 @@ func logChain(chain []*flow.Block) { func TestLoadingUnexecutedBlocks(t *testing.T) { t.Run("only genesis", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(0) genesis := chain[0] @@ -644,7 +644,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Bootstrap(genesis, result, seal)) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) finalized, pending, err := engine.unexecutedBlocks() @@ -655,7 +655,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("no finalized, nor pending unexected", func(t *testing.T) { - 
ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(4) genesis, blockA, blockB, blockC, blockD := @@ -669,7 +669,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Extend(blockC)) require.NoError(t, ps.Mutate().Extend(blockD)) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) finalized, pending, err := engine.unexecutedBlocks() @@ -680,7 +680,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("no finalized, some pending executed", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(4) genesis, blockA, blockB, blockC, blockD := @@ -694,11 +694,11 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Extend(blockC)) require.NoError(t, ps.Mutate().Extend(blockD)) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) - mocks.ExecuteBlock(t, es, blockA) - mocks.ExecuteBlock(t, es, blockB) + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) @@ -708,7 +708,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("all finalized have been executed, and no pending executed", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(4) genesis, blockA, blockB, blockC, blockD := @@ -724,12 +724,12 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Finalize(blockC.ID())) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) - mocks.ExecuteBlock(t, es, blockA) - mocks.ExecuteBlock(t, es, blockB) - mocks.ExecuteBlock(t, es, blockC) + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) @@ -739,7 +739,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("some finalized are executed and conflicting are executed", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(4) genesis, blockA, blockB, blockC, blockD := @@ -755,12 +755,12 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Finalize(blockC.ID())) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) - mocks.ExecuteBlock(t, es, blockA) - mocks.ExecuteBlock(t, es, blockB) - mocks.ExecuteBlock(t, es, blockC) + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) @@ -770,7 +770,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("all pending executed", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() chain, result, seal := unittest.ChainFixture(4) genesis, blockA, blockB, blockC, blockD := @@ -785,13 +785,13 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Extend(blockD)) require.NoError(t, ps.Mutate().Finalize(blockA.ID())) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) - mocks.ExecuteBlock(t, es, blockA) - mocks.ExecuteBlock(t, es, blockB) - mocks.ExecuteBlock(t, es, blockC) - mocks.ExecuteBlock(t, es, blockD) + es.ExecuteBlock(t, blockA) 
+ es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + es.ExecuteBlock(t, blockD) finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) @@ -801,7 +801,7 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { }) t.Run("some fork is executed", func(t *testing.T) { - ps := mocks.NewPS() + ps := mocks.NewProtocolState() // Genesis <- A <- B <- C (finalized) <- D <- E <- F // ^--- G <- H @@ -840,16 +840,16 @@ func TestLoadingUnexecutedBlocks(t *testing.T) { require.NoError(t, ps.Mutate().Finalize(blockC.ID())) - es := mocks.NewES(seal) + es := mocks.NewExecutionState(seal) engine := newIngestionEngine(t, ps, es) - mocks.ExecuteBlock(t, es, blockA) - mocks.ExecuteBlock(t, es, blockB) - mocks.ExecuteBlock(t, es, blockC) - mocks.ExecuteBlock(t, es, blockD) - mocks.ExecuteBlock(t, es, blockG) - mocks.ExecuteBlock(t, es, blockJ) + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + es.ExecuteBlock(t, blockD) + es.ExecuteBlock(t, blockG) + es.ExecuteBlock(t, blockJ) finalized, pending, err := engine.unexecutedBlocks() require.NoError(t, err) diff --git a/utils/unittest/mocks/execution_state.go b/utils/unittest/mocks/execution_state.go index eb1952a0866..262301e1ce2 100644 --- a/utils/unittest/mocks/execution_state.go +++ b/utils/unittest/mocks/execution_state.go @@ -13,30 +13,30 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -// ES is a mocked version of execution state that +// ExecutionState is a mocked version of execution state that // simulates some of its behavior for testing purpose -type ES struct { +type ExecutionState struct { sync.Mutex state.ExecutionState commits map[flow.Identifier]flow.StateCommitment } -func NewES(seal *flow.Seal) *ES { +func NewExecutionState(seal *flow.Seal) *ExecutionState { commits := make(map[flow.Identifier]flow.StateCommitment) commits[seal.BlockID] = seal.FinalState - return &ES{ + return &ExecutionState{ commits: commits, } } -func (es *ES) PersistStateCommitment(ctx context.Context, blockID flow.Identifier, commit flow.StateCommitment) error { +func (es *ExecutionState) PersistStateCommitment(ctx context.Context, blockID flow.Identifier, commit flow.StateCommitment) error { es.Lock() defer es.Unlock() es.commits[blockID] = commit return nil } -func (es *ES) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identifier) (flow.StateCommitment, error) { +func (es *ExecutionState) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identifier) (flow.StateCommitment, error) { commit, ok := es.commits[blockID] if !ok { return nil, storage.ErrNotFound @@ -45,7 +45,7 @@ func (es *ES) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identif return commit, nil } -func ExecuteBlock(t *testing.T, es *ES, block *flow.Block) { +func (es *ExecutionState) ExecuteBlock(t *testing.T, block *flow.Block) { _, ok := es.commits[block.Header.ParentID] require.True(t, ok, "parent block not executed") require.NoError(t, diff --git a/utils/unittest/mocks/protocol_state.go b/utils/unittest/mocks/protocol_state.go index 29b98c1a2a2..989b444520e 100644 --- a/utils/unittest/mocks/protocol_state.go +++ b/utils/unittest/mocks/protocol_state.go @@ -12,13 +12,13 @@ import ( "github.com/onflow/flow-go/storage" ) -// PS is a mocked version of protocol state, which +// ProtocolState is a mocked version of protocol state, which // has very close behavior to the real implementation // but for testing purpose. 
// If you are testing a module that depends on protocol state's // behavior, but you don't want to mock up the methods and its return // value, then just use this module -type PS struct { +type ProtocolState struct { sync.Mutex protocol.State blocks map[flow.Identifier]*flow.Block @@ -29,20 +29,20 @@ type PS struct { seal *flow.Seal } -func NewPS() *PS { - return &PS{ +func NewProtocolState() *ProtocolState { + return &ProtocolState{ blocks: make(map[flow.Identifier]*flow.Block), children: make(map[flow.Identifier][]flow.Identifier), heights: make(map[uint64]*flow.Block), } } -type PSMutator struct { +type ProtocolStateMutator struct { protocolmock.Mutator - ps *PS + ps *ProtocolState } -func (ps *PS) AtBlockID(blockID flow.Identifier) protocol.Snapshot { +func (ps *ProtocolState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { ps.Lock() defer ps.Unlock() @@ -56,7 +56,7 @@ func (ps *PS) AtBlockID(blockID flow.Identifier) protocol.Snapshot { return snapshot } -func (ps *PS) AtHeight(height uint64) protocol.Snapshot { +func (ps *ProtocolState) AtHeight(height uint64) protocol.Snapshot { ps.Lock() defer ps.Unlock() @@ -70,7 +70,7 @@ func (ps *PS) AtHeight(height uint64) protocol.Snapshot { return snapshot } -func (ps *PS) Final() protocol.Snapshot { +func (ps *ProtocolState) Final() protocol.Snapshot { ps.Lock() defer ps.Unlock() @@ -91,7 +91,7 @@ func (ps *PS) Final() protocol.Snapshot { return snapshot } -func pending(ps *PS, blockID flow.Identifier) []flow.Identifier { +func pending(ps *ProtocolState, blockID flow.Identifier) []flow.Identifier { var pendingIDs []flow.Identifier pendingIDs, ok := ps.children[blockID] @@ -107,14 +107,14 @@ func pending(ps *PS, blockID flow.Identifier) []flow.Identifier { return pendingIDs } -func (ps *PS) Mutate() protocol.Mutator { - return &PSMutator{ +func (ps *ProtocolState) Mutate() protocol.Mutator { + return &ProtocolStateMutator{ protocolmock.Mutator{}, ps, } } -func (m *PSMutator) Bootstrap(root *flow.Block, result *flow.ExecutionResult, seal *flow.Seal) error { +func (m *ProtocolStateMutator) Bootstrap(root *flow.Block, result *flow.ExecutionResult, seal *flow.Seal) error { m.ps.Lock() defer m.ps.Unlock() @@ -130,7 +130,7 @@ func (m *PSMutator) Bootstrap(root *flow.Block, result *flow.ExecutionResult, se return nil } -func (m *PSMutator) Extend(block *flow.Block) error { +func (m *ProtocolStateMutator) Extend(block *flow.Block) error { m.ps.Lock() defer m.ps.Unlock() @@ -157,7 +157,7 @@ func (m *PSMutator) Extend(block *flow.Block) error { return nil } -func (m *PSMutator) Finalize(blockID flow.Identifier) error { +func (m *ProtocolStateMutator) Finalize(blockID flow.Identifier) error { m.ps.Lock() defer m.ps.Unlock() From 114a8e82e74b67fbc6f0ebfbcfca6e29f6f72895 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 16:14:13 -0700 Subject: [PATCH 085/105] fix lint --- network/gossip/libp2p/peerManager_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/gossip/libp2p/peerManager_test.go b/network/gossip/libp2p/peerManager_test.go index afe8871eaf0..26d3a9106f0 100644 --- a/network/gossip/libp2p/peerManager_test.go +++ b/network/gossip/libp2p/peerManager_test.go @@ -26,7 +26,6 @@ type PeerManagerTestSuite struct { } func TestPeerManagerTestSuite(t *testing.T) { - t.Skip("skip until https://github.com/onflow/flow-go/pull/99 is merged") suite.Run(t, new(PeerManagerTestSuite)) } From 905a97350086b8968dd8f4a2643fa9bc7ec09faf Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 
16:17:29 -0700 Subject: [PATCH 086/105] update comment --- engine/execution/ingestion/engine.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 959dc5c6e89..d10ef3f1603 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -292,11 +292,11 @@ func (e *Engine) reloadUnexecutedBlocks() error { // saving an executed block is currently not transactional, so it's possible // the block is marked as executed but the receipt might not be saved during a crash. // in order to mitigate this problem, we always re-execute the last executed and finalized - // block - // there is an exception, if the last executed final is a root block, then don't execute it, + // block. + // there is an exception, if the last executed block is a root block, then don't execute it, // because the root has already been executed during bootstrapping phase. And re-executing // a root block will fail, because the root block doesn't have a parent block, and could not - // get the result of it + // get the result of it. // TODO: remove this, when saving a executed block is transactional lastExecutedHeight, lastExecutedID, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) if err != nil { From 99e74c710ca1d83f945404d98054077f7859a84b Mon Sep 17 00:00:00 2001 From: Jordan Schalm Date: Thu, 29 Oct 2020 17:10:54 -0700 Subject: [PATCH 087/105] fix error equality checking in collector test The test assumed that transaction properties were validated in a particular order, which changed as part of implementing epochs. This commit changes the test's assertion to be less specific (merely that an error occurred), since the actual error that is returned is not directly related to the failure condition we are testing, it is instead a side effect of the failure condition we are testing. 
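
Sketched here for clarity only (the authoritative change is the diff below), the
assertion in the missing-reference-block-ID sub-test goes from matching a specific
validation error to merely requiring that an error occurred:

    // before: assumed the ref-block-ID check was the first validation to fail
    expected := access.IncompleteTransactionError{
        MissingFields: []string{flow.TransactionFieldRefBlockID.String()},
    }
    err := client.SendTransaction(ctx, *malformed)
    unittest.AssertErrSubstringMatch(t, expected, err)

    // after: only require that the malformed transaction is rejected
    err := client.SendTransaction(ctx, *malformed)
    suite.Assert().Error(err)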
--- integration/tests/collection/tx_ingress_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/integration/tests/collection/tx_ingress_test.go b/integration/tests/collection/tx_ingress_test.go index f332f6bb1e8..8533d1a4655 100644 --- a/integration/tests/collection/tx_ingress_test.go +++ b/integration/tests/collection/tx_ingress_test.go @@ -37,14 +37,10 @@ func (suite *CollectorSuite) TestTransactionIngress_InvalidTransaction() { tx.SetReferenceBlockID(sdk.EmptyID) }) - expected := access.IncompleteTransactionError{ - MissingFields: []string{flow.TransactionFieldRefBlockID.String()}, - } - ctx, cancel := context.WithTimeout(suite.ctx, defaultTimeout) defer cancel() err := client.SendTransaction(ctx, *malformed) - unittest.AssertErrSubstringMatch(t, expected, err) + suite.Assert().Error(err) }) t.Run("missing script", func(t *testing.T) { From c9e749000646287ba76f5e9203c2103db49000eb Mon Sep 17 00:00:00 2001 From: Tarak Ben Youssef Date: Thu, 29 Oct 2020 17:22:02 -0700 Subject: [PATCH 088/105] comment fix --- crypto/bls12381_utils.c | 2 +- crypto/dkg_core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/bls12381_utils.c b/crypto/bls12381_utils.c index c6818892217..c6444d7325e 100644 --- a/crypto/bls12381_utils.c +++ b/crypto/bls12381_utils.c @@ -574,7 +574,7 @@ void ep2_sum_vector(ep2_t jointy, ep2_st* y, int len){ ep2_add_projc(jointy, jointy, &y[i]); } ep2_norm(jointy, jointy); // not necessary but left here to optimize the - // the multiple pairing computations with the same + // multiple pairing computations with the same // public key } diff --git a/crypto/dkg_core.c b/crypto/dkg_core.c index 02db10f3e8a..c0e9da8f685 100644 --- a/crypto/dkg_core.c +++ b/crypto/dkg_core.c @@ -69,7 +69,7 @@ static void G2_polynomialImage(ep2_t y, const ep2_st* A, const int len_A, } ep2_norm(y, y); // not necessary but left here to optimize the - // the multiple pairing computations with the same public key + // multiple pairing computations with the same public key bn_free(bn_x); } From b6a9788dcde9462bdb99bc40894a7cd11a8bfaca Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 17:35:43 -0700 Subject: [PATCH 089/105] fix logging --- engine/execution/ingestion/engine.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index d10ef3f1603..212398527c7 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -435,6 +435,11 @@ func (e *Engine) enqueueBlockAndCheckExecutable( blockID := executableBlock.ID() + lg := e.log.With(). + Hex("block_id", blockID[:]). + Uint64("block_height", executableBlock.Block.Header.Height). + Logger() + // adding the block to the queue, queue, added := enqueue(executableBlock, executionQueues) @@ -474,11 +479,11 @@ func (e *Engine) enqueueBlockAndCheckExecutable( // exist in the queue, then we need to load the block from the storage. _, ok := queue.Nodes[blockID] if !ok { - log.Error().Msgf("an unexecuted parent block is missing in the queue") + lg.Error().Msgf("an unexecuted parent block is missing in the queue") } } else { // if there is exception, then crash - log.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") + lg.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") } // check if we have all the collections for the block, and request them if there is missing. 
@@ -490,7 +495,7 @@ func (e *Engine) enqueueBlockAndCheckExecutable( // execute the block if the block is ready to be executed completed := e.executeBlockIfComplete(executableBlock) - log.Info(). + lg.Info(). // if the execution is halt, but the queue keeps growing, we could check which block // hasn't been executed. Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). @@ -709,9 +714,9 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col collID := collection.ID() - log := e.log.With().Hex("collection_id", collID[:]).Logger() + lg := e.log.With().Hex("collection_id", collID[:]).Logger() - log.Info().Hex("sender", originID[:]).Msg("handle collection") + lg.Info().Hex("sender", originID[:]).Msg("handle collection") // TODO: bail if have seen this collection before. err := e.collections.Store(collection) @@ -728,8 +733,7 @@ func (e *Engine) handleCollection(originID flow.Identifier, collection *flow.Col // or it was ejected from the mempool when it was full. // either way, we will return if !exists { - e.log.Debug().Hex("collection_id", collID[:]). - Msg("could not find block for collection") + lg.Debug().Msg("could not find block for collection") return nil } From 12c327aacc488b0e41e82773700d86eb1e43b782 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Thu, 29 Oct 2020 23:27:13 -0700 Subject: [PATCH 090/105] Apply suggestions from code review Co-authored-by: Alexander Hentschel --- engine/execution/ingestion/engine.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 212398527c7..be32cb326a9 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -194,16 +194,21 @@ func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { } // find the first unexecuted and finalized block - // we iterate from the last finalized, check if it has been executed, + // We iterate from the last finalized, check if it has been executed, // if not, keep going to the lower height, until we find an executed // block, and then the next height is the first unexecuted. - // if there is only one finalized, and it's executed (i.e. genesis), + // If there is only one finalized, and it's executed (i.e. root block), // then the firstUnexecuted is a unfinalized block, which is ok, - // because the next loop will ensure it only iterate through finalized - // block. + // because the next loop will ensure it only iterates through finalized + // blocks. 
lastExecuted := final.Height - for ; lastExecuted > 0; lastExecuted-- { + rootBlock, err := e.state.Params().Root() + if err != nil { + return nil, fmt.Errorf("failed to retrieve root block: %w", err) + } + + for ; lastExecuted > rootBlock.Height; lastExecuted-- { header, err := e.state.AtHeight(lastExecuted).Head() if err != nil { return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) From a26a683429708e6c3dad0bf320c3d8b0a0cf086f Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Thu, 29 Oct 2020 23:40:00 -0700 Subject: [PATCH 091/105] Update state/protocol/badger/mutator.go Co-authored-by: Alexander Hentschel --- state/protocol/badger/mutator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/state/protocol/badger/mutator.go b/state/protocol/badger/mutator.go index beb4b7b0213..87c82bd3ab4 100644 --- a/state/protocol/badger/mutator.go +++ b/state/protocol/badger/mutator.go @@ -300,7 +300,7 @@ func (m *Mutator) headerExtend(candidate *flow.Block) error { // block G is not a valid block, because it does not include C which has been finalized. // block H and I are a valid, because its their includes C. return state.NewOutdatedExtensionErrorf( - "candidate block (height: %v) conflicts with finalized state (ancestor: %d final: %d)", + "candidate block (height: %d) conflicts with finalized state (ancestor: %d final: %d)", header.Height, ancestor.Height, finalizedHeight) } ancestorID = ancestor.ParentID From 6eea192f036df3acd30e0e67a4ef4e2e559335c2 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Thu, 29 Oct 2020 23:40:30 -0700 Subject: [PATCH 092/105] Update engine/execution/state/state.go Co-authored-by: Alexander Hentschel --- engine/execution/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index 9f0e2ebb19d..07e0b5ab941 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -52,7 +52,7 @@ type ReadOnlyExecutionState interface { } // IsBlockExecuted returns whether the block has been executed. -// it checks whether the statecommitment exists in execution state. +// it checks whether the state commitment exists in execution state. 
func IsBlockExecuted(ctx context.Context, state ReadOnlyExecutionState, block flow.Identifier) (bool, error) { _, err := state.StateCommitmentByBlockID(ctx, block) From 70fdcc21b8bd6e1251dcfbbec22d0a34f17f8fe2 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 29 Oct 2020 23:41:10 -0700 Subject: [PATCH 093/105] pin the snapshot for reloading blocks --- engine/execution/ingestion/engine.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index be32cb326a9..f79573ebcaa 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -186,9 +186,9 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } } -func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { +func (e *Engine) finalizedUnexecutedBlocks(finalized protocol.Snapshot) ([]flow.Identifier, error) { // get finalized height - final, err := e.state.Final().Head() + final, err := finalized.Head() if err != nil { return nil, fmt.Errorf("could not get finalized block: %w", err) } @@ -244,14 +244,14 @@ func (e *Engine) finalizedUnexecutedBlocks() ([]flow.Identifier, error) { return unexecuted, nil } -func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { - unexecuted := make([]flow.Identifier, 0) - - pendings, err := e.state.Final().Pending() +func (e *Engine) pendingUnexecutedBlocks(finalized protocol.Snapshot) ([]flow.Identifier, error) { + pendings, err := finalized.Pending() if err != nil { return nil, fmt.Errorf("could not get pending blocks: %w", err) } + unexecuted := make([]flow.Identifier, 0) + for _, pending := range pendings { executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, pending) if err != nil { @@ -267,12 +267,16 @@ func (e *Engine) pendingUnexecutedBlocks() ([]flow.Identifier, error) { } func (e *Engine) unexecutedBlocks() (finalized []flow.Identifier, pending []flow.Identifier, err error) { - finalized, err = e.finalizedUnexecutedBlocks() + // pin the snapshot so that finalizedUnexecutedBlocks and pendingUnexecutedBlocks are based + // on the same snapshot. 
+ snapshot := e.state.Final() + + finalized, err = e.finalizedUnexecutedBlocks(snapshot) if err != nil { return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") } - pending, err = e.pendingUnexecutedBlocks() + pending, err = e.pendingUnexecutedBlocks(snapshot) if err != nil { return nil, nil, fmt.Errorf("could not read pending unexecuted blocks") } @@ -314,7 +318,12 @@ func (e *Engine) reloadUnexecutedBlocks() error { } // don't reload root block - isRoot := last.ParentID == flow.ZeroID + rootBlock, err := e.state.Params().Root() + if err != nil { + return fmt.Errorf("failed to retrieve root block: %w", err) + } + + isRoot := rootBlock.ID() == last.ID() if !isRoot { err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedID) if err != nil { From fa3f1af8f007940717758e63709104101380218f Mon Sep 17 00:00:00 2001 From: Danu Date: Fri, 30 Oct 2020 19:38:01 +0000 Subject: [PATCH 094/105] Command to create Staking & Networking keys for internal nodes (#96) * Initial work for /cmd/bootstrap/cmd/keygen.go * Check if `keys` dir exists and print summary log * Update long and short description * Remove redunant read * Move functions to util * Role counting logic moved to util * Update cmd/bootstrap/cmd/keygen.go Co-authored-by: Leo Zhang * Update cmd/bootstrap/cmd/keygen.go Co-authored-by: Leo Zhang * Fix linting issues Co-authored-by: Leo Zhang --- cmd/bootstrap/cmd/keygen.go | 76 +++++++++++++++++++++++++++++++++++++ cmd/bootstrap/cmd/util.go | 29 ++++++++++++++ 2 files changed, 105 insertions(+) create mode 100755 cmd/bootstrap/cmd/keygen.go diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go new file mode 100755 index 00000000000..ae8c1ac3f0c --- /dev/null +++ b/cmd/bootstrap/cmd/keygen.go @@ -0,0 +1,76 @@ +package cmd + +import ( + "fmt" + "io" + "os" + + "github.com/spf13/cobra" + + model "github.com/onflow/flow-go/model/bootstrap" +) + +// keygenCmd represents the key gen command +var keygenCmd = &cobra.Command{ + Use: "keygen", + Short: "Generate Staking and Networking keys for a list of nodes", + Long: `Generate Staking and Networking keys for a list of nodes provided by the flag '--config'`, + Run: func(cmd *cobra.Command, args []string) { + // check if out directory exists + exists, err := pathExists(flagOutdir) + if err != nil { + log.Error().Msg("could not check if directory exists") + return + } + + // check if the out directory is empty or has contents + if exists { + empty, err := isEmptyDir(flagOutdir) + if err != nil { + log.Error().Msg("could not check if directory as empty") + return + } + + if !empty { + log.Error().Msg("output directory already exists and has content. delete and try again.") + return + } + } + + // create keys + log.Info().Msg("generating internal private networking and staking keys") + nodes := genNetworkAndStakingKeys([]model.NodeInfo{}) + log.Info().Msg("") + + // count roles + roleCounts := nodeCountByRole(nodes) + for role, count := range roleCounts { + log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", count, role.String())) + } + }, +} + +func init() { + rootCmd.AddCommand(keygenCmd) + + // required parameters + keygenCmd.Flags(). 
+ StringVar(&flagConfig, "config", "--node-config.json", "path to a JSON file containing multiple node configurations (Role, Address, Stake)") + _ = keygenCmd.MarkFlagRequired("config") + +} + +// isEmptyDir returns True if the directory contains children +func isEmptyDir(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} diff --git a/cmd/bootstrap/cmd/util.go b/cmd/bootstrap/cmd/util.go index 258e1b75cd4..47a8f437eb4 100644 --- a/cmd/bootstrap/cmd/util.go +++ b/cmd/bootstrap/cmd/util.go @@ -9,6 +9,8 @@ import ( "path/filepath" "github.com/onflow/flow-go/crypto" + model "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/io" ) @@ -78,3 +80,30 @@ func filesInDir(dir string) ([]string, error) { }) return files, err } + +// pathExists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func nodeCountByRole(nodes []model.NodeInfo) map[flow.Role]uint16 { + roleCounts := map[flow.Role]uint16{ + flow.RoleCollection: 0, + flow.RoleConsensus: 0, + flow.RoleExecution: 0, + flow.RoleVerification: 0, + flow.RoleAccess: 0, + } + for _, node := range nodes { + roleCounts[node.Role] = roleCounts[node.Role] + 1 + } + + return roleCounts +} From 7b5bf29163bc1c3a197c23f42ec2175d13a7b13d Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Fri, 30 Oct 2020 13:48:01 -0700 Subject: [PATCH 095/105] truncate-database command line utility --- cmd/util/cmd/common/storage.go | 7 ++++- cmd/util/cmd/truncate-database/cmd.go | 38 +++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 cmd/util/cmd/truncate-database/cmd.go diff --git a/cmd/util/cmd/common/storage.go b/cmd/util/cmd/common/storage.go index d243521e090..b3297b63df0 100644 --- a/cmd/util/cmd/common/storage.go +++ b/cmd/util/cmd/common/storage.go @@ -8,10 +8,15 @@ import ( ) func InitStorage(datadir string) *badger.DB { + return InitStorageWithTruncate(datadir, false) +} + +func InitStorageWithTruncate(datadir string, truncate bool) *badger.DB { opts := badger. DefaultOptions(datadir). WithKeepL0InMemory(true). - WithLogger(nil) + WithLogger(nil). 
+ WithTruncate(truncate) db, err := badger.Open(opts) if err != nil { diff --git a/cmd/util/cmd/truncate-database/cmd.go b/cmd/util/cmd/truncate-database/cmd.go new file mode 100644 index 00000000000..19a54d124f2 --- /dev/null +++ b/cmd/util/cmd/truncate-database/cmd.go @@ -0,0 +1,38 @@ +package find + +import ( + "github.com/rs/zerolog/log" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" +) + +var ( + flagBlockHeight uint64 + flagDatadir string +) + +var Cmd = &cobra.Command{ + Use: "truncate-database", + Short: "Truncates protocol state database (Possible data loss!)", + Run: run, +} + +func init() { + + Cmd.Flags().StringVar(&flagDatadir, "datadir", "", + "directory that stores the protocol state") + _ = Cmd.MarkFlagRequired("datadir") + +} + +func run(*cobra.Command, []string) { + + log.Info().Msg("Opening database with truncate") + + db := common.InitStorageWithTruncate(flagDatadir, true) + defer db.Close() + + log.Info().Msg("Truncated") +} From e1d0ec8f0a6c2dcfbbd3dbcea05f2ec7d0cc6cb3 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Fri, 30 Oct 2020 14:01:24 -0700 Subject: [PATCH 096/105] Yahya/5005-fix-flakey-libp2p-test (#99) * fixes peer manager non-blocking start * fixes peer manager assertion issue * adds RequireConcurrentCallsReturnBefore * refactors logs at debug level * fixes wait group negative count issue * adds error level logs * adds free port function * fixes libp2p tests * refactors unnecessary suite nested calls * adds more unittest utils * simplifies some tests * adjusts timeouts * fixes TestConcurrentOnDemandPeerUpdate * Update network/gossip/libp2p/libp2pNode_test.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * Update network/gossip/libp2p/libp2pNode_test.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * Update network/gossip/libp2p/peerManager.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * fixes lint issues * makes allocated ports concurrency safe * Update network/gossip/libp2p/libp2pNode_test.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * fixes deferring closing streams * fixes TestConcurrentOnDemandPeerUpdate * fixes log issue * adds error assertion * fixes failure issue * fixes lint issue * fixes unittest error * Update network/gossip/libp2p/peerManager_test.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * Update network/gossip/libp2p/test/testUtil.go Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> * encapsulates port allocator test helper * fixes concurrent access on mock run method * updates peer manager with ready done aware * updates go mod * fixes small commented code * fixes comments * adds require return before Co-authored-by: Vishal <1117327+vishalchangrani@users.noreply.github.com> --- crypto/bls_test.go | 16 +- crypto/sign_test_utils.go | 20 +- integration/tests/access/access_api_test.go | 2 +- network/gossip/libp2p/libp2pNode_test.go | 317 +++++++++--------- network/gossip/libp2p/middleware.go | 19 +- network/gossip/libp2p/peerManager.go | 48 ++- network/gossip/libp2p/peerManager_test.go | 162 +++++---- network/gossip/libp2p/test/echoengine_test.go | 4 +- .../libp2p/test/epochtransition_test.go | 5 +- network/gossip/libp2p/test/meshengine_test.go | 3 +- network/gossip/libp2p/test/middleware_test.go | 5 +- network/gossip/libp2p/test/portallocator.go | 48 +++ network/gossip/libp2p/test/testUtil.go | 17 +- 
network/gossip/libp2p/test/topology_test.go | 4 +- utils/unittest/unittest.go | 68 +++- 15 files changed, 430 insertions(+), 308 deletions(-) create mode 100644 network/gossip/libp2p/test/portallocator.go diff --git a/crypto/bls_test.go b/crypto/bls_test.go index e168c896452..62b247aa81d 100644 --- a/crypto/bls_test.go +++ b/crypto/bls_test.go @@ -128,7 +128,7 @@ func TestAggregateSignatures(t *testing.T) { sks := make([]PrivateKey, 0, sigsNum) pks := make([]PublicKey, 0, sigsNum) seed := make([]byte, KeyGenSeedMinLenBLSBLS12381) - var aggSig,expectedSig Signature + var aggSig, expectedSig Signature // create the signatures for i := 0; i < sigsNum; i++ { @@ -161,7 +161,6 @@ func TestAggregateSignatures(t *testing.T) { fmt.Sprintf("Verification of %s failed, signature should be %s private keys are %s, input is %x", aggSig, expectedSig, sks, input)) }) - // check if one the signatures is not correct t.Run("one invalid signatures", func(t *testing.T) { @@ -182,7 +181,6 @@ func TestAggregateSignatures(t *testing.T) { sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac) }) - // check if one the public keys is not correct t.Run("one invalid public key", func(t *testing.T) { randomIndex := mrand.Intn(sigsNum) @@ -390,10 +388,10 @@ func TestBatchVerify(t *testing.T) { }) // pick a random number of invalid signatures - invalidSigsNum := mrand.Intn(sigsNum-1) + 1 - // generate a random permutation of indices to pick the + invalidSigsNum := mrand.Intn(sigsNum-1) + 1 + // generate a random permutation of indices to pick the // invalid signatures. - indices := make([]int, 0, sigsNum) + indices := make([]int, 0, sigsNum) for i := 0; i < sigsNum; i++ { indices = append(indices, i) } @@ -425,7 +423,7 @@ func TestBatchVerify(t *testing.T) { sigs[indices[i]] = sigs[indices[i]][:3] // test the short signatures } } - + valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, kmac) require.NoError(t, err) assert.Equal(t, valid, expectedValid, @@ -456,7 +454,7 @@ func TestBatchVerify(t *testing.T) { } valid, err := BatchVerifySignaturesOneMessage(pks, sigs, input, nil) require.Error(t, err) - + assert.Equal(t, valid, expectedValid, fmt.Sprintf("verification should fail with incorrect input lenghts, got %v", valid)) }) @@ -509,7 +507,7 @@ func BenchmarkBatchVerify(b *testing.B) { // - if all signatures are invalid (valid points in G1): // (2*2*(n-1)) pairings compared to (2*n) pairings for the simple verification. 
b.Run("unhappy path", func(b *testing.B) { - // only one invalid signature + // only one invalid signature alterSignature(sigs[sigsNum/2]) b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/crypto/sign_test_utils.go b/crypto/sign_test_utils.go index e5402850998..475d5cff359 100644 --- a/crypto/sign_test_utils.go +++ b/crypto/sign_test_utils.go @@ -34,13 +34,13 @@ func testGenSignVerify(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) { s, err := sk.Sign(input, halg) require.NoError(t, err) pk := sk.PublicKey() - + // test a valid signature result, err := pk.Verify(s, input, halg) require.NoError(t, err) assert.True(t, result, fmt.Sprintf( "Verification should succeed:\n signature:%s\n message:%x\n private key:%s", s, input, sk)) - + // test with a different message input[0] ^= 1 result, err = pk.Verify(s, input, halg) @@ -101,16 +101,16 @@ func testEncodeDecode(t *testing.T, salg SigningAlgorithm) { // test invalid private keys (equal to the curve group order) groupOrder := make(map[SigningAlgorithm][]byte) - groupOrder[ECDSAP256] = []byte{255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, - 255, 255, 255, 255, 255, 188, 230, 250, 173, 167, + groupOrder[ECDSAP256] = []byte{255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, + 255, 255, 255, 255, 255, 188, 230, 250, 173, 167, 23, 158, 132, 243, 185, 202, 194, 252, 99, 37, 81} - - groupOrder[ECDSASecp256k1] = []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 254, 186, 174, 220, 230, + + groupOrder[ECDSASecp256k1] = []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 254, 186, 174, 220, 230, 175, 72, 160, 59, 191, 210, 94, 140, 208, 54, 65, 65} - - groupOrder[BLSBLS12381] = []byte{0x73, 0xED, 0xA7, 0x53, 0x29, 0x9D, 0x7D, 0x48, 0x33, 0x39, - 0xD8, 0x08, 0x09, 0xA1, 0xD8, 0x05, 0x53, 0xBD, 0xA4, 0x02, 0xFF, 0xFE, + + groupOrder[BLSBLS12381] = []byte{0x73, 0xED, 0xA7, 0x53, 0x29, 0x9D, 0x7D, 0x48, 0x33, 0x39, + 0xD8, 0x08, 0x09, 0xA1, 0xD8, 0x05, 0x53, 0xBD, 0xA4, 0x02, 0xFF, 0xFE, 0x5B, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01} _, err = DecodePrivateKey(salg, groupOrder[salg]) require.Error(t, err, "the key decoding should fail - private key value is too large") diff --git a/integration/tests/access/access_api_test.go b/integration/tests/access/access_api_test.go index 40b73442eed..56e437afc26 100644 --- a/integration/tests/access/access_api_test.go +++ b/integration/tests/access/access_api_test.go @@ -57,7 +57,7 @@ func (suite *AccessSuite) SetupTest() { conID := unittest.IdentifierFixture() nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithID(conID), - testnet.AsGhost(), + testnet.AsGhost()) nodeConfigs = append(nodeConfigs, nodeConfig) } diff --git a/network/gossip/libp2p/libp2pNode_test.go b/network/gossip/libp2p/libp2pNode_test.go index 05fd334417c..bab9cbfe5c9 100644 --- a/network/gossip/libp2p/libp2pNode_test.go +++ b/network/gossip/libp2p/libp2pNode_test.go @@ -22,7 +22,6 @@ import ( "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -48,19 +47,19 @@ func TestLibP2PNodesTestSuite(t *testing.T) { } // SetupTests initiates the test setups prior to each test -func (l *LibP2PNodeTestSuite) SetupTest() { - l.logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() - l.ctx, l.cancel = 
context.WithCancel(context.Background()) - golog.SetAllLoggers(golog.LevelDebug) +func (suite *LibP2PNodeTestSuite) SetupTest() { + suite.logger = zerolog.New(os.Stderr).Level(zerolog.DebugLevel) + golog.SetAllLoggers(golog.LevelError) + suite.ctx, suite.cancel = context.WithCancel(context.Background()) } -func (l *LibP2PNodeTestSuite) TearDownTest() { - l.cancel() +func (suite *LibP2PNodeTestSuite) TearDownTest() { + suite.cancel() } // TestMultiAddress evaluates correct translations from // dns and ip4 to libp2p multi-address -func (l *LibP2PNodeTestSuite) TestMultiAddress() { +func (suite *LibP2PNodeTestSuite) TestMultiAddress() { tt := []struct { address NodeAddress multiaddress string @@ -93,29 +92,29 @@ func (l *LibP2PNodeTestSuite) TestMultiAddress() { for _, tc := range tt { actualAddress := MultiaddressStr(tc.address) - assert.Equal(l.Suite.T(), tc.multiaddress, actualAddress, "incorrect multi-address translation") + assert.Equal(suite.T(), tc.multiaddress, actualAddress, "incorrect multi-address translation") } } -func (l *LibP2PNodeTestSuite) TestSingleNodeLifeCycle() { +func (suite *LibP2PNodeTestSuite) TestSingleNodeLifeCycle() { // creates a single - nodes, _ := l.CreateNodes(1, nil, false) + nodes, _ := suite.CreateNodes(1, nil, false) // stops the created node done, err := nodes[0].Stop() - assert.NoError(l.Suite.T(), err) + assert.NoError(suite.T(), err) <-done } // TestGetPeerInfo evaluates the deterministic translation between the nodes address and // their libp2p info. It generates an address, and checks whether repeated translations // yields the same info or not. -func (l *LibP2PNodeTestSuite) TestGetPeerInfo() { +func (suite *LibP2PNodeTestSuite) TestGetPeerInfo() { for i := 0; i < 10; i++ { name := fmt.Sprintf("node%d", i) key, err := generateNetworkingKey(name) - require.NoError(l.Suite.T(), err) + require.NoError(suite.T(), err) // creates node-i address address := NodeAddress{ @@ -127,33 +126,33 @@ func (l *LibP2PNodeTestSuite) TestGetPeerInfo() { // translates node-i address into info info, err := GetPeerInfo(address) - require.NoError(l.Suite.T(), err) + require.NoError(suite.T(), err) // repeats the translation for node-i for j := 0; j < 10; j++ { rinfo, err := GetPeerInfo(address) - require.NoError(l.Suite.T(), err) - assert.True(l.Suite.T(), rinfo.String() == info.String(), "inconsistent id generated") + require.NoError(suite.T(), err) + assert.True(suite.T(), rinfo.String() == info.String(), "inconsistent id generated") } } } // TestAddPeers checks if nodes can be added as peers to a given node -func (l *LibP2PNodeTestSuite) TestAddPeers() { +func (suite *LibP2PNodeTestSuite) TestAddPeers() { count := 3 // create nodes - nodes, addrs := l.CreateNodes(count, nil, false) - defer l.StopNodes(nodes) + nodes, addrs := suite.CreateNodes(count, nil, false) + defer suite.StopNodes(nodes) // add the remaining nodes to the first node as its set of peers for _, p := range addrs[1:] { - require.NoError(l.Suite.T(), nodes[0].AddPeer(l.ctx, p)) + require.NoError(suite.T(), nodes[0].AddPeer(suite.ctx, p)) } // Checks if all 3 nodes have been added as peers to the first node - assert.Len(l.Suite.T(), nodes[0].libP2PHost.Peerstore().Peers(), count) + assert.Len(suite.T(), nodes[0].libP2PHost.Peerstore().Peers(), count) // Checks whether the first node is connected to the rest for _, peer := range nodes[0].libP2PHost.Peerstore().Peers() { @@ -161,28 +160,28 @@ func (l *LibP2PNodeTestSuite) TestAddPeers() { if nodes[0].libP2PHost.ID().String() == peer.String() { continue } - 
assert.Eventuallyf(l.Suite.T(), func() bool { + assert.Eventuallyf(suite.T(), func() bool { return network.Connected == nodes[0].libP2PHost.Network().Connectedness(peer) }, 2*time.Second, tickForAssertEventually, fmt.Sprintf(" first node is not connected to %s", peer.String())) } } // TestAddPeers checks if nodes can be added as peers to a given node -func (l *LibP2PNodeTestSuite) TestRemovePeers() { +func (suite *LibP2PNodeTestSuite) TestRemovePeers() { count := 3 // create nodes - nodes, addrs := l.CreateNodes(count, nil, false) - defer l.StopNodes(nodes) + nodes, addrs := suite.CreateNodes(count, nil, false) + defer suite.StopNodes(nodes) // add nodes two and three to the first node as its peers for _, p := range addrs[1:] { - require.NoError(l.Suite.T(), nodes[0].AddPeer(l.ctx, p)) + require.NoError(suite.T(), nodes[0].AddPeer(suite.ctx, p)) } // check if all 3 nodes have been added as peers to the first node - assert.Len(l.Suite.T(), nodes[0].libP2PHost.Peerstore().Peers(), count) + assert.Len(suite.T(), nodes[0].libP2PHost.Peerstore().Peers(), count) // check whether the first node is connected to the rest for _, peer := range nodes[0].libP2PHost.Peerstore().Peers() { @@ -190,46 +189,46 @@ func (l *LibP2PNodeTestSuite) TestRemovePeers() { if nodes[0].libP2PHost.ID().String() == peer.String() { continue } - assert.Eventually(l.Suite.T(), func() bool { + assert.Eventually(suite.T(), func() bool { return network.Connected == nodes[0].libP2PHost.Network().Connectedness(peer) }, 2*time.Second, tickForAssertEventually) } // disconnect from each peer and assert that the connection no longer exists for _, p := range addrs[1:] { - require.NoError(l.Suite.T(), nodes[0].RemovePeer(l.ctx, p)) + require.NoError(suite.T(), nodes[0].RemovePeer(suite.ctx, p)) pInfo, err := GetPeerInfo(p) - assert.NoError(l.Suite.T(), err) - assert.Equal(l.Suite.T(), network.NotConnected, nodes[0].libP2PHost.Network().Connectedness(pInfo.ID)) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), network.NotConnected, nodes[0].libP2PHost.Network().Connectedness(pInfo.ID)) } } // TestCreateStreams checks if a new streams is created each time when CreateStream is called and an existing stream is not reused -func (l *LibP2PNodeTestSuite) TestCreateStream() { +func (suite *LibP2PNodeTestSuite) TestCreateStream() { count := 2 // Creates nodes - nodes, addrs := l.CreateNodes(count, nil, false) - defer l.StopNodes(nodes) + nodes, addrs := suite.CreateNodes(count, nil, false) + defer suite.StopNodes(nodes) address2 := addrs[1] flowProtocolID := generateProtocolID(rootBlockID) // Assert that there is no outbound stream to the target yet - require.Equal(l.T(), 0, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) + require.Equal(suite.T(), 0, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) // Now attempt to create another 100 outbound stream to the same destination by calling CreateStream var streams []network.Stream for i := 0; i < 100; i++ { anotherStream, err := nodes[0].CreateStream(context.Background(), address2) // Assert that a stream was returned without error - require.NoError(l.T(), err) - require.NotNil(l.T(), anotherStream) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), anotherStream) // assert that the stream count within libp2p incremented (a new stream was created) - require.Equal(l.T(), i+1, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) + 
require.Equal(suite.T(), i+1, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) // assert that the same connection is reused - require.Len(l.T(), nodes[0].libP2PHost.Network().Conns(), 1) + require.Len(suite.T(), nodes[0].libP2PHost.Network().Conns(), 1) streams = append(streams, anotherStream) } @@ -244,12 +243,12 @@ func (l *LibP2PNodeTestSuite) TestCreateStream() { }() wg.Wait() // assert that the stream count within libp2p decremented - require.Equal(l.T(), i, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) + require.Equal(suite.T(), i, CountStream(nodes[0].libP2PHost, nodes[1].libP2PHost.ID(), flowProtocolID, network.DirOutbound)) } } // TestOneToOneComm sends a message from node 1 to node 2 and then from node 2 to node 1 -func (l *LibP2PNodeTestSuite) TestOneToOneComm() { +func (suite *LibP2PNodeTestSuite) TestOneToOneComm() { count := 2 ch := make(chan string, count) @@ -258,149 +257,138 @@ func (l *LibP2PNodeTestSuite) TestOneToOneComm() { handler := func(s network.Stream) { rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) str, err := rw.ReadString('\n') - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) ch <- str } // Creates peers - peers, addrs := l.CreateNodes(count, handler, false) - defer l.StopNodes(peers) - require.Len(l.T(), addrs, count) + peers, addrs := suite.CreateNodes(count, handler, false) + defer suite.StopNodes(peers) + require.Len(suite.T(), addrs, count) addr1 := addrs[0] addr2 := addrs[1] // Create stream from node 1 to node 2 s1, err := peers[0].CreateStream(context.Background(), addr2) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) rw := bufio.NewReadWriter(bufio.NewReader(s1), bufio.NewWriter(s1)) // Send message from node 1 to 2 msg := "hello\n" _, err = rw.WriteString(msg) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) // Flush the stream - assert.NoError(l.T(), rw.Flush()) + assert.NoError(suite.T(), rw.Flush()) // Wait for the message to be received select { case rcv := <-ch: - require.Equal(l.T(), msg, rcv) + require.Equal(suite.T(), msg, rcv) case <-time.After(1 * time.Second): - assert.Fail(l.T(), "message not received") + assert.Fail(suite.T(), "message not received") } // Create stream from node 2 to node 1 s2, err := peers[1].CreateStream(context.Background(), addr1) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) rw = bufio.NewReadWriter(bufio.NewReader(s2), bufio.NewWriter(s2)) // Send message from node 2 to 1 msg = "hey\n" _, err = rw.WriteString(msg) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) // Flush the stream - assert.NoError(l.T(), rw.Flush()) + assert.NoError(suite.T(), rw.Flush()) select { case rcv := <-ch: - require.Equal(l.T(), msg, rcv) + require.Equal(suite.T(), msg, rcv) case <-time.After(3 * time.Second): - assert.Fail(l.T(), "message not received") + assert.Fail(suite.T(), "message not received") } } // TestCreateStreamTimeoutWithUnresponsiveNode tests that the CreateStream call does not block longer than the default // unicast timeout interval -func (l *LibP2PNodeTestSuite) TestCreateStreamTimeoutWithUnresponsiveNode() { +func (suite *LibP2PNodeTestSuite) TestCreateStreamTimeoutWithUnresponsiveNode() { // creates a regular node - peers, addrs := l.CreateNodes(1, nil, false) - defer l.StopNodes(peers) - require.Len(l.T(), addrs, 1) + peers, addrs := suite.CreateNodes(1, nil, false) + defer suite.StopNodes(peers) + require.Len(suite.T(), addrs, 1) 
// create a silent node which never replies - listener, silentNodeAddress := newSilentNode(l.T()) - defer listener.Close() + listener, silentNodeAddress := newSilentNode(suite.T()) + defer func() { + require.NoError(suite.T(), listener.Close()) + }() // setup the context to expire after the default timeout ctx, cancel := context.WithTimeout(context.Background(), DefaultUnicastTimeout) defer cancel() // attempt to create a stream from node 1 to node 2 and assert that it fails after timeout - grace := 10 * time.Millisecond + grace := 1 * time.Second var err error - unittest.AssertReturnsBefore(l.T(), + unittest.AssertReturnsBefore(suite.T(), func() { _, err = peers[0].CreateStream(ctx, silentNodeAddress) }, DefaultUnicastTimeout+grace) - assert.Error(l.T(), err) + assert.Error(suite.T(), err) } // TestCreateStreamIsConcurrent tests that CreateStream calls can be made concurrently such that one blocked call // does not block another concurrent call. -func (l *LibP2PNodeTestSuite) TestCreateStreamIsConcurrent() { +func (suite *LibP2PNodeTestSuite) TestCreateStreamIsConcurrent() { // bump up the unicast timeout to a high value unicastTimeout = time.Hour // create two regular node - goodPeers, goodAddrs := l.CreateNodes(2, nil, false) - defer l.StopNodes(goodPeers) - require.Len(l.T(), goodAddrs, 2) + goodPeers, goodAddrs := suite.CreateNodes(2, nil, false) + defer suite.StopNodes(goodPeers) + require.Len(suite.T(), goodAddrs, 2) // create a silent node which never replies - listener, silentNodeAddress := newSilentNode(l.T()) - defer listener.Close() - - wg := sync.WaitGroup{} - wg.Add(1) - - ch := make(chan struct{}) - // spin off a go routine to create a stream to the unresponsive node - go func() { - wg.Done() - _, _ = goodPeers[0].CreateStream(l.ctx, silentNodeAddress) // this call will block - close(ch) + listener, silentNodeAddress := newSilentNode(suite.T()) + defer func() { + require.NoError(suite.T(), listener.Close()) }() - // make sure the go routine to create a stream with the unresponsive node actually started - unittest.AssertReturnsBefore(l.T(), wg.Wait, 5*time.Millisecond) - // make sure the go routine to create a stream did not finish - select { - case <-time.After(10 * time.Millisecond): - case <-ch: - assert.Fail(l.T(), "CreateStream attempt to the unresponsive peer did not block") - } + // creates a stream to unresponsive node and makes sure that the stream creation is blocked + blockedCallCh := unittest.RequireNeverReturnBefore(suite.T(), + func() { + _, _ = goodPeers[0].CreateStream(suite.ctx, silentNodeAddress) // this call will block + }, + 1*time.Second, + "CreateStream attempt to the unresponsive peer did not block") - var err error - // assert that the same peer can still connect to the other regular peer without being blocked - unittest.AssertReturnsBefore(l.T(), + // requires same peer can still connect to the other regular peer without being blocked + unittest.RequireReturnsBefore(suite.T(), func() { - _, err = goodPeers[0].CreateStream(l.ctx, goodAddrs[1]) + _, err := goodPeers[0].CreateStream(suite.ctx, goodAddrs[1]) + require.NoError(suite.T(), err) }, - 10*time.Millisecond) - assert.NoError(l.T(), err) + 1*time.Second, "creating stream to a responsive node failed while concurrently blocked on unresponsive node") - // assert that the CreateStream call to the unresponsive node was blocked while we attempted the CreateStream to the + // requires the CreateStream call to the unresponsive node was blocked while we attempted the CreateStream to the // good address - 
select { - case <-ch: - assert.Fail(l.T(), "CreateStream attempt to the unresponsive peer did not block") - default: - } + unittest.RequireNeverClosedWithin(suite.T(), blockedCallCh, 1*time.Millisecond, + "CreateStream attempt to the unresponsive peer did not block after connecting to good node") + } // TestCreateStreamIsConcurrencySafe tests that the CreateStream is concurrency safe -func (l *LibP2PNodeTestSuite) TestCreateStreamIsConcurrencySafe() { +func (suite *LibP2PNodeTestSuite) TestCreateStreamIsConcurrencySafe() { // create two nodes - peers, addrs := l.CreateNodes(2, nil, false) - defer l.StopNodes(peers) - require.Len(l.T(), addrs, 2) + peers, addrs := suite.CreateNodes(2, nil, false) + defer suite.StopNodes(peers) + require.Len(suite.T(), addrs, 2) wg := sync.WaitGroup{} @@ -409,10 +397,11 @@ func (l *LibP2PNodeTestSuite) TestCreateStreamIsConcurrencySafe() { createStream := func() { <-gate - _, err := peers[0].CreateStream(l.ctx, addrs[1]) - assert.NoError(l.T(), err) // assert that stream was successfully created + _, err := peers[0].CreateStream(suite.ctx, addrs[1]) + assert.NoError(suite.T(), err) // assert that stream was successfully created wg.Done() } + // kick off 10 concurrent calls to CreateStream for i := 0; i < 10; i++ { wg.Add(1) @@ -422,11 +411,11 @@ func (l *LibP2PNodeTestSuite) TestCreateStreamIsConcurrencySafe() { close(gate) // no call should block - unittest.AssertReturnsBefore(l.T(), wg.Wait, 10*time.Millisecond) + unittest.AssertReturnsBefore(suite.T(), wg.Wait, 10*time.Second) } // TestStreamClosing tests 1-1 communication with streams closed using libp2p2 handler.FullClose -func (l *LibP2PNodeTestSuite) TestStreamClosing() { +func (suite *LibP2PNodeTestSuite) TestStreamClosing() { count := 10 ch := make(chan string, count) @@ -445,9 +434,9 @@ func (l *LibP2PNodeTestSuite) TestStreamClosing() { s.Close() return } - assert.Fail(l.T(), fmt.Sprintf("received error %v", err)) + assert.Fail(suite.T(), fmt.Sprintf("received error %v", err)) err = s.Reset() - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) return } select { @@ -461,50 +450,50 @@ func (l *LibP2PNodeTestSuite) TestStreamClosing() { } // Creates peers - peers, addrs := l.CreateNodes(2, handler, false) - defer l.StopNodes(peers) + peers, addrs := suite.CreateNodes(2, handler, false) + defer suite.StopNodes(peers) for i := 0; i < count; i++ { // Create stream from node 1 to node 2 (reuse if one already exists) s, err := peers[0].CreateStream(context.Background(), addrs[1]) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) w := bufio.NewWriter(s) // Send message from node 1 to 2 msg := fmt.Sprintf("hello%d\n", i) _, err = w.WriteString(msg) - assert.NoError(l.T(), err) + assert.NoError(suite.T(), err) // Flush the stream - assert.NoError(l.T(), w.Flush()) + assert.NoError(suite.T(), w.Flush()) wg := sync.WaitGroup{} wg.Add(1) go func(s network.Stream) { defer wg.Done() // close the stream err := helpers.FullClose(s) - require.NoError(l.T(), err) + require.NoError(suite.T(), err) }(s) // wait for stream to be closed wg.Wait() // wait for the message to be received - select { - case rcv := <-ch: - require.Equal(l.T(), msg, rcv) - case <-time.After(10 * time.Second): - require.Fail(l.T(), fmt.Sprintf("message %s not received", msg)) - break - } + unittest.RequireReturnsBefore(suite.T(), + func() { + rcv := <-ch + require.Equal(suite.T(), msg, rcv) + }, + 10*time.Second, + fmt.Sprintf("message %s not received", msg)) } } // TestPing tests that a node can ping another node -func (l 
*LibP2PNodeTestSuite) TestPing() { +func (suite *LibP2PNodeTestSuite) TestPing() { // creates two nodes - nodes, nodeAddr := l.CreateNodes(2, nil, false) - defer l.StopNodes(nodes) + nodes, nodeAddr := suite.CreateNodes(2, nil, false) + defer suite.StopNodes(nodes) node1 := nodes[0] node2 := nodes[1] @@ -512,75 +501,75 @@ func (l *LibP2PNodeTestSuite) TestPing() { node2Addr := nodeAddr[1] // test node1 can ping node 2 - _, err := node1.Ping(l.ctx, node2Addr) - require.NoError(l.T(), err) + _, err := node1.Ping(suite.ctx, node2Addr) + require.NoError(suite.T(), err) // test node 2 can ping node 1 - _, err = node2.Ping(l.ctx, node1Addr) - require.NoError(l.T(), err) + _, err = node2.Ping(suite.ctx, node1Addr) + require.NoError(suite.T(), err) } // TestConnectionGating tests node allow listing by peer.ID -func (l *LibP2PNodeTestSuite) TestConnectionGating() { +func (suite *LibP2PNodeTestSuite) TestConnectionGating() { // create 2 nodes - nodes, nodeAddrs := l.CreateNodes(2, nil, true) + nodes, nodeAddrs := suite.CreateNodes(2, nil, true) node1 := nodes[0] node1Addr := nodeAddrs[0] - defer l.StopNode(node1) + defer suite.StopNode(node1) node2 := nodes[1] node2Addr := nodeAddrs[1] - defer l.StopNode(node2) + defer suite.StopNode(node2) requireError := func(err error) { - require.Error(l.T(), err) - require.True(l.T(), errors.Is(err, swarm.ErrGaterDisallowedConnection)) + require.Error(suite.T(), err) + require.True(suite.T(), errors.Is(err, swarm.ErrGaterDisallowedConnection)) } - l.Run("outbound connection to a not-allowed node is rejected", func() { + suite.Run("outbound connection to a not-allowed node is rejected", func() { // node1 and node2 both have no allowListed peers - _, err := node1.CreateStream(l.ctx, node2Addr) + _, err := node1.CreateStream(suite.ctx, node2Addr) requireError(err) - _, err = node2.CreateStream(l.ctx, node1Addr) + _, err = node2.CreateStream(suite.ctx, node1Addr) requireError(err) }) - l.Run("inbound connection from an allowed node is rejected", func() { + suite.Run("inbound connection from an allowed node is rejected", func() { // node1 allowlists node2 but node2 does not allowlists node1 err := node1.UpdateAllowlist([]NodeAddress{node2Addr}...) - require.NoError(l.T(), err) + require.NoError(suite.T(), err) // node1 attempts to connect to node2 // node2 should reject the inbound connection - _, err = node1.CreateStream(l.ctx, node2Addr) - require.Error(l.T(), err) + _, err = node1.CreateStream(suite.ctx, node2Addr) + require.Error(suite.T(), err) }) - l.Run("outbound connection to an approved node is allowed", func() { + suite.Run("outbound connection to an approved node is allowed", func() { // node1 allowlists node2 err := node1.UpdateAllowlist([]NodeAddress{node2Addr}...) - require.NoError(l.T(), err) + require.NoError(suite.T(), err) // node2 allowlists node1 err = node2.UpdateAllowlist([]NodeAddress{node1Addr}...) 
- require.NoError(l.T(), err) + require.NoError(suite.T(), err) // node1 should be allowed to connect to node2 - _, err = node1.CreateStream(l.ctx, node2Addr) - require.NoError(l.T(), err) + _, err = node1.CreateStream(suite.ctx, node2Addr) + require.NoError(suite.T(), err) // node2 should be allowed to connect to node1 - _, err = node2.CreateStream(l.ctx, node1Addr) - require.NoError(l.T(), err) + _, err = node2.CreateStream(suite.ctx, node1Addr) + require.NoError(suite.T(), err) }) } // CreateNodes creates a number of libp2pnodes equal to the count with the given callback function for stream handling // it also asserts the correctness of nodes creations // a single error in creating one node terminates the entire test -func (l *LibP2PNodeTestSuite) CreateNodes(count int, handler network.StreamHandler, allowList bool) ([]*P2PNode, []NodeAddress) { +func (suite *LibP2PNodeTestSuite) CreateNodes(count int, handler network.StreamHandler, allowList bool) ([]*P2PNode, []NodeAddress) { // keeps track of errors on creating a node var err error var nodes []*P2PNode @@ -588,7 +577,7 @@ func (l *LibP2PNodeTestSuite) CreateNodes(count int, handler network.StreamHandl defer func() { if err != nil && nodes != nil { // stops all nodes upon an error in starting even one single node - l.StopNodes(nodes) + suite.StopNodes(nodes) } }() @@ -598,17 +587,17 @@ func (l *LibP2PNodeTestSuite) CreateNodes(count int, handler network.StreamHandl name := fmt.Sprintf("node%d", i+1) pkey, err := generateNetworkingKey(name) - require.NoError(l.Suite.T(), err) + require.NoError(suite.T(), err) // create a node on localhost with a random port assigned by the OS - n, nodeID := l.CreateNode(name, pkey, "0.0.0.0", "0", rootBlockID, handler, allowList) + n, nodeID := suite.CreateNode(name, pkey, "0.0.0.0", "0", rootBlockID, handler, allowList) nodes = append(nodes, n) nodeAddrs = append(nodeAddrs, nodeID) } return nodes, nodeAddrs } -func (l *LibP2PNodeTestSuite) CreateNode(name string, key crypto.PrivKey, ip string, port string, rootID string, +func (suite *LibP2PNodeTestSuite) CreateNode(name string, key crypto.PrivKey, ip string, port string, rootID string, handler network.StreamHandler, allowList bool) (*P2PNode, NodeAddress) { n := &P2PNode{} nodeID := NodeAddress{ @@ -627,30 +616,30 @@ func (l *LibP2PNodeTestSuite) CreateNode(name string, key crypto.PrivKey, ip str handlerFunc = func(network.Stream) {} } - err := n.Start(l.ctx, nodeID, l.logger, key, handlerFunc, rootID, allowList, nil) - require.NoError(l.T(), err) - require.Eventuallyf(l.T(), func() bool { + err := n.Start(suite.ctx, nodeID, suite.logger, key, handlerFunc, rootID, allowList, nil) + require.NoError(suite.T(), err) + require.Eventuallyf(suite.T(), func() bool { ip, p, err := n.GetIPPort() return err == nil && ip != "" && p != "" }, 3*time.Second, tickForAssertEventually, fmt.Sprintf("could not start node %s", name)) // get the actual IP and port that have been assigned by the subsystem nodeID.IP, nodeID.Port, err = n.GetIPPort() - require.NoError(l.T(), err) + require.NoError(suite.T(), err) return n, nodeID } // StopNodes stop all nodes in the input slice -func (l *LibP2PNodeTestSuite) StopNodes(nodes []*P2PNode) { +func (suite *LibP2PNodeTestSuite) StopNodes(nodes []*P2PNode) { for _, n := range nodes { - l.StopNode(n) + suite.StopNode(n) } } -func (l *LibP2PNodeTestSuite) StopNode(node *P2PNode) { +func (suite *LibP2PNodeTestSuite) StopNode(node *P2PNode) { done, err := node.Stop() - assert.NoError(l.Suite.T(), err) + assert.NoError(suite.T(), err) 
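+	// block until the node has fully stopped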
<-done } diff --git a/network/gossip/libp2p/middleware.go b/network/gossip/libp2p/middleware.go index 9eee145a508..f90d6cddde8 100644 --- a/network/gossip/libp2p/middleware.go +++ b/network/gossip/libp2p/middleware.go @@ -191,10 +191,13 @@ func (m *Middleware) Start(ov middleware.Overlay) error { if err != nil { return fmt.Errorf("failed to create libp2pConnector: %w", err) } + m.peerManager = NewPeerManager(m.ctx, m.log, m.ov.Topology, libp2pConnector) - err = m.peerManager.Start() - if err != nil { - return fmt.Errorf("failed to start peer manager: %w", err) + select { + case <-m.peerManager.Ready(): + m.log.Debug().Msg("peer manager successfully started") + case <-time.After(30 * time.Second): + return fmt.Errorf("could not start peer manager") } // the ip,port may change after libp2p has been started. e.g. 0.0.0.0:0 would change to an actual IP and port @@ -208,13 +211,17 @@ func (m *Middleware) Start(ov middleware.Overlay) error { // Stop will end the execution of the middleware and wait for it to end. func (m *Middleware) Stop() { - // stop libp2p + // stops peer manager + <-m.peerManager.Done() + m.log.Debug().Msg("peer manager successfully stopped") + + // stops libp2p done, err := m.libP2PNode.Stop() if err != nil { - m.log.Error().Err(err).Msg("stopping failed") + m.log.Error().Err(err).Msg("could not stop libp2p node") } else { <-done - m.log.Debug().Msg("node stopped successfully") + m.log.Debug().Msg("libp2p node successfully stopped") } // cancel the context (this also signals any lingering libp2p go routines to exit) diff --git a/network/gossip/libp2p/peerManager.go b/network/gossip/libp2p/peerManager.go index bec50dd503b..f210fb5fa60 100644 --- a/network/gossip/libp2p/peerManager.go +++ b/network/gossip/libp2p/peerManager.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" ) @@ -31,6 +32,7 @@ var PeerUpdateInterval = 1 * time.Minute // PeerManager adds and removes connections to peers periodically and on request type PeerManager struct { + unit *engine.Unit ctx context.Context logger zerolog.Logger idsProvider func() (flow.IdentityList, error) // callback to retrieve list of peers to connect to @@ -42,9 +44,11 @@ type PeerManager struct { // NewPeerManager creates a new peer manager which calls the idsProvider callback to get a list of peers to connect to // and it uses the connector to actually connect or disconnect from peers. -func NewPeerManager(ctx context.Context, logger zerolog.Logger, idsProvider func() (flow.IdentityList, error), connector Connector) *PeerManager { +func NewPeerManager(ctx context.Context, logger zerolog.Logger, idsProvider func() (flow.IdentityList, error), + connector Connector) *PeerManager { return &PeerManager{ ctx: ctx, + unit: engine.NewUnit(), logger: logger, idsProvider: idsProvider, connector: connector, @@ -52,39 +56,31 @@ func NewPeerManager(ctx context.Context, logger zerolog.Logger, idsProvider func } } -// Start kicks off the ambient periodic connection updates -func (pm *PeerManager) Start() error { - go pm.updateLoop() - go pm.periodicUpdate() - return nil +// Ready kicks off the ambient periodic connection updates. 
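+// It triggers an initial on-demand peer update, starts the periodic update and
+// the update loop on the engine unit, and returns the unit's ready channel.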
+func (pm *PeerManager) Ready() <-chan struct{} { + // makes sure that peer update request is invoked + // once before returning + pm.RequestPeerUpdate() + + // also starts running it periodically + pm.unit.LaunchPeriodically(pm.RequestPeerUpdate, PeerUpdateInterval, time.Duration(0)) + + pm.unit.Launch(pm.updateLoop) + + return pm.unit.Ready() +} + +func (pm *PeerManager) Done() <-chan struct{} { + return pm.unit.Done() } // updateLoop triggers an update peer request when it has been requested func (pm *PeerManager) updateLoop() { for { select { - case <-pm.ctx.Done(): - return case <-pm.peerRequestQ: pm.updatePeers() - } - } -} - -// updateLoop request periodic connection update -func (pm *PeerManager) periodicUpdate() { - - // request initial discovery - pm.RequestPeerUpdate() - - ticker := time.NewTicker(PeerUpdateInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - pm.RequestPeerUpdate() - case <-pm.ctx.Done(): + case <-pm.unit.Quit(): return } } diff --git a/network/gossip/libp2p/peerManager_test.go b/network/gossip/libp2p/peerManager_test.go index 26d3a9106f0..4c4271034a6 100644 --- a/network/gossip/libp2p/peerManager_test.go +++ b/network/gossip/libp2p/peerManager_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + golog "github.com/ipfs/go-log" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" testifymock "github.com/stretchr/testify/mock" @@ -29,14 +30,15 @@ func TestPeerManagerTestSuite(t *testing.T) { suite.Run(t, new(PeerManagerTestSuite)) } -func (ts *PeerManagerTestSuite) SetupTest() { - ts.log = ts.log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() - ts.ctx = context.Background() +func (suite *PeerManagerTestSuite) SetupTest() { + suite.log = zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) + golog.SetAllLoggers(golog.LevelError) + suite.ctx = context.Background() } // TestUpdatePeers tests that updatePeers calls the connector with the expected list of ids to connect and disconnect // from. The tests are cumulative and ordered. -func (ts *PeerManagerTestSuite) TestUpdatePeers() { +func (suite *PeerManagerTestSuite) TestUpdatePeers() { // create some test ids currentIDs := unittest.IdentityListFixture(10) @@ -51,59 +53,59 @@ func (ts *PeerManagerTestSuite) TestUpdatePeers() { // create the connector mock to check ids requested for connect and disconnect connector := new(mock.Connector) - connector.On("ConnectPeers", ts.ctx, testifymock.AnythingOfType("flow.IdentityList")). + connector.On("ConnectPeers", suite.ctx, testifymock.AnythingOfType("flow.IdentityList")). Run(func(args testifymock.Arguments) { idArg := args[1].(flow.IdentityList) - assertListsEqual(ts.T(), currentIDs, idArg) + assertListsEqual(suite.T(), currentIDs, idArg) }). Return(nil) - connector.On("DisconnectPeers", ts.ctx, testifymock.AnythingOfType("flow.IdentityList")). + connector.On("DisconnectPeers", suite.ctx, testifymock.AnythingOfType("flow.IdentityList")). Run(func(args testifymock.Arguments) { idArg := args[1].(flow.IdentityList) - assertListsEqual(ts.T(), extraIDs, idArg) + assertListsEqual(suite.T(), extraIDs, idArg) // assert that ids passed to disconnect have no id in common with those passed to connect - assertListsDisjoint(ts.T(), currentIDs, extraIDs) + assertListsDisjoint(suite.T(), currentIDs, extraIDs) }). 
Return(nil) // create the peer manager (but don't start it) - pm := NewPeerManager(ts.ctx, ts.log, idProvider, connector) + pm := NewPeerManager(suite.ctx, suite.log, idProvider, connector) // very first call to updatepeer - ts.Run("updatePeers only connects to all peers the first time", func() { + suite.Run("updatePeers only connects to all peers the first time", func() { pm.updatePeers() - connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 1) - connector.AssertNotCalled(ts.T(), "DisconnectPeers") + connector.AssertNumberOfCalls(suite.T(), "ConnectPeers", 1) + connector.AssertNotCalled(suite.T(), "DisconnectPeers") }) // a subsequent call to updatepeer should request a connect to existing ids and new ids - ts.Run("updatePeers connects to old and new peers", func() { + suite.Run("updatePeers connects to old and new peers", func() { // create a new id newIDs := unittest.IdentityListFixture(1) currentIDs = append(currentIDs, newIDs...) pm.updatePeers() - connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 2) - connector.AssertNotCalled(ts.T(), "DisconnectPeers") + connector.AssertNumberOfCalls(suite.T(), "ConnectPeers", 2) + connector.AssertNotCalled(suite.T(), "DisconnectPeers") }) // when ids are excluded, they should be requested to be disconnected - ts.Run("updatePeers disconnects from extra peers", func() { + suite.Run("updatePeers disconnects from extra peers", func() { // delete an id extraIDs = currentIDs.Sample(1) currentIDs = currentIDs.Filter(filter.Not(filter.In(extraIDs))) pm.updatePeers() - connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 3) - connector.AssertNumberOfCalls(ts.T(), "DisconnectPeers", 1) + connector.AssertNumberOfCalls(suite.T(), "ConnectPeers", 3) + connector.AssertNumberOfCalls(suite.T(), "DisconnectPeers", 1) }) // addition and deletion of ids should result in appropriate connect and disconnect calls - ts.Run("updatePeers connects to new peers and disconnects from extra peers", func() { + suite.Run("updatePeers connects to new peers and disconnects from extra peers", func() { // remove a couple of ids extraIDs = currentIDs.Sample(2) currentIDs = currentIDs.Filter(filter.Not(filter.In(extraIDs))) @@ -114,108 +116,124 @@ func (ts *PeerManagerTestSuite) TestUpdatePeers() { pm.updatePeers() - connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 4) - connector.AssertNumberOfCalls(ts.T(), "DisconnectPeers", 2) + connector.AssertNumberOfCalls(suite.T(), "ConnectPeers", 4) + connector.AssertNumberOfCalls(suite.T(), "DisconnectPeers", 2) }) } -// TestPeriodicPeerUpdate tests that the peermanager runs periodically -func (ts *PeerManagerTestSuite) TestPeriodicPeerUpdate() { +// TestPeriodicPeerUpdate tests that the peer manager runs periodically +func (suite *PeerManagerTestSuite) TestPeriodicPeerUpdate() { currentIDs := unittest.IdentityListFixture(10) idProvider := func() (flow.IdentityList, error) { return currentIDs, nil } connector := new(mock.Connector) - connector.On("ConnectPeers", ts.ctx, testifymock.Anything).Return(nil) - connector.On("DisconnectPeers", ts.ctx, testifymock.Anything).Return(nil) - pm := NewPeerManager(ts.ctx, ts.log, idProvider, connector) - + wg := &sync.WaitGroup{} // keeps track of number of calls on `ConnectPeers` + mu := &sync.Mutex{} // provides mutual exclusion on calls to `ConnectPeers` + count := 0 + times := 2 // we expect it to be called twice at least + wg.Add(times) + connector.On("ConnectPeers", suite.ctx, testifymock.Anything).Run(func(args testifymock.Arguments) { + mu.Lock() + defer mu.Unlock() + + if count < times { 
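+			// only acknowledge the first `times` invocations so that additional
+			// periodic calls cannot push the wait group counter below zero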
+ count++ + wg.Done() + } + }).Return(nil) + connector.On("DisconnectPeers", suite.ctx, testifymock.Anything).Return(nil) + pm := NewPeerManager(suite.ctx, suite.log, idProvider, connector) PeerUpdateInterval = 5 * time.Millisecond - err := pm.Start() - assert.NoError(ts.T(), err) - assert.Eventually(ts.T(), func() bool { - return connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 2) - }, 2*PeerUpdateInterval+4*time.Millisecond, 2*PeerUpdateInterval) + unittest.RequireClosesBefore(suite.T(), pm.Ready(), 2*time.Second) + + unittest.RequireReturnsBefore(suite.T(), wg.Wait, 2*PeerUpdateInterval, + "ConnectPeers is not running on UpdateIntervals") } // TestOnDemandPeerUpdate tests that the a peer update can be requested on demand and in between the periodic runs -func (ts *PeerManagerTestSuite) TestOnDemandPeerUpdate() { +func (suite *PeerManagerTestSuite) TestOnDemandPeerUpdate() { currentIDs := unittest.IdentityListFixture(10) idProvider := func() (flow.IdentityList, error) { return currentIDs, nil } + // chooses peer interval rate deliberately long to capture on demand peer update + PeerUpdateInterval = time.Hour + + // creates mock connector + wg := &sync.WaitGroup{} // keeps track of number of calls on `ConnectPeers` + mu := &sync.Mutex{} // provides mutual exclusion on calls to `ConnectPeers` + count := 0 + times := 2 // we expect it to be called twice overall + wg.Add(1) // this accounts for one invocation, the other invocation is subsequent connector := new(mock.Connector) - connector.On("ConnectPeers", ts.ctx, testifymock.Anything).Return(nil) - connector.On("DisconnectPeers", ts.ctx, testifymock.Anything).Return(nil) - pm := NewPeerManager(ts.ctx, ts.log, idProvider, connector) + // captures the first periodic update initiated after start to complete + connector.On("ConnectPeers", suite.ctx, testifymock.Anything).Run(func(args testifymock.Arguments) { + mu.Lock() + defer mu.Unlock() - PeerUpdateInterval = time.Hour - err := pm.Start() - assert.NoError(ts.T(), err) + if count < times { + count++ + wg.Done() + } + }).Return(nil) + connector.On("DisconnectPeers", suite.ctx, testifymock.Anything).Return(nil) + + pm := NewPeerManager(suite.ctx, suite.log, idProvider, connector) + unittest.RequireClosesBefore(suite.T(), pm.Ready(), 2*time.Second) - // wait for the first periodic update initiated after start to complete - assert.Eventually(ts.T(), func() bool { - return connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 1) - }, 10*time.Millisecond, 1*time.Millisecond) + unittest.RequireReturnsBefore(suite.T(), wg.Wait, 1*time.Second, + "ConnectPeers is not running on startup") - // make a request for peer update + // makes a request for peer update + wg.Add(1) // expects a call to `ConnectPeers` by requesting peer update pm.RequestPeerUpdate() // assert that a call to connect to peers is made - assert.Eventually(ts.T(), func() bool { - return connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 2) - }, 10*time.Millisecond, 1*time.Millisecond) + unittest.RequireReturnsBefore(suite.T(), wg.Wait, 1*time.Second, + "ConnectPeers is not running on request") } // TestConcurrentOnDemandPeerUpdate tests that concurrent on-demand peer update request never block -func (ts *PeerManagerTestSuite) TestConcurrentOnDemandPeerUpdate() { +func (suite *PeerManagerTestSuite) TestConcurrentOnDemandPeerUpdate() { currentIDs := unittest.IdentityListFixture(10) idProvider := func() (flow.IdentityList, error) { return currentIDs, nil } - ctx, cancel := context.WithCancel(ts.ctx) + ctx, cancel := 
context.WithCancel(suite.ctx) defer cancel() connector := new(mock.Connector) // connectPeerGate channel gates the return of the connector connectPeerGate := make(chan time.Time) defer close(connectPeerGate) - connector.On("ConnectPeers", ctx, testifymock.Anything).Return(nil).WaitUntil(connectPeerGate) - connector.On("DisconnectPeers", ctx, testifymock.Anything).Return(nil) - pm := NewPeerManager(ctx, ts.log, idProvider, connector) + connector.On("ConnectPeers", ctx, testifymock.Anything).Return(nil). + WaitUntil(connectPeerGate) // blocks call for connectPeerGate channel + connector.On("DisconnectPeers", ctx, testifymock.Anything).Return(nil) + pm := NewPeerManager(ctx, suite.log, idProvider, connector) // set the periodic interval to a high value so that periodic runs don't interfere with this test PeerUpdateInterval = time.Hour // start the peer manager // this should trigger the first update and which will block on the ConnectPeers to return - err := pm.Start() - assert.NoError(ts.T(), err) - - // make 10 concurrent request for peer update - wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - pm.RequestPeerUpdate() - wg.Done() - }() - } + unittest.RequireClosesBefore(suite.T(), pm.Ready(), 2*time.Second) - // assert that none of the request is blocked even if update is blocked - unittest.AssertReturnsBefore(ts.T(), wg.Wait, time.Second) + // makes 10 concurrent request for peer update + unittest.RequireConcurrentCallsReturnBefore(suite.T(), pm.RequestPeerUpdate, 10, time.Second, + "concurrent peer update requests could not return on time") - // allow the first update to finish + // allow the first (periodic/on-demand) and the second (on-demand) update to finish connectPeerGate <- time.Now() - // assert that only two calls to ConnectPeers were made (one for periodic update and one for the on-demand request) - assert.Eventually(ts.T(), func() bool { - return connector.AssertNumberOfCalls(ts.T(), "ConnectPeers", 2) - }, 10*time.Millisecond, 1*time.Millisecond) + // requires two calls to ConnectPeers were made + assert.Eventually(suite.T(), func() bool { + return connector.AssertNumberOfCalls(suite.T(), "ConnectPeers", 2) + }, 3*time.Second, 100*time.Millisecond) } // assertListsEqual asserts that two identity list are equal ignoring the order diff --git a/network/gossip/libp2p/test/echoengine_test.go b/network/gossip/libp2p/test/echoengine_test.go index 2debf46896f..2fb3617c7fa 100644 --- a/network/gossip/libp2p/test/echoengine_test.go +++ b/network/gossip/libp2p/test/echoengine_test.go @@ -10,7 +10,6 @@ import ( golog "github.com/ipfs/go-log" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -42,8 +41,9 @@ func TestStubEngineTestSuite(t *testing.T) { func (s *EchoEngineTestSuite) SetupTest() { const count = 2 + + logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) golog.SetAllLoggers(golog.LevelError) - logger := log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() s.ids, s.mws, s.nets = generateIDsMiddlewaresNetworks(s.T(), count, logger, 100, nil, false) } diff --git a/network/gossip/libp2p/test/epochtransition_test.go b/network/gossip/libp2p/test/epochtransition_test.go index 5622d15e5e3..83237e50c0d 100644 --- a/network/gossip/libp2p/test/epochtransition_test.go +++ b/network/gossip/libp2p/test/epochtransition_test.go @@ -10,7 +10,6 @@ import ( golog "github.com/ipfs/go-log" "github.com/rs/zerolog" - 
"github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -48,8 +47,8 @@ func TestEpochTransitionTestSuite(t *testing.T) { func (ts *EpochTransitionTestSuite) SetupTest() { rand.Seed(time.Now().UnixNano()) nodeCount := 10 + ts.logger = zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) golog.SetAllLoggers(golog.LevelError) - ts.logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() // create ids ids, mws := generateIDsAndMiddlewares(ts.T(), nodeCount, ts.logger) @@ -106,7 +105,6 @@ func (ts *EpochTransitionTestSuite) TearDownTest() { // TestNewNodeAdded tests that an additional node in the next epoch gets connected to other nodes and can exchange messages // in the current epoch func (ts *EpochTransitionTestSuite) TestNewNodeAdded() { - // create the id, middleware and network for a new node ids, mws, nets := generateIDsMiddlewaresNetworks(ts.T(), 1, ts.logger, 100, nil, false) newMiddleware := mws[0] @@ -143,7 +141,6 @@ func (ts *EpochTransitionTestSuite) TestNewNodeAdded() { // TestNodeRemoved tests that a node that is removed in the next epoch remains connected for the current epoch func (ts *EpochTransitionTestSuite) TestNodeRemoved() { - // choose a random node to remove removeIndex := rand.Intn(len(ts.ids)) removedID := ts.ids[removeIndex] diff --git a/network/gossip/libp2p/test/meshengine_test.go b/network/gossip/libp2p/test/meshengine_test.go index e5f4a178cd9..bf5bdfe4461 100644 --- a/network/gossip/libp2p/test/meshengine_test.go +++ b/network/gossip/libp2p/test/meshengine_test.go @@ -12,7 +12,6 @@ import ( golog "github.com/ipfs/go-log" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -45,8 +44,8 @@ func TestMeshNetTestSuite(t *testing.T) { func (m *MeshEngineTestSuite) SetupTest() { // defines total number of nodes in our network (minimum 3 needed to use 1-k messaging) const count = 10 + logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) golog.SetAllLoggers(golog.LevelError) - logger := log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() var err error m.ids, m.mws, m.nets = generateIDsMiddlewaresNetworks(m.T(), count, logger, 100, nil, false) require.NoError(m.Suite.T(), err) diff --git a/network/gossip/libp2p/test/middleware_test.go b/network/gossip/libp2p/test/middleware_test.go index 1011989d003..7f9dc35ce6b 100644 --- a/network/gossip/libp2p/test/middleware_test.go +++ b/network/gossip/libp2p/test/middleware_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" + golog "github.com/ipfs/go-log" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" mockery "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -41,7 +41,8 @@ func TestMiddlewareTestSuit(t *testing.T) { // SetupTest initiates the test setups prior to each test func (m *MiddlewareTestSuite) SetupTest() { - logger := log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() + logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) + golog.SetAllLoggers(golog.LevelError) m.size = 2 // operates on two middlewares diff --git a/network/gossip/libp2p/test/portallocator.go b/network/gossip/libp2p/test/portallocator.go new file mode 100644 index 00000000000..f1994e166d7 --- /dev/null +++ b/network/gossip/libp2p/test/portallocator.go @@ -0,0 +1,48 @@ +package test + +import ( + 
"sync" + "testing" + + "github.com/phayes/freeport" + "github.com/stretchr/testify/require" +) + +// portAllocator is a test helper type keeping track of allocated free ports for testing. +type portAllocator struct { + sync.Mutex + allocatedPorts map[int]struct{} // keeps track of ports allocated to different tests +} + +func newPortAllocator() *portAllocator { + return &portAllocator{ + allocatedPorts: make(map[int]struct{}), + } +} + +// getFreePorts finds `n` free ports on the machine and marks them as allocated. +func (p *portAllocator) getFreePorts(t *testing.T, n int) []int { + p.Lock() + defer p.Unlock() + + ports := make([]int, n) + // keeps track of discovered ports + for count := 0; count < n; { + // get free ports + freePorts, err := freeport.GetFreePorts(1) + require.NoError(t, err) + port := freePorts[0] + + if _, ok := p.allocatedPorts[port]; ok { + // port has already been allocated + continue + } + + // records port address and mark it as allocated + ports[count] = port + p.allocatedPorts[port] = struct{}{} + count++ + } + + return ports +} diff --git a/network/gossip/libp2p/test/testUtil.go b/network/gossip/libp2p/test/testUtil.go index d56e28db61f..3abc911a623 100644 --- a/network/gossip/libp2p/test/testUtil.go +++ b/network/gossip/libp2p/test/testUtil.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/phayes/freeport" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -24,16 +23,22 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -var rootBlockID = unittest.IdentifierFixture().String() +var rootBlockID string + +var allocator *portAllocator + +// init is a built-in golang function getting called first time this +// initialize the allocated ports map and the root block ID +func init() { + allocator = newPortAllocator() + rootBlockID = unittest.IdentifierFixture().String() +} // generateIDs generate flow Identities with a valid port and networking key func generateIDs(t *testing.T, n int) (flow.IdentityList, []crypto.PrivateKey) { identities := make([]*flow.Identity, n) privateKeys := make([]crypto.PrivateKey, n) - - // get free ports - freePorts, err := freeport.GetFreePorts(n) - require.NoError(t, err) + freePorts := allocator.getFreePorts(t, n) for i := 0; i < n; i++ { diff --git a/network/gossip/libp2p/test/topology_test.go b/network/gossip/libp2p/test/topology_test.go index c1d8ad70969..02f4608d05c 100644 --- a/network/gossip/libp2p/test/topology_test.go +++ b/network/gossip/libp2p/test/topology_test.go @@ -6,6 +6,7 @@ import ( "sort" "testing" + golog "github.com/ipfs/go-log" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" @@ -31,7 +32,8 @@ func TestNetworkTestSuit(t *testing.T) { func (n *TopologyTestSuite) TestTopologySize() { totalNodes := 100 - logger := log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() + logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) + golog.SetAllLoggers(golog.LevelError) // create totalNodes number of networks _, _, nets := generateIDsMiddlewaresNetworks(n.T(), totalNodes, logger, 100, nil, true) diff --git a/utils/unittest/unittest.go b/utils/unittest/unittest.go index e5cf19943c6..6f403c361cb 100644 --- a/utils/unittest/unittest.go +++ b/utils/unittest/unittest.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "os" "strings" + "sync" "testing" "time" @@ -47,8 +48,18 @@ func AssertReturnsBefore(t *testing.T, f func(), duration time.Duration) { func AssertClosesBefore(t *testing.T, done <-chan struct{}, duration time.Duration) { select { case 
<-time.After(duration): - t.Log("channel did not return in time") - t.Fail() + assert.Fail(t, "channel did not return in time") + case <-done: + return + } +} + +// RequireClosesBefore requires that the given channel closes before the +// duration expires. +func RequireClosesBefore(t *testing.T, done <-chan struct{}, duration time.Duration) { + select { + case <-time.After(duration): + require.Fail(t, "channel did not return in time") case <-done: return } @@ -66,12 +77,63 @@ func RequireReturnsBefore(t testing.TB, f func(), duration time.Duration, messag select { case <-time.After(duration): - require.Fail(t, "function did not return in time: "+message) + require.Fail(t, "function did not return on time: "+message) case <-done: return } } +// RequireConcurrentCallsReturnBefore is a test helper that runs function `f` count-many times concurrently, +// and requires all invocations to return within duration. +func RequireConcurrentCallsReturnBefore(t *testing.T, f func(), count int, duration time.Duration, message string) { + wg := &sync.WaitGroup{} + for i := 0; i < count; i++ { + wg.Add(1) + go func() { + f() + wg.Done() + }() + } + + RequireReturnsBefore(t, wg.Wait, duration, message) +} + +// RequireNeverReturnBefore is a test helper that tries invoking function `f` and fails the test if either: +// - function `f` is not invoked within 1 second. +// - function `f` returns before specified `duration`. +// +// It also returns a channel that is closed once the function `f` returns and hence its openness can evaluate +// return status of function `f` for intervals longer than duration. +func RequireNeverReturnBefore(t *testing.T, f func(), duration time.Duration, message string) <-chan struct{} { + ch := make(chan struct{}) + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + wg.Done() + f() + close(ch) + }() + + // requires function invoked within next 1 second + RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not invoke the function: "+message) + + // requires function never returns within duration + RequireNeverClosedWithin(t, ch, duration, "unexpected return: "+message) + + return ch +} + +// RequireNeverClosedWithin is a test helper function that fails the test if channel `ch` is closed before the +// determined duration. +func RequireNeverClosedWithin(t *testing.T, ch <-chan struct{}, duration time.Duration, message string) { + select { + case <-time.After(duration): + case <-ch: + require.Fail(t, "channel closed before timeout: "+message) + } +} + // AssertErrSubstringMatch asserts that two errors match with substring // checking on the Error method (`expected` must be a substring of `actual`, to // account for the actual error being wrapped). 
Fails the test if either error From c33444a070da76adb5fb2a4a78fbe60649aa604b Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Fri, 30 Oct 2020 14:02:42 -0700 Subject: [PATCH 097/105] Linting --- cmd/util/cmd/truncate-database/cmd.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/util/cmd/truncate-database/cmd.go b/cmd/util/cmd/truncate-database/cmd.go index 19a54d124f2..e43fe4caf36 100644 --- a/cmd/util/cmd/truncate-database/cmd.go +++ b/cmd/util/cmd/truncate-database/cmd.go @@ -9,8 +9,7 @@ import ( ) var ( - flagBlockHeight uint64 - flagDatadir string + flagDatadir string ) var Cmd = &cobra.Command{ From 80234228ff58fe0f2ff85b1b9ecd721d9e8f25cf Mon Sep 17 00:00:00 2001 From: Maks Pawlak <120831+m4ksio@users.noreply.github.com> Date: Fri, 30 Oct 2020 14:12:09 -0700 Subject: [PATCH 098/105] Add command --- cmd/util/cmd/root.go | 2 ++ cmd/util/cmd/truncate-database/cmd.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index d112cef061a..be15e018275 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -14,6 +14,7 @@ import ( extract "github.com/onflow/flow-go/cmd/util/cmd/execution-state-extract" "github.com/onflow/flow-go/cmd/util/cmd/find-block" "github.com/onflow/flow-go/cmd/util/cmd/read-execution-state" + truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database" ) var ( @@ -52,6 +53,7 @@ func addCommands() { rootCmd.AddCommand(find.Cmd) rootCmd.AddCommand(read.Cmd) rootCmd.AddCommand(checkpoint_list_tries.Cmd) + rootCmd.AddCommand(truncate_database.Cmd) } func initConfig() { diff --git a/cmd/util/cmd/truncate-database/cmd.go b/cmd/util/cmd/truncate-database/cmd.go index e43fe4caf36..d18cbe610c4 100644 --- a/cmd/util/cmd/truncate-database/cmd.go +++ b/cmd/util/cmd/truncate-database/cmd.go @@ -1,4 +1,4 @@ -package find +package truncate_database import ( "github.com/rs/zerolog/log" From a08e9852065f46efcc95550bb40f63c02a83377f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 30 Oct 2020 14:55:49 -0700 Subject: [PATCH 099/105] fix tests --- utils/unittest/mocks/protocol_state.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/utils/unittest/mocks/protocol_state.go b/utils/unittest/mocks/protocol_state.go index 989b444520e..ebaea65bd58 100644 --- a/utils/unittest/mocks/protocol_state.go +++ b/utils/unittest/mocks/protocol_state.go @@ -25,6 +25,7 @@ type ProtocolState struct { children map[flow.Identifier][]flow.Identifier heights map[uint64]*flow.Block finalized uint64 + root *flow.Block result *flow.ExecutionResult seal *flow.Seal } @@ -42,6 +43,28 @@ type ProtocolStateMutator struct { ps *ProtocolState } +type Params struct { + state *ProtocolState +} + +func (p *Params) ChainID() (flow.ChainID, error) { + return p.state.root.Header.ChainID, nil +} + +func (p *Params) Root() (*flow.Header, error) { + return p.state.root.Header, nil +} + +func (p *Params) Seal() (*flow.Seal, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ps *ProtocolState) Params() protocol.Params { + return &Params{ + state: ps, + } +} + func (ps *ProtocolState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { ps.Lock() defer ps.Unlock() @@ -123,6 +146,7 @@ func (m *ProtocolStateMutator) Bootstrap(root *flow.Block, result *flow.Executio } m.ps.blocks[root.ID()] = root + m.ps.root = root m.ps.result = result m.ps.seal = seal m.ps.heights[root.Header.Height] = root From 
c0f837bedf58209e9501682cf3d29bc5f1f2ba17 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 30 Oct 2020 15:41:06 -0700 Subject: [PATCH 100/105] add additional logging --- engine/execution/ingestion/engine.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index f79573ebcaa..9e82ecbdad6 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -544,7 +544,7 @@ func (e *Engine) executeBlock(ctx context.Context, executableBlock *entity.Execu e.metrics.ExecutionGasUsedPerBlock(computationResult.GasUsed) e.metrics.ExecutionStateReadsPerBlock(computationResult.StateReads) - finalState, err := e.handleComputationResult(ctx, computationResult, executableBlock.StartState) + finalState, receipt, err := e.handleComputationResult(ctx, computationResult, executableBlock.StartState) if err != nil { e.log.Err(err). Hex("block_id", logging.Entity(executableBlock)). @@ -554,8 +554,13 @@ func (e *Engine) executeBlock(ctx context.Context, executableBlock *entity.Execu e.log.Info(). Hex("block_id", logging.Entity(executableBlock)). + Hex("parent_block", executableBlock.Block.Header.ParentID[:]). Uint64("block_height", executableBlock.Block.Header.Height). + Int("collections", len(executableBlock.Block.Payload.Guarantees)). + Hex("start_state", executableBlock.StartState). Hex("final_state", finalState). + Hex("receipt_id", logging.Entity(receipt)). + Hex("result_id", logging.Entity(receipt.ExecutionResult)). Msg("block executed") err = e.onBlockExecuted(executableBlock, finalState) @@ -961,7 +966,7 @@ func (e *Engine) handleComputationResult( ctx context.Context, result *execution.ComputationResult, startState flow.StateCommitment, -) (flow.StateCommitment, error) { +) (flow.StateCommitment, *flow.ExecutionReceipt, error) { e.log.Debug(). Hex("block_id", logging.Entity(result.ExecutableBlock)). 
@@ -980,12 +985,12 @@ func (e *Engine) handleComputationResult( ) if err != nil { - return nil, err + return nil, nil, err } err = e.providerEngine.BroadcastExecutionReceipt(ctx, receipt) if err != nil { - return nil, fmt.Errorf("could not send broadcast order: %w", err) + return nil, nil, fmt.Errorf("could not send broadcast order: %w", err) } finalState, ok := receipt.ExecutionResult.FinalStateCommitment() @@ -993,7 +998,7 @@ func (e *Engine) handleComputationResult( finalState = startState } - return finalState, nil + return finalState, receipt, nil } // save the execution result of a block From c5fb6778b825bc5d6ed39744799bcf9c9b39803f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Nov 2020 13:14:17 -0800 Subject: [PATCH 101/105] locking the read of statecommitment in execution state mock --- utils/unittest/mocks/execution_state.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/utils/unittest/mocks/execution_state.go b/utils/unittest/mocks/execution_state.go index 262301e1ce2..9c53511e111 100644 --- a/utils/unittest/mocks/execution_state.go +++ b/utils/unittest/mocks/execution_state.go @@ -7,7 +7,8 @@ import ( "github.com/stretchr/testify/require" - state "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/engine/execution/state/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" @@ -17,7 +18,7 @@ import ( // simulates some of its behavior for testing purpose type ExecutionState struct { sync.Mutex - state.ExecutionState + mock.ExecutionState commits map[flow.Identifier]flow.StateCommitment } @@ -37,6 +38,8 @@ func (es *ExecutionState) PersistStateCommitment(ctx context.Context, blockID fl } func (es *ExecutionState) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identifier) (flow.StateCommitment, error) { + es.Lock() + defer es.Unlock() commit, ok := es.commits[blockID] if !ok { return nil, storage.ErrNotFound @@ -46,8 +49,9 @@ func (es *ExecutionState) StateCommitmentByBlockID(ctx context.Context, blockID } func (es *ExecutionState) ExecuteBlock(t *testing.T, block *flow.Block) { - _, ok := es.commits[block.Header.ParentID] - require.True(t, ok, "parent block not executed") + parentExecuted, err := state.IsBlockExecuted(context.Background(), es, block.Header.ParentID) + require.NoError(t, err) + require.True(t, parentExecuted, "parent block not executed") require.NoError(t, es.PersistStateCommitment( context.Background(), From f8b644244fb6a4c4ca3f571bd90b82f1757b815f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Nov 2020 15:52:51 -0800 Subject: [PATCH 102/105] fix transaction results operation --- storage/badger/operation/transaction_results.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/badger/operation/transaction_results.go b/storage/badger/operation/transaction_results.go index 24f8b176398..b6b4b0d7275 100644 --- a/storage/badger/operation/transaction_results.go +++ b/storage/badger/operation/transaction_results.go @@ -33,5 +33,5 @@ func LookupTransactionResultsByBlockID(blockID flow.Identifier, txResults *[]flo return check, create, handle } - return traverse(makePrefix(codeEvent, blockID), txErrIterFunc) + return traverse(makePrefix(codeTransactionResult, blockID), txErrIterFunc) } From a9698a95044bf845bcf49954c8b120742f85639a Mon Sep 17 00:00:00 2001 From: Kay-Zee Date: Mon, 2 Nov 2020 18:29:16 -0800 
Subject: [PATCH 103/105] resolve merge issue --- engine/execution/ingestion/engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 3344110d73d..e17346f35ac 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -989,12 +989,12 @@ func (e *Engine) handleComputationResult( startState, ) if err != nil { - return nil, fmt.Errorf("could not save execution results: %w", err) + return nil, nil, fmt.Errorf("could not save execution results: %w", err) } receipt, err := e.generateExecutionReceipt(ctx, executionResult, result.StateSnapshots) if err != nil { - return nil, fmt.Errorf("could not generate execution receipt: %w", err) + return nil, nil, fmt.Errorf("could not generate execution receipt: %w", err) } err = func() error { From 868b8940316714953bb4fc04c097ff259dab75b5 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik <67895329+janezpodhostnik@users.noreply.github.com> Date: Tue, 3 Nov 2020 16:29:30 +0100 Subject: [PATCH 104/105] Decouple address generator state from accounts (#88) Decouple address generator state from accounts --- .../read-execution-state/list-accounts/cmd.go | 4 +- fvm/account.go | 2 +- fvm/bootstrap.go | 23 ++++++-- fvm/env.go | 59 +++++++++++-------- fvm/script.go | 5 +- fvm/state/accounts.go | 33 +++-------- fvm/state/accounts_test.go | 42 ++++++++++--- fvm/state/addresses.go | 37 ------------ fvm/state/ledger_address_generator.go | 49 +++++++++++++++ fvm/state/ledger_address_generator_test.go | 48 +++++++++++++++ fvm/transaction.go | 7 ++- fvm/transactionSequenceNum.go | 2 +- fvm/transactionVerifier.go | 2 +- 13 files changed, 209 insertions(+), 104 deletions(-) delete mode 100644 fvm/state/addresses.go create mode 100644 fvm/state/ledger_address_generator.go create mode 100644 fvm/state/ledger_address_generator_test.go diff --git a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go index 58cb16c7d87..930d18a821c 100644 --- a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go +++ b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go @@ -95,9 +95,9 @@ func run(*cobra.Command, []string) { return payload[0].Value, nil }) - accounts := state.NewAccounts(ldg, chain) + accounts := state.NewAccounts(ldg) + finalGenerator, err := state.NewLedgerBoundAddressGenerator(ldg, chain) - finalGenerator, err := accounts.GetAddressGeneratorState() if err != nil { log.Fatal().Err(err).Msgf("cannot get current address state") } diff --git a/fvm/account.go b/fvm/account.go index 481b3f8d180..f7cf61bbe2d 100644 --- a/fvm/account.go +++ b/fvm/account.go @@ -15,7 +15,7 @@ func getAccount( chain flow.Chain, address flow.Address, ) (*flow.Account, error) { - accounts := state.NewAccounts(ledger, chain) + accounts := state.NewAccounts(ledger) account, err := accounts.Get(address) if err != nil { diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index ca5b3cf2f29..7b932159045 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -24,6 +24,7 @@ type BootstrapProcedure struct { // genesis parameters serviceAccountPublicKey flow.AccountPublicKey initialTokenSupply cadence.UFix64 + addressGenerator flow.AddressGenerator } // Bootstrap returns a new BootstrapProcedure instance configured with the provided @@ -44,8 +45,12 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, ledger state.L b.ledger = ledger // initialize the account addressing state - b.accounts = 
state.NewAccounts(ledger, ctx.Chain) - b.accounts.InitAddressGeneratorState() + b.accounts = state.NewAccounts(ledger) + addressGenerator, err := state.NewLedgerBoundAddressGenerator(ledger, ctx.Chain) + if err != nil { + panic(fmt.Sprintf("failed to create address generator: %s", err.Error())) + } + b.addressGenerator = addressGenerator service := b.createServiceAccount(b.serviceAccountPublicKey) @@ -63,7 +68,12 @@ func (b *BootstrapProcedure) Run(vm *VirtualMachine, ctx Context, ledger state.L } func (b *BootstrapProcedure) createAccount() flow.Address { - address, err := b.accounts.Create(nil) + address, err := b.addressGenerator.NextAddress() + if err != nil { + panic(fmt.Sprintf("failed to generate address: %s", err)) + } + + err = b.accounts.Create(nil, address) if err != nil { panic(fmt.Sprintf("failed to create account: %s", err)) } @@ -72,7 +82,12 @@ func (b *BootstrapProcedure) createAccount() flow.Address { } func (b *BootstrapProcedure) createServiceAccount(accountKey flow.AccountPublicKey) flow.Address { - address, err := b.accounts.Create([]flow.AccountPublicKey{accountKey}) + address, err := b.addressGenerator.NextAddress() + if err != nil { + panic(fmt.Sprintf("failed to generate address: %s", err)) + } + + err = b.accounts.Create([]flow.AccountPublicKey{accountKey}, address) if err != nil { panic(fmt.Sprintf("failed to create service account: %s", err)) } diff --git a/fvm/env.go b/fvm/env.go index f8ee8e9ee8c..1eb3585993b 100644 --- a/fvm/env.go +++ b/fvm/env.go @@ -21,10 +21,11 @@ var _ runtime.Interface = &hostEnv{} var _ runtime.HighLevelStorage = &hostEnv{} type hostEnv struct { - ctx Context - ledger state.Ledger - accounts *state.Accounts - uuidGenerator *UUIDGenerator + ctx Context + ledger state.Ledger + accounts *state.Accounts + addressGenerator flow.AddressGenerator + uuidGenerator *UUIDGenerator runtime.Metrics @@ -35,18 +36,23 @@ type hostEnv struct { rng *rand.Rand } -func newEnvironment(ctx Context, ledger state.Ledger) *hostEnv { - accounts := state.NewAccounts(ledger, ctx.Chain) +func newEnvironment(ctx Context, ledger state.Ledger) (*hostEnv, error) { + accounts := state.NewAccounts(ledger) + generator, err := state.NewLedgerBoundAddressGenerator(ledger, ctx.Chain) + if err != nil { + return nil, err + } uuids := state.NewUUIDs(ledger) uuidGenerator := NewUUIDGenerator(uuids) env := &hostEnv{ - ctx: ctx, - ledger: ledger, - Metrics: &noopMetricsCollector{}, - accounts: accounts, - uuidGenerator: uuidGenerator, + ctx: ctx, + ledger: ledger, + Metrics: &noopMetricsCollector{}, + accounts: accounts, + addressGenerator: generator, + uuidGenerator: uuidGenerator, } if ctx.BlockHeader != nil { @@ -57,7 +63,7 @@ func newEnvironment(ctx Context, ledger state.Ledger) *hostEnv { env.Metrics = &metricsCollector{ctx.Metrics} } - return env + return env, nil } func (e *hostEnv) seedRNG(header *flow.Header) { @@ -74,6 +80,7 @@ func (e *hostEnv) setTransaction(vm *VirtualMachine, tx *flow.TransactionBody) { e.ctx, e.ledger, e.accounts, + e.addressGenerator, tx, ) } @@ -341,10 +348,11 @@ func (e *hostEnv) GetSigningAccounts() []runtime.Address { // Transaction Environment type transactionEnv struct { - vm *VirtualMachine - ctx Context - ledger state.Ledger - accounts *state.Accounts + vm *VirtualMachine + ctx Context + ledger state.Ledger + accounts *state.Accounts + addressGenerator flow.AddressGenerator tx *flow.TransactionBody authorizers []runtime.Address @@ -355,14 +363,16 @@ func newTransactionEnv( ctx Context, ledger state.Ledger, accounts *state.Accounts, + 
addressGenerator flow.AddressGenerator, tx *flow.TransactionBody, ) *transactionEnv { return &transactionEnv{ - vm: vm, - ctx: ctx, - ledger: ledger, - accounts: accounts, - tx: tx, + vm: vm, + ctx: ctx, + ledger: ledger, + accounts: accounts, + addressGenerator: addressGenerator, + tx: tx, } } @@ -399,9 +409,12 @@ func (e *transactionEnv) CreateAccount(payer runtime.Address) (address runtime.A } } - var flowAddress flow.Address + flowAddress, err := e.addressGenerator.NextAddress() + if err != nil { + return address, err + } - flowAddress, err = e.accounts.Create(nil) + err = e.accounts.Create(nil, flowAddress) if err != nil { // TODO: improve error passing https://github.com/onflow/cadence/issues/202 return address, err diff --git a/fvm/script.go b/fvm/script.go index 7c0f3182820..fb324b2ef83 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -71,7 +71,10 @@ func (i ScriptInvocator) Process( proc *ScriptProcedure, ledger state.Ledger, ) error { - env := newEnvironment(ctx, ledger) + env, err := newEnvironment(ctx, ledger) + if err != nil { + return err + } location := runtime.ScriptLocation(proc.ID[:]) diff --git a/fvm/state/accounts.go b/fvm/state/accounts.go index f6d8145a325..6f0c2161dd1 100644 --- a/fvm/state/accounts.go +++ b/fvm/state/accounts.go @@ -26,15 +26,11 @@ func keyPublicKey(index uint64) string { type Accounts struct { ledger Ledger - *addresses } -func NewAccounts(ledger Ledger, chain flow.Chain) *Accounts { - addresses := newAddresses(ledger, chain) - +func NewAccounts(ledger Ledger) *Accounts { return &Accounts{ - ledger: ledger, - addresses: addresses, + ledger: ledger, } } @@ -83,17 +79,14 @@ func (a *Accounts) Exists(address flow.Address) (bool, error) { return false, nil } -func (a *Accounts) Create(publicKeys []flow.AccountPublicKey) (flow.Address, error) { - addressState, err := a.addresses.GetAddressGeneratorState() +// Create account sets all required registers on an address. 
+func (a *Accounts) Create(publicKeys []flow.AccountPublicKey, newAddress flow.Address) error { + exists, err := a.Exists(newAddress) if err != nil { - return flow.EmptyAddress, err + return err } - - // generate the new account address - var newAddress flow.Address - newAddress, err = addressState.NextAddress() - if err != nil { - return flow.EmptyAddress, err + if exists { + return fmt.Errorf("account with address %s already exists", newAddress.Hex()) } // mark that this account exists @@ -101,15 +94,7 @@ func (a *Accounts) Create(publicKeys []flow.AccountPublicKey) (flow.Address, err a.ledger.Set(string(newAddress.Bytes()), string(newAddress.Bytes()), keyCode, nil) - err = a.SetAllPublicKeys(newAddress, publicKeys) - if err != nil { - return flow.EmptyAddress, err - } - - // update the address state - a.addresses.SetAddressGeneratorState(addressState) - - return newAddress, nil + return a.SetAllPublicKeys(newAddress, publicKeys) } func (a *Accounts) GetPublicKey(address flow.Address, keyIndex uint64) (flow.AccountPublicKey, error) { diff --git a/fvm/state/accounts_test.go b/fvm/state/accounts_test.go index 2bcaaf803e0..73976ef0a1e 100644 --- a/fvm/state/accounts_test.go +++ b/fvm/state/accounts_test.go @@ -9,14 +9,41 @@ import ( "github.com/onflow/flow-go/model/flow" ) -func TestAccounts_GetWithNoKeys(t *testing.T) { - chain := flow.Mainnet.Chain() +func TestAccounts_Create(t *testing.T) { + t.Run("Sets registers", func(t *testing.T) { + ledger := state.NewMapLedger() + + accounts := state.NewAccounts(ledger) + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + require.Equal(t, len(ledger.RegisterTouches), 3) // exists + code + key count + }) + + t.Run("Fails if account exists", func(t *testing.T) { + ledger := state.NewMapLedger() + + accounts := state.NewAccounts(ledger) + address := flow.HexToAddress("01") + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.Create(nil, address) + + require.Error(t, err) + }) +} + +func TestAccounts_GetWithNoKeys(t *testing.T) { ledger := state.NewMapLedger() - accounts := state.NewAccounts(ledger, chain) + accounts := state.NewAccounts(ledger) + address := flow.HexToAddress("01") - address, err := accounts.Create(nil) + err := accounts.Create(nil, address) require.NoError(t, err) require.NotPanics(t, func() { @@ -27,13 +54,12 @@ func TestAccounts_GetWithNoKeys(t *testing.T) { // Some old account could be created without key count register // we recreate it in a test func TestAccounts_GetWithNoKeysCounter(t *testing.T) { - chain := flow.Mainnet.Chain() - ledger := state.NewMapLedger() - accounts := state.NewAccounts(ledger, chain) + accounts := state.NewAccounts(ledger) + address := flow.HexToAddress("01") - address, err := accounts.Create(nil) + err := accounts.Create(nil, address) require.NoError(t, err) ledger.Delete( diff --git a/fvm/state/addresses.go b/fvm/state/addresses.go deleted file mode 100644 index bae374f4a1e..00000000000 --- a/fvm/state/addresses.go +++ /dev/null @@ -1,37 +0,0 @@ -package state - -import ( - "github.com/onflow/flow-go/model/flow" -) - -const keyAddressState = "account_address_state" - -type addresses struct { - ledger Ledger - chain flow.Chain -} - -func newAddresses(ledger Ledger, chain flow.Chain) *addresses { - return &addresses{ - ledger: ledger, - chain: chain, - } -} - -func (a *addresses) InitAddressGeneratorState() { - a.SetAddressGeneratorState(a.chain.NewAddressGenerator()) -} - -func (a *addresses) 
GetAddressGeneratorState() (flow.AddressGenerator, error) { - stateBytes, err := a.ledger.Get("", "", keyAddressState) - if err != nil { - return nil, err - } - - return a.chain.BytesToAddressGenerator(stateBytes), nil -} - -func (a *addresses) SetAddressGeneratorState(state flow.AddressGenerator) { - stateBytes := state.Bytes() - a.ledger.Set("", "", keyAddressState, stateBytes) -} diff --git a/fvm/state/ledger_address_generator.go b/fvm/state/ledger_address_generator.go new file mode 100644 index 00000000000..82b55617d3f --- /dev/null +++ b/fvm/state/ledger_address_generator.go @@ -0,0 +1,49 @@ +package state + +import ( + "github.com/onflow/flow-go/model/flow" +) + +const keyAddressState = "account_address_state" + +// LedgerBoundAddressGenerator is a decorator for an address generator. +// It uses the underlying generator it gets from the chain. +// The only change is that when next address is called the state is updated as well. +type LedgerBoundAddressGenerator struct { + generator flow.AddressGenerator + ledger Ledger +} + +func NewLedgerBoundAddressGenerator(ledger Ledger, chain flow.Chain) (*LedgerBoundAddressGenerator, error) { + stateBytes, err := ledger.Get("", "", keyAddressState) + if err != nil { + return nil, err + } + + addressGenerator := chain.BytesToAddressGenerator(stateBytes) + return &LedgerBoundAddressGenerator{ + ledger: ledger, + generator: addressGenerator, + }, nil +} + +func (g *LedgerBoundAddressGenerator) NextAddress() (flow.Address, error) { + address, err := g.generator.NextAddress() + if err != nil { + return address, err + } + + // update the ledger state + stateBytes := g.generator.Bytes() + g.ledger.Set("", "", keyAddressState, stateBytes) + + return address, nil +} + +func (g *LedgerBoundAddressGenerator) CurrentAddress() flow.Address { + return g.generator.CurrentAddress() +} + +func (g *LedgerBoundAddressGenerator) Bytes() []byte { + return g.generator.Bytes() +} diff --git a/fvm/state/ledger_address_generator_test.go b/fvm/state/ledger_address_generator_test.go new file mode 100644 index 00000000000..6fc0f9e7520 --- /dev/null +++ b/fvm/state/ledger_address_generator_test.go @@ -0,0 +1,48 @@ +package state_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/model/flow" +) + +func Test_NewLedgerBoundAddressGenerator_NoError(t *testing.T) { + ledger := state.NewMapLedger() + chain := flow.MonotonicEmulator.Chain() + _, err := state.NewLedgerBoundAddressGenerator(ledger, chain) + require.NoError(t, err) +} + +func Test_NewLedgerBoundAddressGenerator_GeneratingUpdatesState(t *testing.T) { + ledger := state.NewMapLedger() + chain := flow.MonotonicEmulator.Chain() + generator, err := state.NewLedgerBoundAddressGenerator(ledger, chain) + require.NoError(t, err) + + _, err = generator.NextAddress() + require.NoError(t, err) + + stateBytes, err := ledger.Get("", "", "account_address_state") + require.NoError(t, err) + + require.Equal(t, flow.BytesToAddress(stateBytes), flow.HexToAddress("01")) +} + +func Test_NewLedgerBoundAddressGenerator_UsesLedgerState(t *testing.T) { + ledger := state.NewMapLedger() + ledger.Set("", "", "account_address_state", flow.HexToAddress("01").Bytes()) + chain := flow.MonotonicEmulator.Chain() + generator, err := state.NewLedgerBoundAddressGenerator(ledger, chain) + require.NoError(t, err) + + _, err = generator.NextAddress() + require.NoError(t, err) + + stateBytes, err := ledger.Get("", "", "account_address_state") + require.NoError(t, err) 
+ + require.Equal(t, flow.BytesToAddress(stateBytes), flow.HexToAddress("02")) +} diff --git a/fvm/transaction.go b/fvm/transaction.go index 98c2c8c1a24..51a11d0f45a 100644 --- a/fvm/transaction.go +++ b/fvm/transaction.go @@ -82,12 +82,15 @@ func (i *TransactionInvocator) Process( proc *TransactionProcedure, ledger state.Ledger, ) error { - env := newEnvironment(ctx, ledger) + env, err := newEnvironment(ctx, ledger) + if err != nil { + return err + } env.setTransaction(vm, proc.Transaction) location := runtime.TransactionLocation(proc.ID[:]) - err := vm.Runtime.ExecuteTransaction(proc.Transaction.Script, proc.Transaction.Arguments, env, location) + err = vm.Runtime.ExecuteTransaction(proc.Transaction.Script, proc.Transaction.Arguments, env, location) if err != nil { return err } diff --git a/fvm/transactionSequenceNum.go b/fvm/transactionSequenceNum.go index bbc3df80d57..ad7d67b1874 100644 --- a/fvm/transactionSequenceNum.go +++ b/fvm/transactionSequenceNum.go @@ -19,7 +19,7 @@ func (c *TransactionSequenceNumberChecker) Process( proc *TransactionProcedure, ledger state.Ledger, ) error { - accounts := state.NewAccounts(ledger, ctx.Chain) + accounts := state.NewAccounts(ledger) return c.checkAndIncrementSequenceNumber(proc.Transaction, accounts) } diff --git a/fvm/transactionVerifier.go b/fvm/transactionVerifier.go index b7f25876714..5ffbafea306 100644 --- a/fvm/transactionVerifier.go +++ b/fvm/transactionVerifier.go @@ -25,7 +25,7 @@ func (v *TransactionSignatureVerifier) Process( proc *TransactionProcedure, ledger state.Ledger, ) error { - accounts := state.NewAccounts(ledger, ctx.Chain) + accounts := state.NewAccounts(ledger) return v.verifyTransactionSignatures(proc.Transaction, accounts) } From 1001b58e6cdec04bf630e683c62e9150f0fcce34 Mon Sep 17 00:00:00 2001 From: Vishal <1117327+vishalchangrani@users.noreply.github.com> Date: Wed, 4 Nov 2020 13:32:51 -0800 Subject: [PATCH 105/105] Vishal/transit script version (#110) adding a version option to the transit script --- Makefile | 3 ++- cmd/Dockerfile | 12 ++++++--- cmd/bootstrap/build/version.go | 43 ++++++++++++++++++++++++++++++ cmd/bootstrap/transit/main.go | 27 ++++++++++++++++++- cmd/bootstrap/transit/main_test.go | 29 ++++++++++++++++++++ 5 files changed, 108 insertions(+), 6 deletions(-) create mode 100644 cmd/bootstrap/build/version.go create mode 100644 cmd/bootstrap/transit/main_test.go diff --git a/Makefile b/Makefile index 44b267ac23c..16a8200d3cc 100644 --- a/Makefile +++ b/Makefile @@ -284,7 +284,8 @@ docker-build-bootstrap: .PHONY: docker-build-bootstrap-transit docker-build-bootstrap-transit: - docker build -f cmd/Dockerfile --ssh default --build-arg TARGET=bootstrap/transit --target production-nocgo \ + docker build -f cmd/Dockerfile --ssh default --build-arg TARGET=bootstrap/transit --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(VERSION) --no-cache \ + --target production-transit-nocgo \ -t "$(CONTAINER_REGISTRY)/bootstrap-transit:latest" -t "$(CONTAINER_REGISTRY)/bootstrap-transit:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/bootstrap-transit:$(IMAGE_TAG)" . .PHONY: docker-build-loader diff --git a/cmd/Dockerfile b/cmd/Dockerfile index 739cec170d4..397899958fe 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -32,6 +32,8 @@ RUN mkdir /app WORKDIR /app ARG TARGET +ARG COMMIT +ARG VERSION COPY . . 
@@ -76,19 +78,21 @@ COPY --from=build-debug /app/app /bin/app ENTRYPOINT ["dlv", "--listen=:2345", "--headless=true", "--api-version=2", "--accept-multiclient", "exec", "/bin/app"] -FROM build-env as build-production-nocgo +FROM build-env as build-transit-production-nocgo WORKDIR /app RUN --mount=type=cache,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ --mount=type=ssh \ - GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-extldflags -static" -o ./app ./cmd/${TARGET} + GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-extldflags -static \ + -X 'github.com/onflow/flow-go/cmd/bootstrap/build.commit=${COMMIT}' -X 'github.com/onflow/flow-go/cmd/bootstrap/build.semver=${VERSION}'" \ + -o ./app ./cmd/${TARGET} RUN chmod a+x /app/app ## (3) Add the statically linked binary to a distroless image -FROM gcr.io/distroless/base as production-nocgo +FROM gcr.io/distroless/base as production-transit-nocgo -COPY --from=build-production-nocgo /app/app /bin/app +COPY --from=build-transit-production-nocgo /app/app /bin/app ENTRYPOINT ["/bin/app"] diff --git a/cmd/bootstrap/build/version.go b/cmd/bootstrap/build/version.go new file mode 100644 index 00000000000..bb4d16c95f2 --- /dev/null +++ b/cmd/bootstrap/build/version.go @@ -0,0 +1,43 @@ +// Package build contains information about the build that injected at build-time. +// +// To use this package, simply import it in your program, then add build +// arguments like the following: +// +// go build -ldflags "-X github.com/dapperlabs/flow-go/version.semver=v1.0.0" +package build + +// Default value for build-time-injected version strings. +const undefined = "undefined" + +// The following variables are injected at build-time using ldflags. +var ( + semver string + commit string +) + +// Semver returns the semantic version of this build. +func Semver() string { + return semver +} + +// Commit returns the commit at which this build was created. +func Commit() string { + return commit +} + +// IsDefined determines whether a version string is defined. Inputs should +// have been produced from this package. +func IsDefined(v string) bool { + return v != undefined +} + +// If any of the build-time-injected variables are empty at initialization, +// mark them as undefined. 
+func init() { + if len(semver) == 0 { + semver = undefined + } + if len(commit) == 0 { + commit = undefined + } +} diff --git a/cmd/bootstrap/transit/main.go b/cmd/bootstrap/transit/main.go index d6cff5fe9f8..571f02b0158 100644 --- a/cmd/bootstrap/transit/main.go +++ b/cmd/bootstrap/transit/main.go @@ -19,6 +19,7 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" + "github.com/onflow/flow-go/cmd/bootstrap/build" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" utilsio "github.com/onflow/flow-go/utils/io" @@ -29,6 +30,8 @@ var ( FilenameTransitKeyPriv = "transit-key.priv.%v" FilenameRandomBeaconCipher = bootstrap.FilenameRandomBeaconPriv + ".%v.enc" flowBucket string + commit = build.Commit() + semver = build.Semver() ) const fileMode = os.FileMode(0644) @@ -53,8 +56,9 @@ var ( func main() { var bootDir, keyDir, wrapID, role string - var pull, push, prepare bool + var version, pull, push, prepare bool + flag.BoolVar(&version, "v", false, "View version and commit information") flag.StringVar(&bootDir, "d", "~/bootstrap", "The bootstrap directory containing your node-info files") flag.StringVar(&keyDir, "t", "", "Token provided by the Flow team to access the transit server") flag.BoolVar(&pull, "pull", false, "Fetch keys and metadata from the transit server") @@ -65,6 +69,12 @@ func main() { flag.StringVar(&flowBucket, "flow-bucket", "flow-genesis-bootstrap", "Storage for the transit server") flag.Parse() + // always print version information + printVersion() + if version { + return + } + if role == "" { flag.Usage() log.Fatal("Node role must be specified") @@ -132,6 +142,21 @@ func fetchNodeID(bootDir string) (string, error) { return strings.TrimSpace(string(data)), nil } +// Print the version and commit id +func printVersion() { + // Print version/commit strings if they are known + if build.IsDefined(semver) { + fmt.Printf("Transit script Version: %s\n", semver) + } + if build.IsDefined(commit) { + fmt.Printf("Transit script Commit: %s\n", commit) + } + // If no version info is known print a message to indicate this. + if !build.IsDefined(semver) && !build.IsDefined(commit) { + fmt.Printf("Transit script version information unknown\n") + } +} + // Run the push process // - create transit keypair (if the role type is Consensus) // - upload files to GCS bucket diff --git a/cmd/bootstrap/transit/main_test.go b/cmd/bootstrap/transit/main_test.go new file mode 100644 index 00000000000..3b41ed3c3da --- /dev/null +++ b/cmd/bootstrap/transit/main_test.go @@ -0,0 +1,29 @@ +package main + +func Example() { + + undefined := "undefined" + validVersion := "v0.0.1" + validCommit := "fa3f1af8f007940717758e63709104101380218f" + + var tests = []struct { + semver, commit string + }{ + {undefined, undefined}, + {undefined, validCommit}, + {validVersion, undefined}, + {validVersion, validCommit}, + } + for _, t := range tests { + semver = t.semver + commit = t.commit + printVersion() + } + + // Output: + // Transit script version information unknown + // Transit script Commit: fa3f1af8f007940717758e63709104101380218f + // Transit script Version: v0.0.1 + // Transit script Version: v0.0.1 + // Transit script Commit: fa3f1af8f007940717758e63709104101380218f +}
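
A hypothetical usage sketch (not part of the patch series above) of the concurrency test helpers added to utils/unittest/unittest.go earlier in this series. The package name and the flow-go import path are taken from the diffs above; the test name, the no-op/sleeping functions, and the chosen durations are illustrative assumptions only:

package unittest_test

import (
	"testing"
	"time"

	"github.com/onflow/flow-go/utils/unittest"
)

// TestHelperSketch is an illustrative example only: it exercises the helpers added
// above with trivial functions so the expected pass/fail behavior is easy to follow.
func TestHelperSketch(t *testing.T) {
	// all 10 concurrent no-op calls must return within one second
	unittest.RequireConcurrentCallsReturnBefore(t, func() {}, 10, time.Second, "no-op calls")

	// a call sleeping 200ms must not return within 100ms; the returned channel
	// closes once the call eventually finishes, so it can be awaited afterwards
	done := unittest.RequireNeverReturnBefore(t, func() {
		time.Sleep(200 * time.Millisecond)
	}, 100*time.Millisecond, "sleeping call")

	// the sleeping call should finish (and close the channel) well before one second
	unittest.RequireClosesBefore(t, done, time.Second)
}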