eth/downloader: Updated downloader tests to improve perf and reliability #15337
```diff
@@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
 func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }

 func testThrottling(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
 	tester := newTester()
 	defer tester.terminate()

```
|
|
```diff
@@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
 func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }

 func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()
```
|
|
```diff
@@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
 func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }

 func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()
```
|
|
```diff
@@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDr
 func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }

 func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
+	t.Parallel()
+
 	// Define the disconnection requirement for individual hash fetch errors
 	tests := []struct {
 		result error
```
|
|
```diff
@@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

 // This test reproduces an issue where unexpected deliveries would
 // block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
-func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
-func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
-func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestDeliverHeadersHang(t *testing.T) {
+	testCases := []struct {
+		protocol int
+		syncMode SyncMode
+	}{
+		{62, FullSync},
+		{63, FullSync},
+		{63, FastSync},
+		{64, FullSync},
+		{64, FastSync},
+		{64, LightSync},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
+			testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
+		})
+	}
+}

 type floodingTestPeer struct {
 	peer Peer
```
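A side benefit of the t.Run conversion is that each case gets an addressable subtest name. Go's test runner replaces spaces in subtest names with underscores, so a single case can be re-run in isolation with something like `go test -run 'TestDeliverHeadersHang/protocol_62_mode_full'` (the exact mode suffix depends on how SyncMode formats with %v).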
|
|
```diff
@@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
 	// Deliver the actual requested headers.
 	go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
 	// None of the extra deliveries should block.
-	timeout := time.After(15 * time.Second)
+	timeout := time.After(60 * time.Second)
 	for i := 0; i < cap(deliveriesDone); i++ {
 		select {
 		case <-deliveriesDone:
```
|
|
```diff
@@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
 		tester.downloader.peers.peers["peer"].peer,
 		tester,
 	}
-
 	if err := tester.sync("peer", nil, mode); err != nil {
 		t.Errorf("sync failed: %v", err)
 	}
```
|
|
```diff
@@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {

 // Tests that if fast sync aborts in the critical section, it can restart a few
 // times before giving up.
-func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
-func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
-func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
-func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestFastCriticalRestarts(t *testing.T) {
+	testCases := []struct {
+		protocol int
+		progress bool
+	}{
+		{63, false},
+		{64, false},
+		{63, true},
+		{64, true},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
+			testFastCriticalRestarts(t, tc.protocol, tc.progress)
+		})
+	}
+}

 func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
+	t.Parallel()
+
 	tester := newTester()
 	defer tester.terminate()

```
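Note the division of labor in this pattern: the parent TestFastCriticalRestarts never calls t.Parallel(), while testFastCriticalRestarts now does, so the four cases run concurrently with one another but the group as a whole still occupies a single slot in the package's serial test phase.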
|
|
```diff
@@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {

 	// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
 	if i == 0 {
+		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
 		if tester.downloader.fsPivotLock == nil {
 			time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
 			t.Fatalf("pivot block not locked in after critical section failure")
```

Inline review thread on the added time.Sleep line:

Contributor (fjl): Why is this needed?

Contributor (author): If you look at the lines of code just below, there is a sleep directly before failing. I found that moving the sleep to just before the explicit fail was a better place for it, and it made the test more reliable.

Contributor (author): @fjl Hi, just checking in to see if you saw my reply to your comment?
Review discussion:

Reviewer: Why does running these tests synchronously cause intermittent failures?

Author: I don't have a deep understanding, but I suspect that a timeout in a different test was interacting badly with these. Parallel tests are still being run: all of the header tests run in parallel with one another, just not in parallel with the other tests.
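For context on the mechanism being relied on here: in Go's testing package, a subtest that calls t.Parallel() runs concurrently only with its sibling subtests, and the enclosing test does not complete until all of them finish. Since the enclosing test itself never calls t.Parallel(), the whole group runs during the serial phase, isolated from every other test in the package. A minimal sketch of the pattern (hypothetical test names, not from this PR):

```go
package example

import (
	"testing"
	"time"
)

// TestGroup mirrors the pattern adopted in this PR: the parent test stays
// serial, while each subtest opts into parallelism. The three subtests run
// concurrently with one another, but TestGroup does not finish (and no other
// top-level test starts) until all of them are done.
func TestGroup(t *testing.T) {
	for _, name := range []string{"a", "b", "c"} {
		t.Run(name, func(t *testing.T) {
			t.Parallel() // parallel with sibling subtests only
			time.Sleep(10 * time.Millisecond)
		})
	}
}

// TestOther runs either before or after TestGroup, never interleaved with
// its subtests, because TestOther itself is not marked parallel.
func TestOther(t *testing.T) {
	time.Sleep(10 * time.Millisecond)
}
```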