// node.cpp: 5558 lines (5000 loc), 277 KB
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <sstream>
#include <iomanip>
#include <deque>
#include <unordered_set>
#include <list>
#include <forward_list>
#include <iostream>
#include <algorithm>
#include <tuple>
#include <boost/tuple/tuple.hpp>
#include <boost/circular_buffer.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/mem_fun.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/random_access_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/logic/tribool.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <boost/range/algorithm/find.hpp>
#include <boost/range/numeric.hpp>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/rolling_mean.hpp>
#include <boost/accumulators/statistics/min.hpp>
#include <boost/accumulators/statistics/max.hpp>
#include <boost/accumulators/statistics/sum.hpp>
#include <boost/accumulators/statistics/count.hpp>
#include <boost/preprocessor/seq/for_each.hpp>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/stringize.hpp>
#include <fc/thread/thread.hpp>
#include <fc/thread/future.hpp>
#include <fc/thread/non_preemptable_scope_check.hpp>
#include <fc/thread/mutex.hpp>
#include <fc/thread/scoped_lock.hpp>
#include <fc/log/logger.hpp>
#include <fc/io/json.hpp>
#include <fc/io/enum_type.hpp>
#include <fc/crypto/rand.hpp>
#include <fc/network/rate_limiting.hpp>
#include <fc/network/ip.hpp>
#include <fc/smart_ref_impl.hpp>
#include <graphene/net/node.hpp>
#include <graphene/net/peer_database.hpp>
#include <graphene/net/peer_connection.hpp>
#include <graphene/net/stcp_socket.hpp>
#include <graphene/net/config.hpp>
#include <graphene/net/exceptions.hpp>
#include <graphene/chain/config.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/git_revision.hpp>
//#define ENABLE_DEBUG_ULOGS
#ifdef DEFAULT_LOGGER
# undef DEFAULT_LOGGER
#endif
#define DEFAULT_LOGGER "p2p"
#define P2P_IN_DEDICATED_THREAD 1
#define INVOCATION_COUNTER(name) \
static unsigned total_ ## name ## _counter = 0; \
static unsigned active_ ## name ## _counter = 0; \
struct name ## _invocation_logger { \
unsigned *total; \
unsigned *active; \
name ## _invocation_logger(unsigned *total, unsigned *active) : \
total(total), active(active) \
{ \
++*total; \
++*active; \
dlog("NEWDEBUG: Entering " #name ", now ${total} total calls, ${active} active calls", ("total", *total)("active", *active)); \
} \
~name ## _invocation_logger() \
{ \
--*active; \
dlog("NEWDEBUG: Leaving " #name ", now ${total} total calls, ${active} active calls", ("total", *total)("active", *active)); \
} \
} invocation_logger(&total_ ## name ## _counter, &active_ ## name ## _counter)
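// Illustrative usage (hypothetical; the counter is not wired up anywhere in this excerpt):
// placing
//   INVOCATION_COUNTER(on_message);
// at the top of a function body creates a local invocation_logger whose constructor and
// destructor dlog() entry and exit along with the running total and currently-active call
// counts for that function.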
//log these messages even at warn level when operating on the test network
#ifdef GRAPHENE_TEST_NETWORK
#define testnetlog wlog
#else
#define testnetlog(...) do {} while (0)
#endif
namespace graphene { namespace net {
namespace detail
{
namespace bmi = boost::multi_index;
class blockchain_tied_message_cache
{
private:
static const uint32_t cache_duration_in_blocks = GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS;
struct message_hash_index{};
struct message_contents_hash_index{};
struct block_clock_index{};
struct message_info
{
message_hash_type message_hash;
message message_body;
uint32_t block_clock_when_received;
// for network performance stats
message_propagation_data propagation_data;
fc::uint160_t message_contents_hash; // hash of whatever the message contains (if it's a transaction, this is the transaction id, if it's a block, it's the block_id)
message_info( const message_hash_type& message_hash,
const message& message_body,
uint32_t block_clock_when_received,
const message_propagation_data& propagation_data,
fc::uint160_t message_contents_hash ) :
message_hash( message_hash ),
message_body( message_body ),
block_clock_when_received( block_clock_when_received ),
propagation_data( propagation_data ),
message_contents_hash( message_contents_hash )
{}
};
typedef boost::multi_index_container
< message_info,
bmi::indexed_by< bmi::ordered_unique< bmi::tag<message_hash_index>,
bmi::member<message_info, message_hash_type, &message_info::message_hash> >,
bmi::ordered_non_unique< bmi::tag<message_contents_hash_index>,
bmi::member<message_info, fc::uint160_t, &message_info::message_contents_hash> >,
bmi::ordered_non_unique< bmi::tag<block_clock_index>,
bmi::member<message_info, uint32_t, &message_info::block_clock_when_received> > >
> message_cache_container;
message_cache_container _message_cache;
uint32_t block_clock;
public:
blockchain_tied_message_cache() :
block_clock( 0 )
{}
void block_accepted();
void cache_message( const message& message_to_cache, const message_hash_type& hash_of_message_to_cache,
const message_propagation_data& propagation_data, const fc::uint160_t& message_content_hash );
message get_message( const message_hash_type& hash_of_message_to_lookup );
message_propagation_data get_message_propagation_data( const fc::uint160_t& hash_of_message_contents_to_lookup ) const;
size_t size() const { return _message_cache.size(); }
};
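// Expiration is driven by the block clock rather than wall-clock time: block_accepted()
// advances block_clock by one, and once the clock exceeds cache_duration_in_blocks, every
// cached message whose block_clock_when_received is more than cache_duration_in_blocks
// behind the current clock is erased. For example, with a cache duration of 5 blocks, a
// message cached at clock 10 is purged once the clock reaches 16.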
void blockchain_tied_message_cache::block_accepted()
{
++block_clock;
if( block_clock > cache_duration_in_blocks )
_message_cache.get<block_clock_index>().erase(_message_cache.get<block_clock_index>().begin(),
_message_cache.get<block_clock_index>().lower_bound(block_clock - cache_duration_in_blocks ) );
}
void blockchain_tied_message_cache::cache_message( const message& message_to_cache,
const message_hash_type& hash_of_message_to_cache,
const message_propagation_data& propagation_data,
const fc::uint160_t& message_content_hash )
{
_message_cache.insert( message_info(hash_of_message_to_cache,
message_to_cache,
block_clock,
propagation_data,
message_content_hash ) );
}
message blockchain_tied_message_cache::get_message( const message_hash_type& hash_of_message_to_lookup )
{
message_cache_container::index<message_hash_index>::type::const_iterator iter =
_message_cache.get<message_hash_index>().find(hash_of_message_to_lookup );
if( iter != _message_cache.get<message_hash_index>().end() )
return iter->message_body;
FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" );
}
message_propagation_data blockchain_tied_message_cache::get_message_propagation_data( const fc::uint160_t& hash_of_message_contents_to_lookup ) const
{
if( hash_of_message_contents_to_lookup != fc::uint160_t() )
{
message_cache_container::index<message_contents_hash_index>::type::const_iterator iter =
_message_cache.get<message_contents_hash_index>().find(hash_of_message_contents_to_lookup );
if( iter != _message_cache.get<message_contents_hash_index>().end() )
return iter->propagation_data;
}
FC_THROW_EXCEPTION( fc::key_not_found_exception, "Requested message not in cache" );
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// This specifies configuration info for the local node. It's stored as JSON
// in the configuration directory (application data directory)
struct node_configuration
{
node_configuration() : accept_incoming_connections(true), wait_if_endpoint_is_busy(true) {}
fc::ip::endpoint listen_endpoint;
bool accept_incoming_connections;
bool wait_if_endpoint_is_busy;
/**
* Originally, our p2p code just had a 'node-id' that was a random number identifying this node
* on the network. This is now a private key/public key pair, where the public key is used
* in place of the old random node-id. The private part is unused, but might be used in
* the future to support some notion of trusted peers.
*/
fc::ecc::private_key private_key;
};
} } } // end namespace graphene::net::detail
FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint)
(accept_incoming_connections)
(wait_if_endpoint_is_busy)
(private_key));
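// FC_REFLECT registers the listed members with fc's reflection machinery; that is what lets
// save_node_configuration() below write the struct out as JSON via fc::json::save_to_file(),
// and (presumably) what load_configuration() uses to read node_config.json back in.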
namespace graphene { namespace net { namespace detail {
// when requesting items from peers, we want to prioritize any blocks before
// transactions, but otherwise request items in the order we heard about them
struct prioritized_item_id
{
item_id item;
unsigned sequence_number;
fc::time_point timestamp; // the time we last heard about this item in an inventory message
prioritized_item_id(const item_id& item, unsigned sequence_number) :
item(item),
sequence_number(sequence_number),
timestamp(fc::time_point::now())
{}
bool operator<(const prioritized_item_id& rhs) const
{
static_assert(graphene::net::block_message_type > graphene::net::trx_message_type,
"block_message_type must be greater than trx_message_type for prioritized_item_ids to sort correctly");
if (item.item_type != rhs.item.item_type)
return item.item_type > rhs.item.item_type;
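// within the same message type, fall back to arrival order: lower sequence numbers sort
// first. The unsigned subtraction reinterpreted as signed keeps the ordering correct even
// if the sequence counter ever wraps around.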
return (signed)(rhs.sequence_number - sequence_number) > 0;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////
class statistics_gathering_node_delegate_wrapper : public node_delegate
{
private:
node_delegate *_node_delegate;
fc::thread *_thread;
typedef boost::accumulators::accumulator_set<int64_t, boost::accumulators::stats<boost::accumulators::tag::min,
boost::accumulators::tag::rolling_mean,
boost::accumulators::tag::max,
boost::accumulators::tag::sum,
boost::accumulators::tag::count> > call_stats_accumulator;
#define NODE_DELEGATE_METHOD_NAMES (has_item) \
(handle_message) \
(handle_block) \
(handle_transaction) \
(get_block_ids) \
(get_item) \
(get_chain_id) \
(get_blockchain_synopsis) \
(sync_status) \
(connection_count_changed) \
(get_block_number) \
(get_block_time) \
(get_head_block_id) \
(estimate_last_known_fork_from_git_revision_timestamp) \
(error_encountered) \
(get_current_block_interval_in_seconds)
#define DECLARE_ACCUMULATOR(r, data, method_name) \
mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator)); \
mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_before_accumulator)); \
mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator));
BOOST_PP_SEQ_FOR_EACH(DECLARE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES)
#undef DECLARE_ACCUMULATOR
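// Each node_delegate method listed above gets three accumulators: time actually spent
// executing, the delay before the delegate thread picked the call up, and the delay after
// execution before the p2p thread processed the result.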
class call_statistics_collector
{
private:
fc::time_point _call_requested_time;
fc::time_point _begin_execution_time;
fc::time_point _execution_completed_time;
const char* _method_name;
call_stats_accumulator* _execution_accumulator;
call_stats_accumulator* _delay_before_accumulator;
call_stats_accumulator* _delay_after_accumulator;
public:
class actual_execution_measurement_helper
{
call_statistics_collector &_collector;
public:
actual_execution_measurement_helper(call_statistics_collector& collector) :
_collector(collector)
{
_collector.starting_execution();
}
~actual_execution_measurement_helper()
{
_collector.execution_completed();
}
};
call_statistics_collector(const char* method_name,
call_stats_accumulator* execution_accumulator,
call_stats_accumulator* delay_before_accumulator,
call_stats_accumulator* delay_after_accumulator) :
_call_requested_time(fc::time_point::now()),
_method_name(method_name),
_execution_accumulator(execution_accumulator),
_delay_before_accumulator(delay_before_accumulator),
_delay_after_accumulator(delay_after_accumulator)
{}
~call_statistics_collector()
{
fc::time_point end_time(fc::time_point::now());
fc::microseconds actual_execution_time(_execution_completed_time - _begin_execution_time);
fc::microseconds delay_before(_begin_execution_time - _call_requested_time);
fc::microseconds delay_after(end_time - _execution_completed_time);
fc::microseconds total_duration(actual_execution_time + delay_before + delay_after);
(*_execution_accumulator)(actual_execution_time.count());
(*_delay_before_accumulator)(delay_before.count());
(*_delay_after_accumulator)(delay_after.count());
if (total_duration > fc::milliseconds(500))
{
ilog("Call to method node_delegate::${method} took ${total_duration}us, longer than our target maximum of 500ms",
("method", _method_name)
("total_duration", total_duration.count()));
ilog("Actual execution took ${execution_duration}us, with a ${delegate_delay}us delay before the delegate thread started "
"executing the method, and a ${p2p_delay}us delay after it finished before the p2p thread started processing the response",
("execution_duration", actual_execution_time)
("delegate_delay", delay_before)
("p2p_delay", delay_after));
}
}
void starting_execution()
{
_begin_execution_time = fc::time_point::now();
}
void execution_completed()
{
_execution_completed_time = fc::time_point::now();
}
};
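// Illustrative sketch (the real forwarding wrappers are defined later in the file and may
// differ in detail): a wrapper creates one collector per call on the p2p side, then scopes
// an actual_execution_measurement_helper around the real delegate call on the delegate
// thread, roughly:
//
//   call_statistics_collector collector("has_item",
//                                       &_has_item_execution_accumulator,
//                                       &_has_item_delay_before_accumulator,
//                                       &_has_item_delay_after_accumulator);
//   return _thread->async([&](){
//       call_statistics_collector::actual_execution_measurement_helper helper(collector);
//       return _node_delegate->has_item(id);
//     }, "invoke has_item").wait();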
public:
statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls);
fc::variant_object get_call_statistics();
bool has_item( const net::item_id& id ) override;
void handle_message( const message& ) override;
bool handle_block( const graphene::net::block_message& block_message, bool sync_mode, std::vector<fc::uint160_t>& contained_transaction_message_ids ) override;
void handle_transaction( const graphene::net::trx_message& transaction_message ) override;
std::vector<item_hash_t> get_block_ids(const std::vector<item_hash_t>& blockchain_synopsis,
uint32_t& remaining_item_count,
uint32_t limit = 2000) override;
message get_item( const item_id& id ) override;
chain_id_type get_chain_id() const override;
std::vector<item_hash_t> get_blockchain_synopsis(const item_hash_t& reference_point,
uint32_t number_of_blocks_after_reference_point) override;
void sync_status( uint32_t item_type, uint32_t item_count ) override;
void connection_count_changed( uint32_t c ) override;
uint32_t get_block_number(const item_hash_t& block_id) override;
fc::time_point_sec get_block_time(const item_hash_t& block_id) override;
item_hash_t get_head_block_id() const override;
uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override;
void error_encountered(const std::string& message, const fc::oexception& error) override;
uint8_t get_current_block_interval_in_seconds() const override;
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////
class node_impl : public peer_connection_delegate
{
public:
#ifdef P2P_IN_DEDICATED_THREAD
std::shared_ptr<fc::thread> _thread;
#endif // P2P_IN_DEDICATED_THREAD
std::unique_ptr<statistics_gathering_node_delegate_wrapper> _delegate;
fc::sha256 _chain_id;
#define NODE_CONFIGURATION_FILENAME "node_config.json"
#define POTENTIAL_PEER_DATABASE_FILENAME "peers.json"
fc::path _node_configuration_directory;
node_configuration _node_configuration;
/// stores the endpoint we're listening on. This will be the same as
/// _node_configuration.listen_endpoint, unless that endpoint was already
/// in use.
fc::ip::endpoint _actual_listening_endpoint;
/// we determine whether we're firewalled by asking other nodes. Store the result here:
firewalled_state _is_firewalled;
/// if we're behind NAT, our listening endpoint address will appear different to the rest of the world. store it here.
fc::optional<fc::ip::endpoint> _publicly_visible_listening_endpoint;
fc::time_point _last_firewall_check_message_sent;
/// used by the task that manages connecting to peers
// @{
std::list<potential_peer_record> _add_once_node_list; /// list of peers we want to connect to as soon as possible
peer_database _potential_peer_db;
fc::promise<void>::ptr _retrigger_connect_loop_promise;
bool _potential_peer_database_updated;
fc::future<void> _p2p_network_connect_loop_done;
// @}
/// used by the task that fetches sync items during synchronization
// @{
fc::promise<void>::ptr _retrigger_fetch_sync_items_loop_promise;
bool _sync_items_to_fetch_updated;
fc::future<void> _fetch_sync_items_loop_done;
typedef std::unordered_map<graphene::net::block_id_type, fc::time_point> active_sync_requests_map;
active_sync_requests_map _active_sync_requests; /// list of sync blocks we've asked for from peers but have not yet received
std::list<graphene::net::block_message> _new_received_sync_items; /// list of sync blocks we've just received but haven't yet tried to process
std::list<graphene::net::block_message> _received_sync_items; /// list of sync blocks we've received, but can't yet process because we are still missing blocks that come earlier in the chain
// @}
fc::future<void> _process_backlog_of_sync_blocks_done;
bool _suspend_fetching_sync_blocks;
/// used by the task that fetches items during normal operation
// @{
fc::promise<void>::ptr _retrigger_fetch_item_loop_promise;
bool _items_to_fetch_updated;
fc::future<void> _fetch_item_loop_done;
struct item_id_index{};
typedef boost::multi_index_container<prioritized_item_id,
boost::multi_index::indexed_by<boost::multi_index::ordered_unique<boost::multi_index::identity<prioritized_item_id> >,
boost::multi_index::hashed_unique<boost::multi_index::tag<item_id_index>,
boost::multi_index::member<prioritized_item_id, item_id, &prioritized_item_id::item>,
std::hash<item_id> > >
> items_to_fetch_set_type;
unsigned _items_to_fetch_sequence_counter;
items_to_fetch_set_type _items_to_fetch; /// list of items we know another peer has and we want
peer_connection::timestamped_items_set_type _recently_failed_items; /// list of transactions we've recently pushed and had rejected by the delegate
// @}
/// used by the task that advertises inventory during normal operation
// @{
fc::promise<void>::ptr _retrigger_advertise_inventory_loop_promise;
fc::future<void> _advertise_inventory_loop_done;
std::unordered_set<item_id> _new_inventory; /// list of items we have received but not yet advertised to our peers
// @}
fc::future<void> _terminate_inactive_connections_loop_done;
uint8_t _recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value
std::string _user_agent_string;
/** _node_public_key is a key automatically generated when the client is first run, stored in
* node_config.json. It doesn't really have much of a purpose yet; there was just some thought
* that we might someday have a use for nodes having a private key (sent in hello messages)
*/
node_id_t _node_public_key;
/**
* _node_id is a random number generated each time the client is launched, used to prevent us
* from connecting to the same client multiple times (sent in hello messages).
* Since this was introduced after the hello_message was finalized, this is sent in the
* user_data field.
* While this shares the same underlying type as a public key, it is really just a random
* number.
*/
node_id_t _node_id;
/** if we have less than `_desired_number_of_connections`, we will try to connect with more nodes */
uint32_t _desired_number_of_connections;
/** if we have _maximum_number_of_connections or more, we will refuse any inbound connections */
uint32_t _maximum_number_of_connections;
/** retry connections to peers that have failed or rejected us this often, in seconds */
uint32_t _peer_connection_retry_timeout;
/** how many seconds of inactivity are permitted before disconnecting a peer */
uint32_t _peer_inactivity_timeout;
fc::tcp_server _tcp_server;
fc::future<void> _accept_loop_complete;
/** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages
* back and forth (not yet ready to initiate syncing) */
std::unordered_set<peer_connection_ptr> _handshaking_connections;
/** stores fully established connections we're either syncing with or in normal operation with */
std::unordered_set<peer_connection_ptr> _active_connections;
/** stores connections we've closed (sent closing message, not actually closed), but are still waiting for the remote end to close before we delete them */
std::unordered_set<peer_connection_ptr> _closing_connections;
/** stores connections we've closed, but are still waiting for the OS to notify us that the socket is really closed */
std::unordered_set<peer_connection_ptr> _terminating_connections;
boost::circular_buffer<item_hash_t> _most_recent_blocks_accepted; // the /n/ most recent blocks we've accepted (currently tuned to the max number of connections)
uint32_t _sync_item_type;
uint32_t _total_number_of_unfetched_items; /// the number of items we still need to fetch while syncing
std::vector<uint32_t> _hard_fork_block_numbers; /// list of all block numbers where there are hard forks
blockchain_tied_message_cache _message_cache; /// cache message we have received and might be required to provide to other peers via inventory requests
fc::rate_limiting_group _rate_limiter;
uint32_t _last_reported_number_of_connections; // number of connections last reported to the client (to avoid sending duplicate messages)
bool _peer_advertising_disabled;
fc::future<void> _fetch_updated_peer_lists_loop_done;
boost::circular_buffer<uint32_t> _average_network_read_speed_seconds;
boost::circular_buffer<uint32_t> _average_network_write_speed_seconds;
boost::circular_buffer<uint32_t> _average_network_read_speed_minutes;
boost::circular_buffer<uint32_t> _average_network_write_speed_minutes;
boost::circular_buffer<uint32_t> _average_network_read_speed_hours;
boost::circular_buffer<uint32_t> _average_network_write_speed_hours;
unsigned _average_network_usage_second_counter;
unsigned _average_network_usage_minute_counter;
fc::time_point_sec _bandwidth_monitor_last_update_time;
fc::future<void> _bandwidth_monitor_loop_done;
fc::future<void> _dump_node_status_task_done;
/* We have two alternate paths through the schedule_peer_for_deletion code -- one that
* uses a mutex to prevent one fiber from adding items to the queue while another is deleting
* items from it, and one that doesn't. The one that doesn't is simpler and more efficient
* code, but we're keeping the mutex-based version around because it crashes, and that
* crash probably indicates a bug in our underlying threading code that needs
* fixing. To reproduce the bug, define USE_PEERS_TO_DELETE_MUTEX, connect up
* to the network, and set your desired/max connection counts high
*/
//#define USE_PEERS_TO_DELETE_MUTEX 1
#ifdef USE_PEERS_TO_DELETE_MUTEX
fc::mutex _peers_to_delete_mutex;
#endif
std::list<peer_connection_ptr> _peers_to_delete;
fc::future<void> _delayed_peer_deletion_task_done;
#ifdef ENABLE_P2P_DEBUGGING_API
std::set<node_id_t> _allowed_peers;
#endif // ENABLE_P2P_DEBUGGING_API
bool _node_is_shutting_down; // set to true when we begin our destructor, used to prevent us from starting new tasks while we're shutting down
unsigned _maximum_number_of_blocks_to_handle_at_one_time;
unsigned _maximum_number_of_sync_blocks_to_prefetch;
unsigned _maximum_blocks_per_peer_during_syncing;
std::list<fc::future<void> > _handle_message_calls_in_progress;
node_impl(const std::string& user_agent);
virtual ~node_impl();
void save_node_configuration();
void p2p_network_connect_loop();
void trigger_p2p_network_connect_loop();
bool have_already_received_sync_item( const item_hash_t& item_hash );
void request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request );
void request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector<item_hash_t>& items_to_request );
void fetch_sync_items_loop();
void trigger_fetch_sync_items_loop();
bool is_item_in_any_peers_inventory(const item_id& item) const;
void fetch_items_loop();
void trigger_fetch_items_loop();
void advertise_inventory_loop();
void trigger_advertise_inventory_loop();
void terminate_inactive_connections_loop();
void fetch_updated_peer_lists_loop();
void update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second);
void bandwidth_monitor_loop();
void dump_node_status_task();
bool is_accepting_new_connections();
bool is_wanting_new_connections();
uint32_t get_number_of_connections();
peer_connection_ptr get_peer_by_node_id(const node_id_t& id);
bool is_already_connected_to_id(const node_id_t& node_id);
bool merge_address_info_with_potential_peer_database( const std::vector<address_info> addresses );
void display_current_connections();
uint32_t calculate_unsynced_block_count_from_all_peers();
std::vector<item_hash_t> create_blockchain_synopsis_for_peer( const peer_connection* peer );
void fetch_next_batch_of_item_ids_from_peer( peer_connection* peer, bool reset_fork_tracking_data_for_peer = false );
fc::variant_object generate_hello_user_data();
void parse_hello_user_data_for_peer( peer_connection* originating_peer, const fc::variant_object& user_data );
void on_message( peer_connection* originating_peer,
const message& received_message ) override;
void on_hello_message( peer_connection* originating_peer,
const hello_message& hello_message_received );
void on_connection_accepted_message( peer_connection* originating_peer,
const connection_accepted_message& connection_accepted_message_received );
void on_connection_rejected_message( peer_connection* originating_peer,
const connection_rejected_message& connection_rejected_message_received );
void on_address_request_message( peer_connection* originating_peer,
const address_request_message& address_request_message_received );
void on_address_message( peer_connection* originating_peer,
const address_message& address_message_received );
void on_fetch_blockchain_item_ids_message( peer_connection* originating_peer,
const fetch_blockchain_item_ids_message& fetch_blockchain_item_ids_message_received );
void on_blockchain_item_ids_inventory_message( peer_connection* originating_peer,
const blockchain_item_ids_inventory_message& blockchain_item_ids_inventory_message_received );
void on_fetch_items_message( peer_connection* originating_peer,
const fetch_items_message& fetch_items_message_received );
void on_item_not_available_message( peer_connection* originating_peer,
const item_not_available_message& item_not_available_message_received );
void on_item_ids_inventory_message( peer_connection* originating_peer,
const item_ids_inventory_message& item_ids_inventory_message_received );
void on_closing_connection_message( peer_connection* originating_peer,
const closing_connection_message& closing_connection_message_received );
void on_current_time_request_message( peer_connection* originating_peer,
const current_time_request_message& current_time_request_message_received );
void on_current_time_reply_message( peer_connection* originating_peer,
const current_time_reply_message& current_time_reply_message_received );
void forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state);
void on_check_firewall_message(peer_connection* originating_peer,
const check_firewall_message& check_firewall_message_received);
void on_check_firewall_reply_message(peer_connection* originating_peer,
const check_firewall_reply_message& check_firewall_reply_message_received);
void on_get_current_connections_request_message(peer_connection* originating_peer,
const get_current_connections_request_message& get_current_connections_request_message_received);
void on_get_current_connections_reply_message(peer_connection* originating_peer,
const get_current_connections_reply_message& get_current_connections_reply_message_received);
void on_connection_closed(peer_connection* originating_peer) override;
void send_sync_block_to_node_delegate(const graphene::net::block_message& block_message_to_send);
void process_backlog_of_sync_blocks();
void trigger_process_backlog_of_sync_blocks();
void process_block_during_sync(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash);
void process_block_during_normal_operation(peer_connection* originating_peer, const graphene::net::block_message& block_message, const message_hash_type& message_hash);
void process_block_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash);
void process_ordinary_message(peer_connection* originating_peer, const message& message_to_process, const message_hash_type& message_hash);
void start_synchronizing();
void start_synchronizing_with_peer(const peer_connection_ptr& peer);
void new_peer_just_added(const peer_connection_ptr& peer); /// called after a peer finishes handshaking, kicks off syncing
void close();
void accept_connection_task(peer_connection_ptr new_peer);
void accept_loop();
void send_hello_message(const peer_connection_ptr& peer);
void connect_to_task(peer_connection_ptr new_peer, const fc::ip::endpoint& remote_endpoint);
bool is_connection_to_endpoint_in_progress(const fc::ip::endpoint& remote_endpoint);
void move_peer_to_active_list(const peer_connection_ptr& peer);
void move_peer_to_closing_list(const peer_connection_ptr& peer);
void move_peer_to_terminating_list(const peer_connection_ptr& peer);
peer_connection_ptr get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint );
void dump_node_status();
void delayed_peer_deletion_task();
void schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete);
void disconnect_from_peer( peer_connection* originating_peer,
const std::string& reason_for_disconnect,
bool caused_by_error = false,
const fc::oexception& additional_data = fc::oexception() );
// methods implementing node's public interface
void set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls);
void load_configuration( const fc::path& configuration_directory );
void listen_to_p2p_network();
void connect_to_p2p_network();
void add_node( const fc::ip::endpoint& ep );
void initiate_connect_to(const peer_connection_ptr& peer);
void connect_to_endpoint(const fc::ip::endpoint& ep);
void listen_on_endpoint(const fc::ip::endpoint& ep , bool wait_if_not_available);
void accept_incoming_connections(bool accept);
void listen_on_port( uint16_t port, bool wait_if_not_available );
fc::ip::endpoint get_actual_listening_endpoint() const;
std::vector<peer_status> get_connected_peers() const;
uint32_t get_connection_count() const;
void broadcast(const message& item_to_broadcast, const message_propagation_data& propagation_data);
void broadcast(const message& item_to_broadcast);
void sync_from(const item_id& current_head_block, const std::vector<uint32_t>& hard_fork_block_numbers);
bool is_connected() const;
std::vector<potential_peer_record> get_potential_peers() const;
void set_advanced_node_parameters( const fc::variant_object& params );
fc::variant_object get_advanced_node_parameters();
message_propagation_data get_transaction_propagation_data( const graphene::net::transaction_id_type& transaction_id );
message_propagation_data get_block_propagation_data( const graphene::net::block_id_type& block_id );
node_id_t get_node_id() const;
void set_allowed_peers( const std::vector<node_id_t>& allowed_peers );
void clear_peer_database();
void set_total_bandwidth_limit( uint32_t upload_bytes_per_second, uint32_t download_bytes_per_second );
void disable_peer_advertising();
fc::variant_object get_call_statistics() const;
message get_message_for_item(const item_id& item) override;
fc::variant_object network_get_info() const;
fc::variant_object network_get_usage_stats() const;
bool is_hard_fork_block(uint32_t block_number) const;
uint32_t get_next_known_hard_fork_block_number(uint32_t block_number) const;
}; // end class node_impl
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
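// node_impl_deleter hops onto the node's own p2p thread to run the delete because
// ~node_impl() asserts, via VERIFY_CORRECT_THREAD() (defined just below), that it executes
// on that thread; the weak_ptr check afterwards merely reports whether the fc::thread
// object was actually destroyed or is still being kept alive by a leaked shared_ptr.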
void node_impl_deleter::operator()(node_impl* impl_to_delete)
{
#ifdef P2P_IN_DEDICATED_THREAD
std::weak_ptr<fc::thread> weak_thread;
if (impl_to_delete)
{
std::shared_ptr<fc::thread> impl_thread(impl_to_delete->_thread);
weak_thread = impl_thread;
impl_thread->async([impl_to_delete](){ delete impl_to_delete; }, "delete node_impl").wait();
dlog("deleting the p2p thread");
}
if (weak_thread.expired())
dlog("done deleting the p2p thread");
else
dlog("failed to delete the p2p thread, we must be leaking a smart pointer somewhere");
#else // P2P_IN_DEDICATED_THREAD
delete impl_to_delete;
#endif // P2P_IN_DEDICATED_THREAD
}
#ifdef P2P_IN_DEDICATED_THREAD
# define VERIFY_CORRECT_THREAD() assert(_thread->is_current())
#else
# define VERIFY_CORRECT_THREAD() do {} while (0)
#endif
#define MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME 200
#define MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH (10 * MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME)
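// i.e. with the default of 200 above, up to 2000 sync blocks may be prefetched ahead of processing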
node_impl::node_impl(const std::string& user_agent) :
#ifdef P2P_IN_DEDICATED_THREAD
_thread(std::make_shared<fc::thread>("p2p")),
#endif // P2P_IN_DEDICATED_THREAD
_delegate(nullptr),
_is_firewalled(firewalled_state::unknown),
_potential_peer_database_updated(false),
_sync_items_to_fetch_updated(false),
_suspend_fetching_sync_blocks(false),
_items_to_fetch_updated(false),
_items_to_fetch_sequence_counter(0),
_recent_block_interval_in_seconds(GRAPHENE_MAX_BLOCK_INTERVAL),
_user_agent_string(user_agent),
_desired_number_of_connections(GRAPHENE_NET_DEFAULT_DESIRED_CONNECTIONS),
_maximum_number_of_connections(GRAPHENE_NET_DEFAULT_MAX_CONNECTIONS),
_peer_connection_retry_timeout(GRAPHENE_NET_DEFAULT_PEER_CONNECTION_RETRY_TIME),
_peer_inactivity_timeout(GRAPHENE_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT),
_most_recent_blocks_accepted(_maximum_number_of_connections),
_total_number_of_unfetched_items(0),
_rate_limiter(0, 0),
_last_reported_number_of_connections(0),
_peer_advertising_disabled(false),
_average_network_read_speed_seconds(60),
_average_network_write_speed_seconds(60),
_average_network_read_speed_minutes(60),
_average_network_write_speed_minutes(60),
_average_network_read_speed_hours(72),
_average_network_write_speed_hours(72),
_average_network_usage_second_counter(0),
_average_network_usage_minute_counter(0),
_node_is_shutting_down(false),
_maximum_number_of_blocks_to_handle_at_one_time(MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME),
_maximum_number_of_sync_blocks_to_prefetch(MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH),
_maximum_blocks_per_peer_during_syncing(GRAPHENE_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING)
{
_rate_limiter.set_actual_rate_time_constant(fc::seconds(2));
fc::rand_pseudo_bytes(&_node_id.data[0], (int)_node_id.size());
}
node_impl::~node_impl()
{
VERIFY_CORRECT_THREAD();
ilog( "cleaning up node" );
_node_is_shutting_down = true;
for (const peer_connection_ptr& active_peer : _active_connections)
{
fc::optional<fc::ip::endpoint> inbound_endpoint = active_peer->get_endpoint_for_connecting();
if (inbound_endpoint)
{
fc::optional<potential_peer_record> updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint);
if (updated_peer_record)
{
updated_peer_record->last_seen_time = fc::time_point::now();
_potential_peer_db.update_entry(*updated_peer_record);
}
}
}
try
{
ilog( "close" );
close();
}
catch ( const fc::exception& e )
{
wlog( "unexpected exception on close ${e}", ("e", e) );
}
ilog( "done" );
}
void node_impl::save_node_configuration()
{
VERIFY_CORRECT_THREAD();
if( fc::exists(_node_configuration_directory ) )
{
fc::path configuration_file_name( _node_configuration_directory / NODE_CONFIGURATION_FILENAME );
try
{
fc::json::save_to_file( _node_configuration, configuration_file_name );
}
catch (const fc::canceled_exception&)
{
throw;
}
catch ( const fc::exception& except )
{
elog( "error writing node configuration to file ${filename}: ${error}",
( "filename", configuration_file_name )("error", except.to_detail_string() ) );
}
}
}
void node_impl::p2p_network_connect_loop()
{
VERIFY_CORRECT_THREAD();
while (!_p2p_network_connect_loop_done.canceled())
{
try
{
dlog("Starting an iteration of p2p_network_connect_loop().");
display_current_connections();
// add-once peers bypass our checks on the maximum/desired number of connections (but they will still be counted against the totals once they're connected)
if (!_add_once_node_list.empty())
{
std::list<potential_peer_record> add_once_node_list;
add_once_node_list.swap(_add_once_node_list);
dlog("Processing \"add once\" node list containing ${count} peers:", ("count", add_once_node_list.size()));
for (const potential_peer_record& add_once_peer : add_once_node_list)
{
dlog(" ${peer}", ("peer", add_once_peer.endpoint));
}
for (const potential_peer_record& add_once_peer : add_once_node_list)
{
// see if we already have a connection to that peer. If we don't, connect now;
// if we do, leave the existing connection alone and skip this entry
peer_connection_ptr existing_connection_ptr = get_connection_to_endpoint( add_once_peer.endpoint );
if(!existing_connection_ptr)
connect_to_endpoint(add_once_peer.endpoint);
}
dlog("Done processing \"add once\" node list");
}
while (is_wanting_new_connections())
{
bool initiated_connection_this_pass = false;
_potential_peer_database_updated = false;
for (peer_database::iterator iter = _potential_peer_db.begin();
iter != _potential_peer_db.end() && is_wanting_new_connections();
++iter)
{
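// back off linearly: each prior failed attempt adds another _peer_connection_retry_timeout
// seconds before this peer becomes eligible for a retry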
fc::microseconds delay_until_retry = fc::seconds((iter->number_of_failed_connection_attempts + 1) * _peer_connection_retry_timeout);
if (!is_connection_to_endpoint_in_progress(iter->endpoint) &&
((iter->last_connection_disposition != last_connection_failed &&
iter->last_connection_disposition != last_connection_rejected &&
iter->last_connection_disposition != last_connection_handshaking_failed) ||
(fc::time_point::now() - iter->last_connection_attempt_time) > delay_until_retry))
{
connect_to_endpoint(iter->endpoint);
initiated_connection_this_pass = true;
}
}
if (!initiated_connection_this_pass && !_potential_peer_database_updated)
break;
}
display_current_connections();
// if we broke out of the while loop, that means either we have connected to enough nodes, or
// we don't have any good candidates to connect to right now.
#if 0
try
{
_retrigger_connect_loop_promise = fc::promise<void>::ptr( new fc::promise<void>("graphene::net::retrigger_connect_loop") );
if( is_wanting_new_connections() || !_add_once_node_list.empty() )
{
if( is_wanting_new_connections() )
dlog( "Still want to connect to more nodes, but I don't have any good candidates. Trying again in 15 seconds" );
else
dlog( "I still have some \"add once\" nodes to connect to. Trying again in 15 seconds" );
_retrigger_connect_loop_promise->wait_until( fc::time_point::now() + fc::seconds(GRAPHENE_PEER_DATABASE_RETRY_DELAY ) );
}
else
{
dlog( "I don't need any more connections, waiting forever until something changes" );
_retrigger_connect_loop_promise->wait();
}
}
catch ( fc::timeout_exception& ) //intentionally not logged
{
} // catch
#else
fc::usleep(fc::seconds(10));
#endif
}
catch (const fc::canceled_exception&)
{
throw;
}
FC_CAPTURE_AND_LOG( (0) )
}// while(!canceled)
}
void node_impl::trigger_p2p_network_connect_loop()
{
VERIFY_CORRECT_THREAD();
dlog( "Triggering connect loop now" );
_potential_peer_database_updated = true;
//if( _retrigger_connect_loop_promise )
// _retrigger_connect_loop_promise->set_value();
}
bool node_impl::have_already_received_sync_item( const item_hash_t& item_hash )
{
VERIFY_CORRECT_THREAD();
return std::find_if(_received_sync_items.begin(), _received_sync_items.end(),
[&item_hash]( const graphene::net::block_message& message ) { return message.block_id == item_hash; } ) != _received_sync_items.end() ||
std::find_if(_new_received_sync_items.begin(), _new_received_sync_items.end(),
[&item_hash]( const graphene::net::block_message& message ) { return message.block_id == item_hash; } ) != _new_received_sync_items.end();
}
void node_impl::request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request )
{