Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
9cb8606
verbose flag for cpp tests (#10524)
TriplEight Mar 26, 2019
aa8487c
ethcore: add clique engine (#9981)
5chdn Mar 26, 2019
3b23817
Add trace information to eth_estimateGas (#10519)
fanatid Mar 27, 2019
7d26a82
private-tx: replace error_chain (#10510)
ascjones Mar 27, 2019
407de5e
fix(light): make `OnDemand` generic instead of using the concrete typ…
niklasad1 Mar 27, 2019
e4c2fe9
Initial support sccache for windows build (#10520)
General-Beck Mar 27, 2019
0199acb
ethcore: remove eth social and easthub chain configs (#10531)
soc1c Mar 27, 2019
04c6867
Fix max_gas (#10537)
sorpaas Mar 27, 2019
ebf51c0
fix(bump dependencies) (#10540)
niklasad1 Mar 28, 2019
4e2e88a
separate docker image to build docs (#10543)
TriplEight Mar 28, 2019
89d6277
updated lru-cache to 0.1.2 (#10542)
debris Mar 29, 2019
8840a29
clique: make state backfill time measurement more accurate (#10551)
jwasinger Mar 30, 2019
440e52f
build android with cache, win fixes (#10546)
TriplEight Mar 30, 2019
7b2afdf
Implement caching for service transactions checker (#10088)
VladLupashevskyi Mar 31, 2019
95236d2
fix(light eth_gasPrice): ask network if not in cache (#10535)
niklasad1 Mar 31, 2019
ec56b1f
Update light client harcoded headers (#10547)
Tbaut Mar 31, 2019
89f828b
fix(light account response): update `tx_queue` (#10545)
niklasad1 Mar 31, 2019
d9673b0
tx-pool: check transaction readiness before replacing (#10526)
ascjones Apr 1, 2019
0024966
version: bump master to 2.6 (#10560)
soc1c Apr 2, 2019
288d737
Explicitly enable or disable Stratum in config file (Issue 9785) (#10…
lamafab Apr 2, 2019
fba63de
RPC: Implements eth_subscribe("syncing") (#10311)
seunlanlege Apr 2, 2019
69085aa
Update Issue Template to direct security issue to email (#10562)
kirushik Apr 2, 2019
10e1787
fix(light cull): poll light cull instead of timer (#10559)
niklasad1 Apr 4, 2019
8132d38
Node table limiting and cache for node filter (#10288)
VladLupashevskyi Apr 5, 2019
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion .github/ISSUE_TEMPLATE.md
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
_Before filing a new issue, please **provide the following information**._

_If you think that your issue is an exploitable security vulnerability, please mail your bugreport to security@parity.io instead; your submission might be eligible for our Bug Bounty._
_You can find more info on the reporting process in [SECURITY.md](https://github.com/paritytech/parity-ethereum/blob/master/SECURITY.md)_


- **Parity Ethereum version**: 0.0.0
- **Operating system**: Windows / MacOS / Linux
- **Installation**: homebrew / one-line installer / built from source
- **Fully synchronized**: no / yes
- **Network**: ethereum / ropsten / kovan / ...
- **Network**: ethereum / ropsten / goerli / ...
- **Restarted**: no / yes

_Your issue description goes here below. Try to include **actual** vs. **expected behavior** and **steps to reproduce** the issue._
Expand Down
20 changes: 11 additions & 9 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ variables:

.docker-cache-status: &docker-cache-status
variables:
CARGO_HOME: "/cargo/${CI_JOB_NAME}"
CARGO_HOME: "/ci-cache/parity-ethereum/cargo/${CI_JOB_NAME}"
before_script:
- SCCACHE_ERROR_LOG=/builds/parity/parity-ethereum/sccache_error.log RUST_LOG=sccache::server=debug sccache --start-server
- sccache -s
Expand Down Expand Up @@ -97,23 +97,24 @@ test-linux:

build-android:
stage: build
image: parity/rust-android:gitlab-ci
image: parity/rust-parity-ethereum-android-build:stretch
variables:
CARGO_TARGET: armv7-linux-androideabi
<<: *docker-cache-status
<<: *collect_artifacts
script:
- scripts/gitlab/build-linux.sh
tags:
- linux-docker
<<: *collect_artifacts

build-linux: &build-linux
stage: build
only: *releaseable_branches
<<: *docker-cache-status
<<: *collect_artifacts
script:
- scripts/gitlab/build-linux.sh
- sccache -s
<<: *collect_artifacts

build-linux-i386:
<<: *build-linux
Expand All @@ -136,6 +137,7 @@ build-linux-armhf:
build-darwin:
stage: build
only: *releaseable_branches
<<: *collect_artifacts
variables:
CARGO_TARGET: x86_64-apple-darwin
CARGO_HOME: "${CI_PROJECT_DIR}/.cargo"
Expand All @@ -145,19 +147,19 @@ build-darwin:
- scripts/gitlab/build-linux.sh
tags:
- rust-osx
<<: *collect_artifacts

build-windows:
stage: build
<<: *collect_artifacts
only: *releaseable_branches
variables:
CARGO_TARGET: x86_64-pc-windows-msvc
CARGO_HOME: "${CI_PROJECT_DIR}/.cargo"
CARGO_HOME: "C:/ci-cache/parity-ethereum/cargo/$CI_JOB_NAME"
GIT_SUBMODULE_STRATEGY: none
script:
- sh scripts/gitlab/build-windows.sh
tags:
- rust-windows
<<: *collect_artifacts

publish-docker:
stage: publish
Expand All @@ -173,6 +175,7 @@ publish-docker:
publish-snap: &publish-snap
stage: publish
only: *releaseable_branches
<<: *collect_artifacts
image: snapcore/snapcraft
variables:
BUILD_ARCH: amd64
Expand All @@ -183,7 +186,6 @@ publish-snap: &publish-snap
- linux-docker
script:
- scripts/gitlab/publish-snap.sh
<<: *collect_artifacts

publish-snap-i386:
<<: *publish-snap
Expand Down Expand Up @@ -247,6 +249,7 @@ publish-awss3-release:

publish-docs:
stage: publish
image: parity/rust-parity-ethereum-docs:xenial
only:
- tags
except:
Expand All @@ -256,4 +259,3 @@ publish-docs:
- scripts/gitlab/publish-docs.sh
tags:
- linux-docker

563 changes: 285 additions & 278 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
description = "Parity Ethereum client"
name = "parity-ethereum"
# NOTE Make sure to update util/version/Cargo.toml as well
version = "2.5.0"
version = "2.6.0"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

Expand Down
2 changes: 1 addition & 1 deletion ethcore/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ keccak-hasher = { path = "../util/keccak-hasher" }
kvdb = "0.1"
kvdb-memorydb = "0.1"
kvdb-rocksdb = { version = "0.1.3", optional = true }
lazy_static = "1.0"
lazy_static = "1.2.0"
len-caching-lock = { path = "../util/len-caching-lock" }
log = "0.4"
lru-cache = "0.1"
Expand Down
16 changes: 9 additions & 7 deletions ethcore/light/src/client/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ use common_types::blockchain_info::BlockChainInfo;
use common_types::encoded;
use common_types::header::Header;
use common_types::ids::BlockId;
use common_types::verification_queue_info::VerificationQueueInfo as BlockQueueInfo;

use kvdb::KeyValueDB;

Expand Down Expand Up @@ -91,6 +92,9 @@ pub trait LightChainClient: Send + Sync {
/// Attempt to get a block hash by block id.
fn block_hash(&self, id: BlockId) -> Option<H256>;

/// Get block queue information.
fn queue_info(&self) -> BlockQueueInfo;

/// Attempt to get block header by block id.
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;

Expand Down Expand Up @@ -125,9 +129,6 @@ pub trait LightChainClient: Send + Sync {
/// Flush the queue.
fn flush_queue(&self);

/// Get queue info.
fn queue_info(&self) -> queue::QueueInfo;

/// Get the `i`th CHT root.
fn cht_root(&self, i: usize) -> Option<H256>;

Expand Down Expand Up @@ -534,13 +535,18 @@ impl<T: ChainDataFetcher> Client<T> {
}
}


impl<T: ChainDataFetcher> LightChainClient for Client<T> {
fn add_listener(&self, listener: Weak<LightChainNotify>) {
Client::add_listener(self, listener)
}

fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) }

fn queue_info(&self) -> queue::QueueInfo {
self.queue.queue_info()
}

fn queue_header(&self, header: Header) -> EthcoreResult<H256> {
self.import_header(header)
}
Expand Down Expand Up @@ -600,10 +606,6 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
Client::flush_queue(self);
}

fn queue_info(&self) -> queue::QueueInfo {
self.queue.queue_info()
}

fn cht_root(&self, i: usize) -> Option<H256> {
Client::cht_root(self, i)
}
Expand Down
153 changes: 84 additions & 69 deletions ethcore/light/src/on_demand/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,24 @@ pub mod error {
}
}

/// Public interface for performing network requests `OnDemand`
pub trait OnDemandRequester: Send + Sync {
/// Submit a strongly-typed batch of requests.
///
/// Fails if back-reference are not coherent.
fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
where
T: request::RequestAdapter;

/// Submit a vector of requests to be processed together.
///
/// Fails if back-references are not coherent.
/// The returned vector of responses will correspond to the requests exactly.
fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>)
-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>;
}


// relevant peer info.
#[derive(Debug, Clone, PartialEq, Eq)]
struct Peer {
Expand Down Expand Up @@ -355,71 +373,8 @@ pub struct OnDemand {
request_number_of_consecutive_errors: usize
}

impl OnDemand {

/// Create a new `OnDemand` service with the given cache.
pub fn new(
cache: Arc<Mutex<Cache>>,
response_time_window: Duration,
request_backoff_start: Duration,
request_backoff_max: Duration,
request_backoff_rounds_max: usize,
request_number_of_consecutive_errors: usize,
) -> Self {

Self {
pending: RwLock::new(Vec::new()),
peers: RwLock::new(HashMap::new()),
in_transit: RwLock::new(HashMap::new()),
cache,
no_immediate_dispatch: false,
response_time_window: Self::sanitize_circuit_breaker_input(response_time_window, "Response time window"),
request_backoff_start: Self::sanitize_circuit_breaker_input(request_backoff_start, "Request initial backoff time window"),
request_backoff_max: Self::sanitize_circuit_breaker_input(request_backoff_max, "Request maximum backoff time window"),
request_backoff_rounds_max,
request_number_of_consecutive_errors,
}
}

fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration {
if dur.as_secs() < 1 {
warn!(target: "on_demand",
"{} is too short must be at least 1 second, configuring it to 1 second", name);
Duration::from_secs(1)
} else {
dur
}
}

// make a test version: this doesn't dispatch pending requests
// until you trigger it manually.
#[cfg(test)]
fn new_test(
cache: Arc<Mutex<Cache>>,
request_ttl: Duration,
request_backoff_start: Duration,
request_backoff_max: Duration,
request_backoff_rounds_max: usize,
request_number_of_consecutive_errors: usize,
) -> Self {
let mut me = OnDemand::new(
cache,
request_ttl,
request_backoff_start,
request_backoff_max,
request_backoff_rounds_max,
request_number_of_consecutive_errors,
);
me.no_immediate_dispatch = true;

me
}

/// Submit a vector of requests to be processed together.
///
/// Fails if back-references are not coherent.
/// The returned vector of responses will correspond to the requests exactly.
pub fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>)
impl OnDemandRequester for OnDemand {
fn request_raw(&self, ctx: &BasicContext, requests: Vec<Request>)
-> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>
{
let (sender, receiver) = oneshot::channel();
Expand Down Expand Up @@ -475,10 +430,7 @@ impl OnDemand {
Ok(receiver)
}

/// Submit a strongly-typed batch of requests.
///
/// Fails if back-reference are not coherent.
pub fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
fn request<T>(&self, ctx: &BasicContext, requests: T) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
where T: request::RequestAdapter
{
self.request_raw(ctx, requests.make_requests()).map(|recv| OnResponses {
Expand All @@ -487,6 +439,69 @@ impl OnDemand {
})
}

}

impl OnDemand {

/// Create a new `OnDemand` service with the given cache.
pub fn new(
cache: Arc<Mutex<Cache>>,
response_time_window: Duration,
request_backoff_start: Duration,
request_backoff_max: Duration,
request_backoff_rounds_max: usize,
request_number_of_consecutive_errors: usize,
) -> Self {

Self {
pending: RwLock::new(Vec::new()),
peers: RwLock::new(HashMap::new()),
in_transit: RwLock::new(HashMap::new()),
cache,
no_immediate_dispatch: false,
response_time_window: Self::sanitize_circuit_breaker_input(response_time_window, "Response time window"),
request_backoff_start: Self::sanitize_circuit_breaker_input(request_backoff_start, "Request initial backoff time window"),
request_backoff_max: Self::sanitize_circuit_breaker_input(request_backoff_max, "Request maximum backoff time window"),
request_backoff_rounds_max,
request_number_of_consecutive_errors,
}
}

fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration {
if dur.as_secs() < 1 {
warn!(target: "on_demand",
"{} is too short must be at least 1 second, configuring it to 1 second", name);
Duration::from_secs(1)
} else {
dur
}
}

// make a test version: this doesn't dispatch pending requests
// until you trigger it manually.
#[cfg(test)]
fn new_test(
cache: Arc<Mutex<Cache>>,
request_ttl: Duration,
request_backoff_start: Duration,
request_backoff_max: Duration,
request_backoff_rounds_max: usize,
request_number_of_consecutive_errors: usize,
) -> Self {
let mut me = OnDemand::new(
cache,
request_ttl,
request_backoff_start,
request_backoff_max,
request_backoff_rounds_max,
request_number_of_consecutive_errors,
);
me.no_immediate_dispatch = true;

me
}


// maybe dispatch pending requests.
// sometimes
fn attempt_dispatch(&self, ctx: &BasicContext) {
Expand Down
2 changes: 1 addition & 1 deletion ethcore/light/src/on_demand/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use std::thread;

use super::{request, OnDemand, Peer, HeaderRef};
use super::{request, OnDemand, OnDemandRequester, Peer, HeaderRef};

// useful contexts to give the service.
enum Context {
Expand Down
Loading