Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2024-12-23 14:55:18 +00:00)

Initial commit (commit 9058467be1)
.github/actions-rs/grcov.yml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
branch: false
ignore-not-existing: true
llvm: true
.github/workflows/cc.yml (vendored, new file, 53 lines)
@@ -0,0 +1,53 @@
name: code-coverage

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  unittest-cov:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
      with:
        submodules: recursive

    - name: Cargo cache
      uses: actions/cache@v3
      with:
        path: |
          ~/.cargo/bin/
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
        key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}

    - name: Install 1.71.0 toolchain
      uses: actions-rs/toolchain@v1
      with:
        toolchain: 1.71.0
        override: true

    - name: Run unittest
      run: cargo test --all-features --no-fail-fast
      env:
        CARGO_INCREMENTAL: '0'
        RUSTC_BOOTSTRAP: '1'
        RUSTFLAGS: '-Zprofile -Ccodegen-units=1 -Copt-level=0 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests'
        RUSTDOCFLAGS: '-Zprofile -Ccodegen-units=1 -Copt-level=0 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests'

    - id: coverage
      uses: actions-rs/grcov@v0.1

    - name: Upload coverage to Codecov
      uses: codecov/codecov-action@v3
      with:
        file: ${{ steps.coverage.outputs.report }}
        fail_ci_if_error: true
.github/workflows/rust.yml (vendored, new file, 100 lines)
@@ -0,0 +1,100 @@
# Based on https://github.com/actions-rs/meta/blob/master/recipes/quickstart.md
#
# While our "example" application has the platform-specific code,
# for simplicity we are compiling and testing everything on the Ubuntu environment only.
# For multi-OS testing see the `cross.yml` workflow.

name: rust

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

env:
  CARGO_TERM_COLOR: always

jobs:
  check:
    name: check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Run cargo check
        uses: actions-rs/cargo@v1
        with:
          command: check

  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Cargo cache
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Run cargo test
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --release

  lints:
    name: lints
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
          components: rustfmt, clippy

      - name: Run cargo fmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - name: Run cargo clippy
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D warnings
.github/workflows/tests.yml (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
name: functional-test

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  test:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
      with:
        submodules: recursive

    - name: Cargo cache
      uses: actions/cache@v3
      with:
        path: |
          ~/.cargo/bin/
          ~/.cargo/registry/index/
          ~/.cargo/registry/cache/
          ~/.cargo/git/db/
          target/
        key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}

    - name: Build
      run: cargo build --release --verbose

    - name: Set up Python 3.9
      uses: actions/setup-python@v4
      with:
        python-version: '3.9'
        cache: 'pip'

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi

    - name: Run tests
      run: |
        cd tests
        python test_all.py
.gitignore (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
.DS_Store
/target
# src/config.rs
/.idea
tests/**/__pycache__
tests/tmp/**
.vscode/*.json
.gitmodules (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
[submodule "zerog-storage-contracts"]
	path = zerog-storage-contracts
	url = git@github.com:zero-gravity-labs/zerog-storage-contracts.git
[submodule "zerog-storage-client"]
	path = zerog-storage-client
	url = git@github.com:zero-gravity-labs/zerog-storage-client.git
Cargo.lock (generated, new file, 8082 lines)
File diff suppressed because it is too large.
Cargo.toml (new file, 32 lines)
@@ -0,0 +1,32 @@
[workspace]

members = [
    "common/channel",
    "common/directory",
    "common/hashset_delay",
    "common/lighthouse_metrics",
    "common/merkle_tree",
    "common/task_executor",
    "common/zgs_version",
    "common/unused_port",
    "common/append_merkle",

    "node",
    "node/chunk_pool",
    "node/file_location_cache",
    "node/log_entry_sync",
    "node/miner",
    "node/network",
    "node/router",
    "node/rpc",
    "node/shared_types",
    "node/storage",
    "node/storage-async",
    "node/sync",
]
resolver = "2"

[patch.crates-io]
discv5 = { path = "version-meld/discv5" }
eth2_ssz = { path = "version-meld/eth2_ssz" }
enr = { path = "version-meld/enr" }
LICENSE.lighthouse (new file, 201 lines)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

   "License" shall mean the terms and conditions for use, reproduction,
   and distribution as defined by Sections 1 through 9 of this document.

   "Licensor" shall mean the copyright owner or entity authorized by
   the copyright owner that is granting the License.

   "Legal Entity" shall mean the union of the acting entity and all
   other entities that control, are controlled by, or are under common
   control with that entity. For the purposes of this definition,
   "control" means (i) the power, direct or indirect, to cause the
   direction or management of such entity, whether by contract or
   otherwise, or (ii) ownership of fifty percent (50%) or more of the
   outstanding shares, or (iii) beneficial ownership of such entity.

   "You" (or "Your") shall mean an individual or Legal Entity
   exercising permissions granted by this License.

   "Source" form shall mean the preferred form for making modifications,
   including but not limited to software source code, documentation
   source, and configuration files.

   "Object" form shall mean any form resulting from mechanical
   transformation or translation of a Source form, including but
   not limited to compiled object code, generated documentation,
   and conversions to other media types.

   "Work" shall mean the work of authorship, whether in Source or
   Object form, made available under the License, as indicated by a
   copyright notice that is included in or attached to the work
   (an example is provided in the Appendix below).

   "Derivative Works" shall mean any work, whether in Source or Object
   form, that is based on (or derived from) the Work and for which the
   editorial revisions, annotations, elaborations, or other modifications
   represent, as a whole, an original work of authorship. For the purposes
   of this License, Derivative Works shall not include works that remain
   separable from, or merely link (or bind by name) to the interfaces of,
   the Work and Derivative Works thereof.

   "Contribution" shall mean any work of authorship, including
   the original version of the Work and any modifications or additions
   to that Work or Derivative Works thereof, that is intentionally
   submitted to Licensor for inclusion in the Work by the copyright owner
   or by an individual or Legal Entity authorized to submit on behalf of
   the copyright owner. For the purposes of this definition, "submitted"
   means any form of electronic, verbal, or written communication sent
   to the Licensor or its representatives, including but not limited to
   communication on electronic mailing lists, source code control systems,
   and issue tracking systems that are managed by, or on behalf of, the
   Licensor for the purpose of discussing and improving the Work, but
   excluding communication that is conspicuously marked or otherwise
   designated in writing by the copyright owner as "Not a Contribution."

   "Contributor" shall mean Licensor and any individual or Legal Entity
   on behalf of whom a Contribution has been received by Licensor and
   subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   copyright license to reproduce, prepare Derivative Works of,
   publicly display, publicly perform, sublicense, and distribute the
   Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   (except as stated in this section) patent license to make, have made,
   use, offer to sell, sell, import, and otherwise transfer the Work,
   where such license applies only to those patent claims licensable
   by such Contributor that are necessarily infringed by their
   Contribution(s) alone or by combination of their Contribution(s)
   with the Work to which such Contribution(s) was submitted. If You
   institute patent litigation against any entity (including a
   cross-claim or counterclaim in a lawsuit) alleging that the Work
   or a Contribution incorporated within the Work constitutes direct
   or contributory patent infringement, then any patent licenses
   granted to You under this License for that Work shall terminate
   as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
   Work or Derivative Works thereof in any medium, with or without
   modifications, and in Source or Object form, provided that You
   meet the following conditions:

   (a) You must give any other recipients of the Work or
       Derivative Works a copy of this License; and

   (b) You must cause any modified files to carry prominent notices
       stating that You changed the files; and

   (c) You must retain, in the Source form of any Derivative Works
       that You distribute, all copyright, patent, trademark, and
       attribution notices from the Source form of the Work,
       excluding those notices that do not pertain to any part of
       the Derivative Works; and

   (d) If the Work includes a "NOTICE" text file as part of its
       distribution, then any Derivative Works that You distribute must
       include a readable copy of the attribution notices contained
       within such NOTICE file, excluding those notices that do not
       pertain to any part of the Derivative Works, in at least one
       of the following places: within a NOTICE text file distributed
       as part of the Derivative Works; within the Source form or
       documentation, if provided along with the Derivative Works; or,
       within a display generated by the Derivative Works, if and
       wherever such third-party notices normally appear. The contents
       of the NOTICE file are for informational purposes only and
       do not modify the License. You may add Your own attribution
       notices within Derivative Works that You distribute, alongside
       or as an addendum to the NOTICE text from the Work, provided
       that such additional attribution notices cannot be construed
       as modifying the License.

   You may add Your own copyright statement to Your modifications and
   may provide additional or different license terms and conditions
   for use, reproduction, or distribution of Your modifications, or
   for any such Derivative Works as a whole, provided Your use,
   reproduction, and distribution of the Work otherwise complies with
   the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
   any Contribution intentionally submitted for inclusion in the Work
   by You to the Licensor shall be under the terms and conditions of
   this License, without any additional terms or conditions.
   Notwithstanding the above, nothing herein shall supersede or modify
   the terms of any separate license agreement you may have executed
   with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
   names, trademarks, service marks, or product names of the Licensor,
   except as required for reasonable and customary use in describing the
   origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
   agreed to in writing, Licensor provides the Work (and each
   Contributor provides its Contributions) on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied, including, without limitation, any warranties or conditions
   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
   PARTICULAR PURPOSE. You are solely responsible for determining the
   appropriateness of using or redistributing the Work and assume any
   risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
   whether in tort (including negligence), contract, or otherwise,
   unless required by applicable law (such as deliberate and grossly
   negligent acts) or agreed to in writing, shall any Contributor be
   liable to You for damages, including any direct, indirect, special,
   incidental, or consequential damages of any character arising as a
   result of this License or out of the use or inability to use the
   Work (including but not limited to damages for loss of goodwill,
   work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses), even if such Contributor
   has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
   the Work or Derivative Works thereof, You may choose to offer,
   and charge a fee for, acceptance of support, warranty, indemnity,
   or other liability obligations and/or rights consistent with this
   License. However, in accepting such obligations, You may act only
   on Your own behalf and on Your sole responsibility, not on behalf
   of any other Contributor, and only if You agree to indemnify,
   defend, and hold each Contributor harmless for any liability
   incurred by, or claims asserted against, such Contributor by reason
   of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

   To apply the Apache License to your work, attach the following
   boilerplate notice, with the fields enclosed by brackets "[]"
   replaced with your own identifying information. (Don't include
   the brackets!) The text should be enclosed in the appropriate
   comment syntax for the file format. We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

Copyright 2018 Sigma Prime Pty Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTICE (new file, 6 lines)
@@ -0,0 +1,6 @@
ZeroGStorage
Copyright 2023 ZeroGStorage

The Initial Developer of some parts of the framework, which are copied from, derived from, or
inspired by Lighthouse, is Sigma Prime Pty Ltd (https://sigmaprime.io).
Copyright 2018 - 2022 Sigma Prime Pty Ltd.
codecov.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
coverage:
  status:
    project:
      default:
        informational: true
    patch:
      default:
        informational: true
common/append_merkle/Cargo.toml (new file, 14 lines)
@@ -0,0 +1,14 @@
[package]
name = "append_merkle"
version = "0.1.0"
edition = "2021"

[dependencies]
anyhow = { version = "=1.0.58", features = ["backtrace"] }
ethereum-types = "0.14"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
eth2_ssz = "0.4.0"
eth2_ssz_derive = "0.3.0"
serde = { version = "1.0.137", features = ["derive"] }
lazy_static = "1.4.0"
tracing = "0.1.36"
common/append_merkle/src/lib.rs (new file, 641 lines)
@@ -0,0 +1,641 @@
mod merkle_tree;
mod proof;
mod sha3;

use anyhow::{anyhow, bail, Result};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::fmt::Debug;
use std::marker::PhantomData;
use tracing::warn;

pub use crate::merkle_tree::{Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead};
pub use proof::{Proof, RangeProof};
pub use sha3::Sha3Algorithm;

pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
    /// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves.
    layers: Vec<Vec<E>>,
    /// Keep the delta nodes that can be used to construct a history tree.
    /// The key is the `tx_seq` of that version.
    delta_nodes_map: HashMap<u64, DeltaNodes<E>>,
    root_to_tx_seq_map: HashMap<E, u64>,

    /// For `last_chunk_merkle` after the first chunk, this is set to `Some(10)` so that
    /// `revert_to` can reset the state correctly when needed.
    min_depth: Option<usize>,
    /// Used to compute the correct padding hash.
    /// 0 for `pora_chunk_merkle` and 10 for not-first `last_chunk_merkle`.
    leaf_height: usize,
    _a: PhantomData<A>,
}

impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
    pub fn new(leaves: Vec<E>, leaf_height: usize, start_tx_seq: Option<u64>) -> Self {
        let mut merkle = Self {
            layers: vec![leaves],
            delta_nodes_map: HashMap::new(),
            root_to_tx_seq_map: HashMap::new(),
            min_depth: None,
            leaf_height,
            _a: Default::default(),
        };
        if merkle.leaves() == 0 {
            if let Some(seq) = start_tx_seq {
                merkle.delta_nodes_map.insert(
                    seq,
                    DeltaNodes {
                        right_most_nodes: vec![],
                    },
                );
            }
            return merkle;
        }
        // Reconstruct the whole tree.
        merkle.recompute(0, 0, None);
        // Commit the first version in memory.
        // TODO(zz): Check when the roots become available.
        merkle.commit(start_tx_seq);
        merkle
    }

    pub fn new_with_subtrees(
        initial_data: MerkleTreeInitialData<E>,
        leaf_height: usize,
        start_tx_seq: Option<u64>,
    ) -> Result<Self> {
        let mut merkle = Self {
            layers: vec![vec![]],
            delta_nodes_map: HashMap::new(),
            root_to_tx_seq_map: HashMap::new(),
            min_depth: None,
            leaf_height,
            _a: Default::default(),
        };
        if initial_data.subtree_list.is_empty() {
            if let Some(seq) = start_tx_seq {
                merkle.delta_nodes_map.insert(
                    seq,
                    DeltaNodes {
                        right_most_nodes: vec![],
                    },
                );
            }
            return Ok(merkle);
        }
        merkle.append_subtree_list(initial_data.subtree_list)?;
        merkle.commit(start_tx_seq);
        for (index, h) in initial_data.known_leaves {
            merkle.fill_leaf(index, h);
        }
        Ok(merkle)
    }

    /// This is only used for the last chunk, so `leaf_height` is always 0 so far.
    pub fn new_with_depth(leaves: Vec<E>, depth: usize, start_tx_seq: Option<u64>) -> Self {
        if leaves.is_empty() {
            // Create an empty merkle tree with `depth`.
            let mut merkle = Self {
                layers: vec![vec![]; depth],
                delta_nodes_map: HashMap::new(),
                root_to_tx_seq_map: HashMap::new(),
                min_depth: Some(depth),
                leaf_height: 0,
                _a: Default::default(),
            };
            if let Some(seq) = start_tx_seq {
                merkle.delta_nodes_map.insert(
                    seq,
                    DeltaNodes {
                        right_most_nodes: vec![],
                    },
                );
            }
            merkle
        } else {
            let mut layers = vec![vec![]; depth];
            layers[0] = leaves;
            let mut merkle = Self {
                layers,
                delta_nodes_map: HashMap::new(),
                root_to_tx_seq_map: HashMap::new(),
                min_depth: Some(depth),
                leaf_height: 0,
                _a: Default::default(),
            };
            // Reconstruct the whole tree.
            merkle.recompute(0, 0, None);
            // Commit the first version in memory.
            merkle.commit(start_tx_seq);
            merkle
        }
    }

    /// Append a new leaf and recompute the nodes on its path to the root.
    pub fn append(&mut self, new_leaf: E) {
        self.layers[0].push(new_leaf);
        self.recompute_after_append_leaves(self.leaves() - 1);
    }

    pub fn append_list(&mut self, mut leaf_list: Vec<E>) {
        let start_index = self.leaves();
        self.layers[0].append(&mut leaf_list);
        self.recompute_after_append_leaves(start_index);
    }

    /// Append a leaf list by providing their intermediate node hash.
    /// The appended subtree must be aligned. And it's up to the caller to
    /// append the padding nodes for alignment.
    /// Other nodes in the subtree will be set to `null` nodes.
    /// TODO: Optimize to avoid storing the `null` nodes?
    pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
        let start_index = self.leaves();
        self.append_subtree_inner(subtree_depth, subtree_root)?;
        self.recompute_after_append_subtree(start_index, subtree_depth - 1);
        Ok(())
    }

    pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
        for (subtree_depth, subtree_root) in subtree_list {
            let start_index = self.leaves();
            self.append_subtree_inner(subtree_depth, subtree_root)?;
            self.recompute_after_append_subtree(start_index, subtree_depth - 1);
        }
        Ok(())
    }

    /// Change the value of the last leaf.
    /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
    pub fn update_last(&mut self, updated_leaf: E) {
        if self.layers[0].is_empty() {
            // Special case for the first data.
            self.layers[0].push(updated_leaf);
        } else {
            *self.layers[0].last_mut().unwrap() = updated_leaf;
        }
        self.recompute_after_append_leaves(self.leaves() - 1);
    }

    /// Fill an unknown `null` leaf with its real value.
    /// Panics if the leaf changes the merkle root or the index is out of range.
    /// TODO: Batch computing intermediate nodes.
    pub fn fill_leaf(&mut self, index: usize, leaf: E) {
        if self.layers[0][index] == E::null() {
            self.layers[0][index] = leaf;
            self.recompute_after_fill_leaves(index, index + 1);
        } else if self.layers[0][index] != leaf {
            panic!("Fill with invalid leaf")
        }
    }

    pub fn gen_range_proof(&self, start_index: usize, end_index: usize) -> Result<RangeProof<E>> {
        if end_index <= start_index {
            bail!(
                "invalid proof range: start={} end={}",
                start_index,
                end_index
            );
        }
        // TODO(zz): Optimize range proof.
        let left_proof = self.gen_proof(start_index)?;
        let right_proof = self.gen_proof(end_index - 1)?;
        Ok(RangeProof {
            left_proof,
            right_proof,
        })
    }

    pub fn check_root(&self, root: &E) -> bool {
        self.root_to_tx_seq_map.contains_key(root)
    }

    pub fn leaf_at(&self, position: usize) -> Result<Option<E>> {
        if position >= self.leaves() {
            bail!("Out of bound: position={} end={}", position, self.leaves());
        }
        if self.layers[0][position] != E::null() {
            Ok(Some(self.layers[0][position].clone()))
        } else {
            // The leaf hash is unknown.
            Ok(None)
        }
    }

    /// Return a list of subtrees that can be used to rebuild the tree.
    pub fn get_subtrees(&self) -> Vec<(usize, E)> {
        let mut next_index = 0;
        let mut subtree_list: Vec<(usize, E)> = Vec::new();
        while next_index < self.leaves() {
            let root_tuple = self.first_known_root_at(next_index);
            let subtree_size = 1 << (root_tuple.0 - 1);
            let root_start_index = next_index / subtree_size * subtree_size;

            // Previous subtrees are included within the new subtree.
            // Pop them out and replace with the new one.
            if root_start_index < next_index {
                while let Some(last) = subtree_list.pop() {
                    next_index -= 1 << (last.0 - 1);
                    if next_index == root_start_index {
                        break;
                    }
                }
            }
            next_index += subtree_size;
            subtree_list.push(root_tuple);
        }
        subtree_list
    }
}

impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
    pub fn commit(&mut self, tx_seq: Option<u64>) {
        if let Some(tx_seq) = tx_seq {
            if self.leaves() == 0 {
                // The state is empty, so we just save the root as `null`.
                // Note that this root should not be used.
                self.delta_nodes_map.insert(
                    tx_seq,
                    DeltaNodes {
                        right_most_nodes: vec![],
                    },
                );
                return;
            }
            let mut right_most_nodes = Vec::new();
            for layer in &self.layers {
                right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone()));
            }
            let root = self.root().clone();
            assert_eq!(root, right_most_nodes.last().unwrap().1);
            self.delta_nodes_map
                .insert(tx_seq, DeltaNodes::new(right_most_nodes));
            self.root_to_tx_seq_map.insert(root, tx_seq);
        }
    }

    fn before_extend_layer(&mut self, height: usize) {
        if height == self.layers.len() {
            self.layers.push(Vec::new());
        }
    }

    fn recompute_after_append_leaves(&mut self, start_index: usize) {
        self.recompute(start_index, 0, None)
    }

    fn recompute_after_append_subtree(&mut self, start_index: usize, height: usize) {
        self.recompute(start_index, height, None)
    }

    fn recompute_after_fill_leaves(&mut self, start_index: usize, end_index: usize) {
        self.recompute(start_index, 0, Some(end_index))
    }

    /// Given a range of changed leaf nodes, recompute the tree.
    /// Since this tree is append-only, we always compute to the end.
    fn recompute(
        &mut self,
        mut start_index: usize,
        mut height: usize,
        mut maybe_end_index: Option<usize>,
    ) {
        start_index >>= height;
        maybe_end_index = maybe_end_index.map(|end| end >> height);
        // Loop until we compute the new root and reach `tree_depth`.
        while self.layers[height].len() > 1 || height < self.layers.len() - 1 {
            let next_layer_start_index = start_index >> 1;
            if start_index % 2 == 1 {
                start_index -= 1;
            }

            let mut end_index = maybe_end_index.unwrap_or(self.layers[height].len());
            if end_index % 2 == 1 && end_index != self.layers[height].len() {
                end_index += 1;
            }
            let mut i = 0;
            let mut iter = self.layers[height][start_index..end_index].chunks_exact(2);
            // We cannot modify the parent layer while iterating the child layer,
            // so just keep the changes and update them later.
            let mut parent_update = Vec::new();
            while let Some([left, right]) = iter.next() {
                // If either left or right is null (unknown), we cannot compute the parent hash.
                // Note that if we are recomputing a range of an existing tree,
                // we do not need to keep these possibly null parents. This is only saved
                // for the case of constructing a new tree from the leaves.
                let parent = if *left == E::null() || *right == E::null() {
                    E::null()
                } else {
                    A::parent(left, right)
                };
                parent_update.push((next_layer_start_index + i, parent));
                i += 1;
            }
            if let [r] = iter.remainder() {
                // Same as above.
                let parent = if *r == E::null() {
                    E::null()
                } else {
                    A::parent_single(r, height + self.leaf_height)
                };
                parent_update.push((next_layer_start_index + i, parent));
            }
            if !parent_update.is_empty() {
                self.before_extend_layer(height + 1);
            }
            // `parent_update` is in increasing order by `parent_index`, so
            // we can just overwrite `last_changed_parent_index` with new values.
            let mut last_changed_parent_index = None;
            for (parent_index, parent) in parent_update {
                match parent_index.cmp(&self.layers[height + 1].len()) {
                    Ordering::Less => {
                        // We do not overwrite with null.
                        if parent != E::null() {
                            if self.layers[height + 1][parent_index] == E::null()
                                // The last node in a layer can be updated.
                                || (self.layers[height + 1][parent_index] != parent
                                    && parent_index == self.layers[height + 1].len() - 1)
                            {
                                self.layers[height + 1][parent_index] = parent;
                                last_changed_parent_index = Some(parent_index);
                            } else if self.layers[height + 1][parent_index] != parent {
                                // Recompute changes a node in the middle. This should be impossible
                                // if the inputs are valid.
                                panic!("Invalid append merkle tree! height={} index={} expected={:?} get={:?}",
                                       height + 1, parent_index, self.layers[height + 1][parent_index], parent);
                            }
                        }
                    }
                    Ordering::Equal => {
                        self.layers[height + 1].push(parent);
                        last_changed_parent_index = Some(parent_index);
                    }
                    Ordering::Greater => {
                        unreachable!("depth={}, parent_index={}", height, parent_index);
                    }
                }
            }
            if last_changed_parent_index.is_none() {
                break;
            }
            maybe_end_index = last_changed_parent_index.map(|i| i + 1);
            height += 1;
            start_index = next_layer_start_index;
        }
    }

    fn append_subtree_inner(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
        if subtree_depth == 0 {
            bail!("Subtree depth should not be zero!");
        }
        if self.leaves() % (1 << (subtree_depth - 1)) != 0 {
            warn!(
                "The current leaves count is not aligned with the merged subtree, \
                this is only possible during recovery, leaves={}",
                self.leaves()
            );
        }
        for height in 0..(subtree_depth - 1) {
            self.before_extend_layer(height);
            let subtree_layer_size = 1 << (subtree_depth - 1 - height);
            self.layers[height].append(&mut vec![E::null(); subtree_layer_size]);
        }
        self.before_extend_layer(subtree_depth - 1);
        self.layers[subtree_depth - 1].push(subtree_root);
        Ok(())
    }

    #[cfg(test)]
    pub fn validate(&self, proof: &Proof<E>, leaf: &E, position: usize) -> Result<bool> {
        proof.validate::<A>(leaf, position)?;
        Ok(self.root_to_tx_seq_map.contains_key(&proof.root()))
    }

    pub fn revert_to(&mut self, tx_seq: u64) -> Result<()> {
        if self.layers[0].is_empty() {
            // Any previous state of an empty tree is always empty.
            return Ok(());
        }
        let delta_nodes = self
            .delta_nodes_map
            .get(&tx_seq)
            .ok_or_else(|| anyhow!("tx_seq unavailable, root={:?}", tx_seq))?;
        // Dropping the upper layers that are not in the old merkle tree.
        self.layers.truncate(delta_nodes.right_most_nodes.len());
        for (height, (last_index, right_most_node)) in
            delta_nodes.right_most_nodes.iter().enumerate()
        {
            self.layers[height].truncate(*last_index + 1);
            self.layers[height][*last_index] = right_most_node.clone();
        }
        self.clear_after(tx_seq);
        Ok(())
    }

    pub fn at_root_version(&self, root_hash: &E) -> Result<HistoryTree<E>> {
        let tx_seq = self
            .root_to_tx_seq_map
            .get(root_hash)
            .ok_or_else(|| anyhow!("old root unavailable, root={:?}", root_hash))?;
        let delta_nodes = self
            .delta_nodes_map
            .get(tx_seq)
            .ok_or_else(|| anyhow!("tx_seq unavailable, tx_seq={:?}", tx_seq))?;
        if delta_nodes.height() == 0 {
            bail!("empty tree");
        }
        Ok(HistoryTree {
            layers: &self.layers,
            delta_nodes,
            leaf_height: self.leaf_height,
        })
    }

    pub fn reset(&mut self) {
        self.layers = match self.min_depth {
            None => vec![vec![]],
            Some(depth) => vec![vec![]; depth],
        };
    }

    fn clear_after(&mut self, tx_seq: u64) {
        let mut tx_seq = tx_seq + 1;
        while self.delta_nodes_map.contains_key(&tx_seq) {
            if let Some(nodes) = self.delta_nodes_map.remove(&tx_seq) {
                if nodes.height() != 0 {
                    self.root_to_tx_seq_map.remove(nodes.root());
                }
            }
            tx_seq += 1;
        }
    }

    /// Return the height and the root hash of the first available node from the leaf to the root.
    /// The caller should ensure that `index` is within range.
    fn first_known_root_at(&self, index: usize) -> (usize, E) {
        let mut height = 0;
        let mut index_in_layer = index;
        while height < self.layers.len() {
            let node = self.node(height, index_in_layer);
            if !node.is_null() {
                return (height + 1, node.clone());
            }
            height += 1;
            index_in_layer /= 2;
        }
        unreachable!("root is always available")
    }
}

#[derive(Clone, Debug)]
struct DeltaNodes<E: HashElement> {
    /// The right-most node of each layer and its position in that layer.
    right_most_nodes: Vec<(usize, E)>,
}

impl<E: HashElement> DeltaNodes<E> {
    fn new(right_most_nodes: Vec<(usize, E)>) -> Self {
        Self { right_most_nodes }
    }

    fn get(&self, height: usize, position: usize) -> Result<Option<&E>> {
        if height >= self.right_most_nodes.len() || position > self.right_most_nodes[height].0 {
            Err(anyhow!("position out of tree range"))
        } else if position == self.right_most_nodes[height].0 {
            Ok(Some(&self.right_most_nodes[height].1))
        } else {
            Ok(None)
        }
    }

    fn layer_len(&self, height: usize) -> usize {
        self.right_most_nodes[height].0 + 1
    }

    fn height(&self) -> usize {
        self.right_most_nodes.len()
    }

    fn root(&self) -> &E {
        &self.right_most_nodes.last().unwrap().1
    }
}

pub struct HistoryTree<'m, E: HashElement> {
    /// A reference to the global tree nodes.
    layers: &'m Vec<Vec<E>>,
    /// The delta nodes that differ from `layers`.
    /// This could be a reference, we just take ownership for convenience.
    delta_nodes: &'m DeltaNodes<E>,

    leaf_height: usize,
}

impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A> {
    type E = E;

    fn node(&self, layer: usize, index: usize) -> &Self::E {
        &self.layers[layer][index]
    }

    fn height(&self) -> usize {
        self.layers.len()
    }

    fn layer_len(&self, layer_height: usize) -> usize {
        self.layers[layer_height].len()
    }

    fn padding_node(&self, height: usize) -> Self::E {
        E::end_pad(height + self.leaf_height)
    }
}

impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
    type E = E;
    fn node(&self, layer: usize, index: usize) -> &Self::E {
        match self.delta_nodes.get(layer, index).expect("range checked") {
            Some(node) => node,
            None => &self.layers[layer][index],
        }
    }

    fn height(&self) -> usize {
        self.delta_nodes.height()
    }

    fn layer_len(&self, layer_height: usize) -> usize {
        self.delta_nodes.layer_len(layer_height)
    }

    fn padding_node(&self, height: usize) -> Self::E {
        E::end_pad(height + self.leaf_height)
    }
}

#[macro_export]
macro_rules! ensure_eq {
    ($given:expr, $expected:expr) => {
        ensure!(
            $given == $expected,
            format!(
                "equal check fails! {}:{}: {}={:?}, {}={:?}",
                file!(),
                line!(),
                stringify!($given),
                $given,
                stringify!($expected),
                $expected,
            )
        );
    };
}

#[cfg(test)]
mod tests {
    use crate::merkle_tree::MerkleTreeRead;
    use crate::sha3::Sha3Algorithm;
    use crate::AppendMerkleTree;
    use ethereum_types::H256;

    #[test]
    fn test_proof() {
        let n = [1, 2, 6, 1025];
        for entry_len in n {
            let mut data = Vec::new();
            for _ in 0..entry_len {
                data.push(H256::random());
            }
            let mut merkle =
                AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None);
            merkle.append_list(data.clone());
            merkle.commit(Some(0));
            verify(&data, &merkle);

            data.push(H256::random());
            merkle.append(*data.last().unwrap());
            merkle.commit(Some(1));
            verify(&data, &merkle);

            for _ in 0..6 {
                data.push(H256::random());
            }
            merkle.append_list(data[data.len() - 6..].to_vec());
            merkle.commit(Some(2));
            verify(&data, &merkle);
        }
    }

    fn verify(data: &Vec<H256>, merkle: &AppendMerkleTree<H256, Sha3Algorithm>) {
        for i in 0..data.len() {
            let proof = merkle.gen_proof(i + 1).unwrap();
            let r = merkle.validate(&proof, &data[i], i + 1);
            assert!(matches!(r, Ok(true)), "{:?}", r);
        }
        for i in (0..data.len()).step_by(6) {
            let end = std::cmp::min(i + 3, data.len());
            let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
            let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1);
            assert!(r.is_ok(), "{:?}", r);
        }
    }
}
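The `delta_nodes_map` and `root_to_tx_seq_map` bookkeeping above is what ties `commit`, `at_root_version`, and `revert_to` together. A minimal hedged sketch of that versioning flow, written against the API as it appears in this file (not part of the commit):

use anyhow::Result;
use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm};
use ethereum_types::H256;

fn versioning_demo() -> Result<()> {
    // Version 0: a single sentinel leaf, committed at tx_seq 0 (as in the unit test above).
    let mut tree = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, Some(0));
    let root_v0 = *tree.root();

    // Version 1: append two leaves and commit them under tx_seq 1.
    tree.append_list(vec![H256::random(), H256::random()]);
    tree.commit(Some(1));

    // A read-only view of version 0 is still reachable through its root hash.
    let old = tree.at_root_version(&root_v0)?;
    assert_eq!(*old.root(), root_v0);

    // Reverting to tx_seq 0 truncates the layers back to the recorded right-most nodes.
    tree.revert_to(0)?;
    assert_eq!(*tree.root(), root_v0);
    Ok(())
}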
common/append_merkle/src/merkle_tree.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
use crate::sha3::Sha3Algorithm;
use crate::Proof;
use anyhow::{bail, Result};
use ethereum_types::H256;
use lazy_static::lazy_static;
use ssz::{Decode, Encode};
use std::fmt::Debug;
use std::hash::Hash;
use tracing::trace;

pub trait HashElement:
    Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync
{
    fn end_pad(height: usize) -> Self;
    fn null() -> Self;
    fn is_null(&self) -> bool {
        self == &Self::null()
    }
}

impl HashElement for H256 {
    fn end_pad(height: usize) -> Self {
        ZERO_HASHES[height]
    }

    fn null() -> Self {
        H256::repeat_byte(1)
    }
}

lazy_static! {
    static ref ZERO_HASHES: [H256; 64] = {
        let leaf_zero_hash: H256 = Sha3Algorithm::leaf(&[0u8; 256]);
        let mut list = [H256::zero(); 64];
        list[0] = leaf_zero_hash;
        for i in 1..list.len() {
            list[i] = Sha3Algorithm::parent(&list[i - 1], &list[i - 1]);
        }
        list
    };
}

pub trait Algorithm<E: HashElement> {
    fn parent(left: &E, right: &E) -> E;
    fn parent_single(r: &E, height: usize) -> E {
        Self::parent(r, &E::end_pad(height))
    }
    fn leaf(data: &[u8]) -> E;
}

pub trait MerkleTreeRead {
    type E: HashElement;
    fn node(&self, layer: usize, index: usize) -> &Self::E;
    fn height(&self) -> usize;
    fn layer_len(&self, layer_height: usize) -> usize;
    fn padding_node(&self, height: usize) -> Self::E;

    fn leaves(&self) -> usize {
        self.layer_len(0)
    }

    fn root(&self) -> &Self::E {
        self.node(self.height() - 1, 0)
    }

    fn gen_proof(&self, leaf_index: usize) -> Result<Proof<Self::E>> {
        if leaf_index >= self.leaves() {
            bail!(
                "leaf index out of bound: leaf_index={} total_leaves={}",
                leaf_index,
                self.leaves()
            );
        }
        if self.node(0, leaf_index) == &Self::E::null() {
            bail!("Not ready to generate proof for leaf_index={}", leaf_index);
        }
        if self.height() == 1 {
            return Ok(Proof::new(
                vec![self.root().clone(), self.root().clone()],
                vec![],
            ));
        }
        let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root
        let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1
        let mut index_in_layer = leaf_index;
        lemma.push(self.node(0, leaf_index).clone());
        for height in 0..(self.height() - 1) {
            trace!(
                "gen_proof: height={} index={} hash={:?}",
                height,
                index_in_layer,
                self.node(height, index_in_layer)
            );
            if index_in_layer % 2 == 0 {
                path.push(true);
                if index_in_layer + 1 == self.layer_len(height) {
                    // TODO: This can be skipped if the tree size is available in validation.
                    lemma.push(self.padding_node(height));
                } else {
                    lemma.push(self.node(height, index_in_layer + 1).clone());
                }
            } else {
                path.push(false);
                lemma.push(self.node(height, index_in_layer - 1).clone());
            }
            index_in_layer >>= 1;
        }
        lemma.push(self.root().clone());
        Ok(Proof::new(lemma, path))
    }
}

/// This includes the data to reconstruct an `AppendMerkleTree` root where some nodes
/// are `null`. Other intermediate nodes will be computed based on these known nodes.
pub struct MerkleTreeInitialData<E: HashElement> {
    /// A list of `(subtree_depth, root)`.
    /// The subtrees are continuous so we can compute the tree root with these subtree roots.
    pub subtree_list: Vec<(usize, E)>,
    /// A list of `(index, leaf_hash)`.
    /// These leaves are in some large subtrees of `subtree_list`. 1-node subtrees are also leaves,
    /// but they will not be duplicated in `known_leaves`.
    pub known_leaves: Vec<(usize, E)>,
}

impl<E: HashElement> MerkleTreeInitialData<E> {
    pub fn leaves(&self) -> usize {
        self.subtree_list.iter().fold(0, |acc, (subtree_depth, _)| {
            acc + (1 << (subtree_depth - 1))
        })
    }
}
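A note on the padding logic above: `ZERO_HASHES[0]` is the leaf hash of a 256-byte zero chunk, and each higher entry hashes two copies of the entry below it, so `end_pad(h)` is the root of a fully padded subtree of height `h`. A small hedged sketch (not part of the commit) spelling that out:

use append_merkle::{Algorithm, HashElement, Sha3Algorithm};
use ethereum_types::H256;

#[test]
fn end_pad_matches_zero_subtree_roots() {
    // Height 0: the hash of a single 256-byte zero chunk.
    let zero_leaf: H256 = Sha3Algorithm::leaf(&[0u8; 256]);
    assert_eq!(H256::end_pad(0), zero_leaf);
    // Height 1: the parent of two zero leaves, and so on up the ZERO_HASHES table.
    assert_eq!(H256::end_pad(1), Sha3Algorithm::parent(&zero_leaf, &zero_leaf));
}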
common/append_merkle/src/proof.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
use crate::{ensure_eq, Algorithm, HashElement};
use anyhow::{bail, ensure, Result};
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode};

#[derive(Clone, Debug, Eq, PartialEq, DeriveEncode, DeriveDecode, Deserialize, Serialize)]
pub struct Proof<T: HashElement> {
    lemma: Vec<T>,
    path: Vec<bool>,
}

impl<T: HashElement> Proof<T> {
    /// Creates new MT inclusion proof
    pub fn new(hash: Vec<T>, path: Vec<bool>) -> Proof<T> {
        assert_eq!(hash.len() - 2, path.len());
        Proof { lemma: hash, path }
    }

    pub fn new_empty() -> Proof<T> {
        Proof {
            lemma: vec![],
            path: vec![],
        }
    }

    /// Return proof target leaf
    pub fn item(&self) -> T {
        self.lemma.first().unwrap().clone()
    }

    /// Return tree root
    pub fn root(&self) -> T {
        self.lemma.last().unwrap().clone()
    }

    /// Verifies MT inclusion proof
    fn validate_integrity<A: Algorithm<T>>(&self) -> bool {
        let size = self.lemma.len();

        if size < 2 {
            return false;
        }
        let mut h = self.item();

        for i in 1..size - 1 {
            h = if self.path[i - 1] {
                A::parent(&h, &self.lemma[i])
            } else {
                A::parent(&self.lemma[i], &h)
            };
        }

        h == self.root()
    }

    pub fn validate<A: Algorithm<T>>(&self, item: &T, position: usize) -> Result<()> {
        if !self.validate_integrity::<A>() {
            bail!("Invalid proof");
        }
        if *item != self.item() {
            bail!("Proof item unmatch");
        }
        if position != self.position() {
            bail!("Proof position unmatch");
        }
        Ok(())
    }

    /// Returns the path of this proof.
    pub fn path(&self) -> &[bool] {
        &self.path
    }

    /// Returns the lemma of this proof.
    pub fn lemma(&self) -> &[T] {
        &self.lemma
    }

    pub fn position(&self) -> usize {
        let mut pos = 0;
        for (i, is_left) in self.path.iter().enumerate() {
            if !is_left {
                pos += 1 << i;
            }
        }
        pos
    }
}

#[derive(Clone, Debug, Eq, PartialEq, DeriveEncode, DeriveDecode, Deserialize, Serialize)]
pub struct RangeProof<E: HashElement> {
    pub left_proof: Proof<E>,
    pub right_proof: Proof<E>,
}

impl<E: HashElement> RangeProof<E> {
    pub fn new_empty() -> Self {
        Self {
            left_proof: Proof::new_empty(),
            right_proof: Proof::new_empty(),
        }
    }

    fn validate_integrity<A: Algorithm<E>>(&self) -> bool {
        self.left_proof.validate_integrity::<A>()
            && self.right_proof.validate_integrity::<A>()
            && self.left_proof.root() == self.right_proof.root()
            && self.left_proof.path().len() == self.right_proof.path().len()
    }

    pub fn root(&self) -> E {
        self.left_proof.root()
    }

    pub fn validate<A: Algorithm<E>>(
        &self,
        range_leaves: &[E],
        start_position: usize,
    ) -> Result<()> {
        if !self.validate_integrity::<A>() {
            bail!("Invalid range proof");
        }
        if range_leaves.is_empty() {
            bail!("Empty range");
        }
        let end_position = start_position + range_leaves.len() - 1;
        ensure_eq!(self.left_proof.item(), range_leaves[0]);
        ensure_eq!(
            self.right_proof.item(),
            *range_leaves.last().expect("not empty")
        );
        ensure_eq!(self.left_proof.position(), start_position);
        ensure_eq!(self.right_proof.position(), end_position);
        let tree_depth = self.left_proof.path().len() + 1;
        // TODO: We can avoid copying the first layer.
        let mut children_layer = range_leaves.to_vec();
        for height in 0..(tree_depth - 1) {
            let mut parent_layer = Vec::new();
            let start_index = if !self.left_proof.path()[height] {
                // If the left-most node is the right child, its sibling is not within the data range and should be retrieved from the proof.
                let parent = A::parent(&self.left_proof.lemma()[height + 1], &children_layer[0]);
                parent_layer.push(parent);
                1
            } else {
                // The left-most node is the left child, its sibling is just the next child.
                0
            };
            let mut iter = children_layer[start_index..].chunks_exact(2);
            while let Some([left, right]) = iter.next() {
                parent_layer.push(A::parent(left, right))
            }
            if let [right_most] = iter.remainder() {
                if self.right_proof.path()[height] {
                    parent_layer.push(A::parent(right_most, &self.right_proof.lemma()[height + 1]));
                } else {
                    bail!("Unexpected error");
                }
            }
            children_layer = parent_layer;
        }
        assert_eq!(children_layer.len(), 1);
        let computed_root = children_layer.pop().unwrap();
        ensure_eq!(computed_root, self.root());

        Ok(())
    }
}
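One way to read `position()` above: `path` records, from the leaf upward, whether the proven node is a left child, so the `false` bits spell out the leaf index in binary. A small hedged test sketch against the API in this crate (not part of the commit):

use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm};
use ethereum_types::H256;

#[test]
fn path_bits_encode_position() {
    // Eight random leaves give a tree of height 4, so a proof path has 3 sibling bits.
    let leaves: Vec<H256> = (0..8).map(|_| H256::random()).collect();
    let tree = AppendMerkleTree::<H256, Sha3Algorithm>::new(leaves, 0, None);

    let proof = tree.gen_proof(5).unwrap();
    // Leaf index 5 = 0b101: right child at height 0, left at height 1, right at height 2.
    // `path[i] == true` marks a left child; each false bit contributes `1 << i` to `position()`.
    assert_eq!(proof.path(), &[false, true, false][..]);
    assert_eq!(proof.position(), 5);
}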
common/append_merkle/src/sha3.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use crate::{Algorithm, HashElement};
use tiny_keccak::{Hasher, Keccak};

pub struct Sha3Algorithm {}

impl<E: HashElement> Algorithm<E> for Sha3Algorithm {
    fn parent(left: &E, right: &E) -> E {
        let mut h = Keccak::v256();
        let mut e = E::null();
        h.update(left.as_ref());
        h.update(right.as_ref());
        h.finalize(e.as_mut());
        e
    }

    fn leaf(data: &[u8]) -> E {
        let mut h = Keccak::v256();
        let mut e = E::null();
        h.update(data.as_ref());
        h.finalize(e.as_mut());
        e
    }
}
common/channel/Cargo.toml (new file, 7 lines)
@@ -0,0 +1,7 @@
[package]
name = "channel"
version = "0.1.0"
edition = "2021"

[dependencies]
tokio = { version = "1.19.2", features = ["sync", "time"] }
common/channel/src/channel.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
use crate::error::Error;
use std::time::Duration;
use tokio::sync::mpsc::error::TryRecvError;
use tokio::sync::{mpsc, oneshot};
use tokio::time::timeout;

const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(3);

pub type ResponseSender<Res> = oneshot::Sender<Res>;

#[derive(Debug)]
pub enum Message<N, Req, Res> {
    Notification(N),
    Request(Req, ResponseSender<Res>),
}

pub struct Channel<N, Req, Res> {
    _phantom: std::marker::PhantomData<(N, Req, Res)>,
}

impl<N, Req, Res> Channel<N, Req, Res> {
    pub fn unbounded() -> (Sender<N, Req, Res>, Receiver<N, Req, Res>) {
        let (sender, receiver) = mpsc::unbounded_channel();
        (Sender { chan: sender }, Receiver { chan: receiver })
    }
}

pub struct Sender<N, Req, Res> {
    chan: mpsc::UnboundedSender<Message<N, Req, Res>>,
}

impl<N, Req, Res> Clone for Sender<N, Req, Res> {
    fn clone(&self) -> Self {
        Sender {
            chan: self.chan.clone(),
        }
    }
}

impl<N, Req, Res> Sender<N, Req, Res> {
    pub fn notify(&self, msg: N) -> Result<(), Error<N, Req, Res>> {
        self.chan
            .send(Message::Notification(msg))
            .map_err(|e| Error::SendError(e))
    }

    pub async fn request(&self, request: Req) -> Result<Res, Error<N, Req, Res>> {
        let (sender, receiver) = oneshot::channel();

        self.chan
            .send(Message::Request(request, sender))
            .map_err(|e| Error::SendError(e))?;

        timeout(DEFAULT_REQUEST_TIMEOUT, receiver)
            .await
            .map_err(|_| Error::TimeoutError)?
            .map_err(|e| Error::RecvError(e))
    }
}

pub struct Receiver<N, Req, Res> {
    chan: mpsc::UnboundedReceiver<Message<N, Req, Res>>,
}

impl<N, Req, Res> Receiver<N, Req, Res> {
    pub async fn recv(&mut self) -> Option<Message<N, Req, Res>> {
        self.chan.recv().await
    }

    pub fn try_recv(&mut self) -> Result<Message<N, Req, Res>, TryRecvError> {
        self.chan.try_recv()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Debug)]
    enum Notification {}

    #[derive(Debug)]
    enum Request {
        GetNumber,
    }

    #[derive(Debug, PartialEq, Eq)]
    enum Response {
        GetNumber(u32),
    }

    #[tokio::test]
    async fn request_response() {
        let (tx, mut rx) = Channel::<Notification, Request, Response>::unbounded();

        let task1 = async move {
            match rx.recv().await.expect("not dropped") {
                Message::Notification(_) => {}
                Message::Request(Request::GetNumber, sender) => {
                    sender.send(Response::GetNumber(42)).expect("not dropped");
                }
            }
        };

        let task2 = async move {
            let result = tx.request(Request::GetNumber).await.expect("not dropped");
            assert_eq!(result, Response::GetNumber(42));
        };

        tokio::join!(task1, task2);
    }
}
common/channel/src/error.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
use crate::Message;
use std::fmt::{Debug, Display, Formatter};
use tokio::sync::{mpsc, oneshot};

#[derive(Debug)]
pub enum Error<N, Req, Res> {
    SendError(mpsc::error::SendError<Message<N, Req, Res>>),
    RecvError(oneshot::error::RecvError),
    TimeoutError,
}

impl<N: Debug, Req: Debug, Res: Debug> Display for Error<N, Req, Res> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "ChannelError: {:?}", self)
    }
}

impl<N: Debug, Req: Debug, Res: Debug> std::error::Error for Error<N, Req, Res> {}
5
common/channel/src/lib.rs
Normal file
5
common/channel/src/lib.rs
Normal file
@ -0,0 +1,5 @@
mod channel;
pub mod error;
pub mod test_util;

pub use crate::channel::{Channel, Message, Receiver, ResponseSender, Sender};
55
common/channel/src/test_util.rs
Normal file
55
common/channel/src/test_util.rs
Normal file
@ -0,0 +1,55 @@
use std::{
    fmt::Debug,
    ops::{Deref, DerefMut},
    time::Duration,
};

use tokio::time::timeout;

use crate::{Message, Receiver};

pub struct TestReceiver<N, Req, Res> {
    recv: Receiver<N, Req, Res>,
}

impl<N, Req, Res> From<Receiver<N, Req, Res>> for TestReceiver<N, Req, Res> {
    fn from(recv: Receiver<N, Req, Res>) -> Self {
        Self { recv }
    }
}

impl<N, Req, Res> Deref for TestReceiver<N, Req, Res> {
    type Target = Receiver<N, Req, Res>;

    fn deref(&self) -> &Self::Target {
        &self.recv
    }
}

impl<N, Req, Res> DerefMut for TestReceiver<N, Req, Res> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.recv
    }
}

impl<N, Req, Res: Debug> TestReceiver<N, Req, Res> {
    pub async fn expect_response(&mut self, response: Res) {
        let request = timeout(Duration::from_secs(3), self.recv())
            .await
            .expect("Timeout to receive request")
            .expect("Channel closed");

        match request {
            Message::Notification(..) => panic!("Unexpected message type"),
            Message::Request(_, resp_sender) => {
                resp_sender.send(response).expect("Channel closed");
            }
        }
    }

    pub async fn expect_responses(&mut self, responses: Vec<Res>) {
        for resp in responses {
            self.expect_response(resp).await;
        }
    }
}
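A minimal sketch (not part of this commit) of how `TestReceiver` might be used from a test, assuming the crate name `channel` and hypothetical `Ping`/`Pong` message types:

```
use channel::test_util::TestReceiver;
use channel::Channel;

#[derive(Debug)]
enum Notification {}

#[derive(Debug)]
enum Request {
    Ping,
}

#[derive(Debug, PartialEq, Eq)]
enum Response {
    Pong,
}

#[tokio::test]
async fn replies_with_canned_response() {
    let (tx, rx) = Channel::<Notification, Request, Response>::unbounded();
    let mut rx: TestReceiver<Notification, Request, Response> = rx.into();

    // Drive both halves concurrently: the test receiver answers the next
    // request with `Response::Pong` while the sender awaits the reply.
    let (_, reply) = tokio::join!(
        rx.expect_response(Response::Pong),
        tx.request(Request::Ping)
    );
    assert_eq!(reply.expect("channel open"), Response::Pong);
}
```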
14
common/contract-interface/Cargo.toml
Normal file
14
common/contract-interface/Cargo.toml
Normal file
@ -0,0 +1,14 @@
[package]
name = "contract-interface"
version = "0.1.0"
edition = "2021"
build = "build.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ethers = "^2"
serde_json = "1.0.82"

[features]
compile-contracts = []
30
common/contract-interface/build.rs
Normal file
30
common/contract-interface/build.rs
Normal file
@ -0,0 +1,30 @@
use std::process::Command;

const INSTALL_ERROR_MESSAGE: &str =
    "Install dependencies for contract fail, try to run `yarn` in folder 'zerog-storage-contracts'";
const COMPILE_ERROR_MESSAGE: &str =
    "Compile solidity contracts fail, try to run `yarn compile` in folder 'zerog-storage-contracts'";

fn main() {
    if cfg!(feature = "compile-contracts") {
        println!("cargo:rerun-if-changed=../../zerog-storage-contracts/contracts/");
        println!("cargo:rerun-if-changed=../../zerog-storage-contracts/hardhat.config.ts");

        let output = Command::new("yarn")
            .arg("--cwd")
            .arg("../../zerog-storage-contracts")
            .status()
            .expect(INSTALL_ERROR_MESSAGE);
        assert!(output.success(), "{}", INSTALL_ERROR_MESSAGE);

        let output = Command::new("yarn")
            .arg("--cwd")
            .arg("../../zerog-storage-contracts")
            .arg("compile")
            .status()
            .expect(COMPILE_ERROR_MESSAGE);
        assert!(output.success(), "{}", COMPILE_ERROR_MESSAGE);
    } else {
        println!("cargo:rerun-if-changed=../../zerog-storage-contracts/artifacts/");
    }
}
13
common/contract-interface/src/lib.rs
Normal file
13
common/contract-interface/src/lib.rs
Normal file
@ -0,0 +1,13 @@
use ethers::prelude::abigen;

// run `cargo doc -p contract-interface --open` to read struct definition

abigen!(
    ZgsFlow,
    "../../zerog-storage-contracts/artifacts/contracts/dataFlow/Flow.sol/Flow.json"
);

abigen!(
    PoraMine,
    "../../zerog-storage-contracts/artifacts/contracts/test/PoraMineTest.sol/PoraMineTest.json"
);
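For orientation only, a hedged sketch of how such generated bindings are typically used with an `ethers` provider; the RPC URL and contract address below are placeholders, and the available methods depend on the Flow contract's ABI rather than anything shown in this commit:

```
use std::sync::Arc;

use contract_interface::ZgsFlow;
use ethers::providers::{Http, Provider};
use ethers::types::Address;

async fn connect_flow(addr: Address) -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; in practice this would come from node configuration.
    let provider = Provider::<Http>::try_from("http://localhost:8545")?;
    let flow = ZgsFlow::new(addr, Arc::new(provider));

    // Calls like `flow.some_view_method().call().await?` become available,
    // with names generated from the JSON ABI referenced above.
    let _ = flow;
    Ok(())
}
```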
9
common/directory/Cargo.toml
Normal file
9
common/directory/Cargo.toml
Normal file
@ -0,0 +1,9 @@
[package]
name = "directory"
version = "0.1.0"
authors = ["pawan <pawandhananjay@gmail.com>"]
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
12
common/directory/src/lib.rs
Normal file
12
common/directory/src/lib.rs
Normal file
@ -0,0 +1,12 @@
/// Names for the default directories.
pub const DEFAULT_ROOT_DIR: &str = ".lighthouse";
pub const DEFAULT_BEACON_NODE_DIR: &str = "beacon";
pub const DEFAULT_NETWORK_DIR: &str = "network";
pub const DEFAULT_VALIDATOR_DIR: &str = "validators";
pub const DEFAULT_SECRET_DIR: &str = "secrets";
pub const DEFAULT_WALLET_DIR: &str = "wallets";

pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet";

/// Base directory name for unnamed testnets passed through the --testnet-dir flag
pub const CUSTOM_TESTNET_DIR: &str = "custom";
12
common/hashset_delay/Cargo.toml
Normal file
12
common/hashset_delay/Cargo.toml
Normal file
@ -0,0 +1,12 @@
[package]
name = "hashset_delay"
version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"

[dependencies]
futures = "0.3.21"
tokio-util = { version = "0.6.10", features = ["time"] }

[dev-dependencies]
tokio = { version = "1.19.2", features = ["time", "rt-multi-thread", "macros"] }
197
common/hashset_delay/src/hashset_delay.rs
Normal file
197
common/hashset_delay/src/hashset_delay.rs
Normal file
@ -0,0 +1,197 @@
//NOTE: This is just a specific case of a HashMapDelay.
// The code has been copied to make unique `insert` and `insert_at` functions.

/// The default delay for entries, in seconds. This is only used when `insert()` is used to add
/// entries.
const DEFAULT_DELAY: u64 = 30;

use futures::prelude::*;
use std::{
    collections::HashMap,
    pin::Pin,
    task::{Context, Poll},
    time::{Duration, Instant},
};
use tokio_util::time::delay_queue::{self, DelayQueue};

pub struct HashSetDelay<K>
where
    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
{
    /// The given entries.
    entries: HashMap<K, MapEntry>,
    /// A queue holding the timeouts of each entry.
    expirations: DelayQueue<K>,
    /// The default expiration timeout of an entry.
    default_entry_timeout: Duration,
}

/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key.
struct MapEntry {
    /// The expiration key for the entry.
    key: delay_queue::Key,
    /// The actual entry.
    value: Instant,
}

impl<K> Default for HashSetDelay<K>
where
    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
{
    fn default() -> Self {
        HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY))
    }
}

impl<K> HashSetDelay<K>
where
    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
{
    /// Creates a new instance of `HashSetDelay`.
    pub fn new(default_entry_timeout: Duration) -> Self {
        HashSetDelay {
            entries: HashMap::new(),
            expirations: DelayQueue::new(),
            default_entry_timeout,
        }
    }

    /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`.
    pub fn insert(&mut self, key: K) {
        self.insert_at(key, self.default_entry_timeout);
    }

    /// Inserts an entry that will expire at a given instant. If the entry already exists, the
    /// timeout is updated.
    pub fn insert_at(&mut self, key: K, entry_duration: Duration) {
        if self.contains(&key) {
            // update the timeout
            self.update_timeout(&key, entry_duration);
        } else {
            let delay_key = self.expirations.insert(key.clone(), entry_duration);
            let entry = MapEntry {
                key: delay_key,
                value: Instant::now() + entry_duration,
            };
            self.entries.insert(key, entry);
        }
    }

    /// Gets a reference to an entry if it exists.
    ///
    /// Returns None if the entry does not exist.
    pub fn get(&self, key: &K) -> Option<&Instant> {
        self.entries.get(key).map(|entry| &entry.value)
    }

    /// Returns true if the key exists, false otherwise.
    pub fn contains(&self, key: &K) -> bool {
        self.entries.contains_key(key)
    }

    /// Returns the length of the mapping.
    pub fn len(&self) -> usize {
        self.entries.len()
    }

    /// Checks if the mapping is empty.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Updates the timeout for a given key. Returns true if the key existed, false otherwise.
    ///
    /// Panics if the duration is too far in the future.
    pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool {
        if let Some(entry) = self.entries.get(key) {
            self.expirations.reset(&entry.key, timeout);
            true
        } else {
            false
        }
    }

    /// Removes a key from the map returning the value associated with the key that was in the map.
    ///
    /// Return false if the key was not in the map.
    pub fn remove(&mut self, key: &K) -> bool {
        if let Some(entry) = self.entries.remove(key) {
            self.expirations.remove(&entry.key);
            return true;
        }
        false
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false.
    pub fn retain<F: FnMut(&K) -> bool>(&mut self, mut f: F) {
        let expiration = &mut self.expirations;
        self.entries.retain(|key, entry| {
            let result = f(key);
            if !result {
                expiration.remove(&entry.key);
            }
            result
        })
    }

    /// Removes all entries from the map.
    pub fn clear(&mut self) {
        self.entries.clear();
        self.expirations.clear();
    }

    /// Returns an iterator referencing all keys in the map.
    pub fn keys(&self) -> impl Iterator<Item = &K> {
        self.entries.keys()
    }
}

impl<K> Stream for HashSetDelay<K>
where
    K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin,
{
    type Item = Result<K, String>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        match self.expirations.poll_expired(cx) {
            Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) {
                Some(_) => Poll::Ready(Some(Ok(key.into_inner()))),
                None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))),
            },
            Poll::Ready(Some(Err(e))) => {
                Poll::Ready(Some(Err(format!("delay queue error: {:?}", e))))
            }
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn should_not_panic() {
        let key = 2u8;

        let mut map = HashSetDelay::default();

        map.insert(key);
        map.update_timeout(&key, Duration::from_secs(100));

        let fut = |cx: &mut Context| {
            let _ = map.poll_next_unpin(cx);
            let _ = map.poll_next_unpin(cx);
            Poll::Ready(())
        };

        future::poll_fn(fut).await;

        map.insert(key);
        map.update_timeout(&key, Duration::from_secs(100));
    }
}
12
common/hashset_delay/src/lib.rs
Normal file
12
common/hashset_delay/src/lib.rs
Normal file
@ -0,0 +1,12 @@
//! This crate provides a single type (its counter-part HashMapDelay has been removed as it
//! currently is not in use in lighthouse):
//! - `HashSetDelay`
//!
//! # HashSetDelay
//!
//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This
//! allows users to add objects and check their expiry deadlines before the `Stream`
//! consumes them.

mod hashset_delay;
pub use crate::hashset_delay::HashSetDelay;
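As a quick, hedged illustration (not part of this commit) of the `Stream` behaviour described above, assuming a tokio runtime and the crate name `hashset_delay`:

```
use std::time::Duration;

use futures::StreamExt;
use hashset_delay::HashSetDelay;

#[tokio::main]
async fn main() {
    // Entries added with `insert()` expire after the default timeout given
    // to `new()`; the stream then yields the expired key back to the caller.
    let mut peers: HashSetDelay<u64> = HashSetDelay::new(Duration::from_millis(50));
    peers.insert(42);

    match peers.next().await {
        Some(Ok(key)) => println!("expired: {}", key),
        Some(Err(e)) => eprintln!("delay queue error: {}", e),
        None => println!("no entries left"),
    }
}
```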
11
common/lighthouse_metrics/Cargo.toml
Normal file
11
common/lighthouse_metrics/Cargo.toml
Normal file
@ -0,0 +1,11 @@
[package]
name = "lighthouse_metrics"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
lazy_static = "1.4.0"
prometheus = "0.13.1"
359
common/lighthouse_metrics/src/lib.rs
Normal file
359
common/lighthouse_metrics/src/lib.rs
Normal file
@ -0,0 +1,359 @@
|
||||
#![allow(clippy::needless_doctest_main)]
|
||||
//! A wrapper around the `prometheus` crate that provides a global, `lazy_static` metrics registry
|
||||
//! and functions to add and use the following components (more info at
|
||||
//! [Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)):
|
||||
//!
|
||||
//! - `Histogram`: used with `start_timer(..)` and `stop_timer(..)` to record durations (e.g.,
|
||||
//! block processing time).
|
||||
//! - `IntCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g.,
|
||||
//! number of block processing requests).
|
||||
//! - `IntGauge`: used to represent a varying integer (e.g., number of attestations per block).
|
||||
//!
|
||||
//! ## Important
|
||||
//!
|
||||
//! Metrics will fail if two items have the same `name`. All metrics must have a unique `name`.
|
||||
//! Because we use a global registry there is no namespace per crate, it's one big global space.
|
||||
//!
|
||||
//! See the [Prometheus naming best practices](https://prometheus.io/docs/practices/naming/) when
|
||||
//! choosing metric names.
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! #[macro_use]
|
||||
//! extern crate lazy_static;
|
||||
//! use lighthouse_metrics::*;
|
||||
//!
|
||||
//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`.
|
||||
//! lazy_static! {
|
||||
//! pub static ref RUN_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
//! "runs_total",
|
||||
//! "Total number of runs"
|
||||
//! );
|
||||
//! pub static ref CURRENT_VALUE: Result<IntGauge> = try_create_int_gauge(
|
||||
//! "current_value",
|
||||
//! "The current value"
|
||||
//! );
|
||||
//! pub static ref RUN_TIME: Result<Histogram> =
|
||||
//! try_create_histogram("run_seconds", "Time taken (measured to high precision)");
|
||||
//! }
|
||||
//!
|
||||
//!
|
||||
//! fn main() {
|
||||
//! for i in 0..100 {
|
||||
//! inc_counter(&RUN_COUNT);
|
||||
//! let timer = start_timer(&RUN_TIME);
|
||||
//!
|
||||
//! for j in 0..10 {
|
||||
//! set_gauge(&CURRENT_VALUE, j);
|
||||
//! println!("Howdy partner");
|
||||
//! }
|
||||
//!
|
||||
//! stop_timer(timer);
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use prometheus::{HistogramOpts, Opts};
|
||||
use std::time::Duration;
|
||||
|
||||
use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec};
|
||||
pub use prometheus::{
|
||||
proto::{Metric, MetricFamily, MetricType},
|
||||
Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, IntCounterVec,
|
||||
IntGauge, IntGaugeVec, Result, TextEncoder,
|
||||
};
|
||||
|
||||
/// Collect all the metrics for reporting.
|
||||
pub fn gather() -> Vec<prometheus::proto::MetricFamily> {
|
||||
prometheus::gather()
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntCounter`, returning `Err` if the registry does not accept the counter
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_counter(name: &str, help: &str) -> Result<IntCounter> {
|
||||
let opts = Opts::new(name, help);
|
||||
let counter = IntCounter::with_opts(opts)?;
|
||||
prometheus::register(Box::new(counter.clone()))?;
|
||||
Ok(counter)
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntGauge`, returning `Err` if the registry does not accept the gauge
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_gauge(name: &str, help: &str) -> Result<IntGauge> {
|
||||
let opts = Opts::new(name, help);
|
||||
let gauge = IntGauge::with_opts(opts)?;
|
||||
prometheus::register(Box::new(gauge.clone()))?;
|
||||
Ok(gauge)
|
||||
}
|
||||
|
||||
/// Attempts to create a `Gauge`, returning `Err` if the registry does not accept the gauge
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_float_gauge(name: &str, help: &str) -> Result<Gauge> {
|
||||
let opts = Opts::new(name, help);
|
||||
let gauge = Gauge::with_opts(opts)?;
|
||||
prometheus::register(Box::new(gauge.clone()))?;
|
||||
Ok(gauge)
|
||||
}
|
||||
|
||||
/// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the histogram
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> {
|
||||
let opts = HistogramOpts::new(name, help);
|
||||
let histogram = Histogram::with_opts(opts)?;
|
||||
prometheus::register(Box::new(histogram.clone()))?;
|
||||
Ok(histogram)
|
||||
}
|
||||
|
||||
/// Attempts to create a `HistogramVec`, returning `Err` if the registry does not accept the histogram
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_histogram_vec(
|
||||
name: &str,
|
||||
help: &str,
|
||||
label_names: &[&str],
|
||||
) -> Result<HistogramVec> {
|
||||
let opts = HistogramOpts::new(name, help);
|
||||
let histogram_vec = HistogramVec::new(opts, label_names)?;
|
||||
prometheus::register(Box::new(histogram_vec.clone()))?;
|
||||
Ok(histogram_vec)
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntGaugeVec`, returning `Err` if the registry does not accept the gauge
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_gauge_vec(
|
||||
name: &str,
|
||||
help: &str,
|
||||
label_names: &[&str],
|
||||
) -> Result<IntGaugeVec> {
|
||||
let opts = Opts::new(name, help);
|
||||
let counter_vec = IntGaugeVec::new(opts, label_names)?;
|
||||
prometheus::register(Box::new(counter_vec.clone()))?;
|
||||
Ok(counter_vec)
|
||||
}
|
||||
|
||||
/// Attempts to create a `GaugeVec`, returning `Err` if the registry does not accept the gauge
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_float_gauge_vec(
|
||||
name: &str,
|
||||
help: &str,
|
||||
label_names: &[&str],
|
||||
) -> Result<GaugeVec> {
|
||||
let opts = Opts::new(name, help);
|
||||
let counter_vec = GaugeVec::new(opts, label_names)?;
|
||||
prometheus::register(Box::new(counter_vec.clone()))?;
|
||||
Ok(counter_vec)
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntCounterVec`, returning `Err` if the registry does not accept the counter
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_counter_vec(
|
||||
name: &str,
|
||||
help: &str,
|
||||
label_names: &[&str],
|
||||
) -> Result<IntCounterVec> {
|
||||
let opts = Opts::new(name, help);
|
||||
let counter_vec = IntCounterVec::new(opts, label_names)?;
|
||||
prometheus::register(Box::new(counter_vec.clone()))?;
|
||||
Ok(counter_vec)
|
||||
}
|
||||
|
||||
/// If `int_gauge_vec.is_ok()`, returns a gauge with the given `name`.
|
||||
pub fn get_int_gauge(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str]) -> Option<IntGauge> {
|
||||
if let Ok(int_gauge_vec) = int_gauge_vec {
|
||||
Some(int_gauge_vec.get_metric_with_label_values(name).ok()?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_gauge<P: Atomic>(
|
||||
gauge_vec: &Result<GenericGaugeVec<P>>,
|
||||
name: &[&str],
|
||||
) -> Option<GenericGauge<P>> {
|
||||
if let Ok(gauge_vec) = gauge_vec {
|
||||
Some(gauge_vec.get_metric_with_label_values(name).ok()?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_gauge_entry<P: Atomic>(
|
||||
gauge_vec: &Result<GenericGaugeVec<P>>,
|
||||
name: &[&str],
|
||||
value: P::T,
|
||||
) {
|
||||
if let Some(v) = get_gauge(gauge_vec, name) {
|
||||
v.set(value)
|
||||
};
|
||||
}
|
||||
|
||||
/// If `int_gauge_vec.is_ok()`, sets the gauge with the given `name` to the given `value`
|
||||
/// otherwise returns false.
|
||||
pub fn set_int_gauge(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str], value: i64) -> bool {
|
||||
if let Ok(int_gauge_vec) = int_gauge_vec {
|
||||
int_gauge_vec
|
||||
.get_metric_with_label_values(name)
|
||||
.map(|v| {
|
||||
v.set(value);
|
||||
true
|
||||
})
|
||||
.unwrap_or_else(|_| false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// If `int_counter_vec.is_ok()`, returns a counter with the given `name`.
|
||||
pub fn get_int_counter(
|
||||
int_counter_vec: &Result<IntCounterVec>,
|
||||
name: &[&str],
|
||||
) -> Option<IntCounter> {
|
||||
if let Ok(int_counter_vec) = int_counter_vec {
|
||||
Some(int_counter_vec.get_metric_with_label_values(name).ok()?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Increments the `int_counter_vec` with the given `name`.
|
||||
pub fn inc_counter_vec(int_counter_vec: &Result<IntCounterVec>, name: &[&str]) {
|
||||
if let Some(counter) = get_int_counter(int_counter_vec, name) {
|
||||
counter.inc()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_counter_vec_by(int_counter_vec: &Result<IntCounterVec>, name: &[&str], amount: u64) {
|
||||
if let Some(counter) = get_int_counter(int_counter_vec, name) {
|
||||
counter.inc_by(amount);
|
||||
}
|
||||
}
|
||||
|
||||
/// If `histogram_vec.is_ok()`, returns a histogram with the given `name`.
|
||||
pub fn get_histogram(histogram_vec: &Result<HistogramVec>, name: &[&str]) -> Option<Histogram> {
|
||||
if let Ok(histogram_vec) = histogram_vec {
|
||||
Some(histogram_vec.get_metric_with_label_values(name).ok()?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts a timer on `vec` with the given `name`.
|
||||
pub fn start_timer_vec(vec: &Result<HistogramVec>, name: &[&str]) -> Option<HistogramTimer> {
|
||||
get_histogram(vec, name).map(|h| h.start_timer())
|
||||
}
|
||||
|
||||
/// Starts a timer for the given `Histogram`, stopping when it gets dropped or given to `stop_timer(..)`.
|
||||
pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> {
|
||||
if let Ok(histogram) = histogram {
|
||||
Some(histogram.start_timer())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Observes the given `duration` on the histogram in `vec` with the given `name`.
|
||||
pub fn observe_timer_vec(vec: &Result<HistogramVec>, name: &[&str], duration: Duration) {
|
||||
if let Some(h) = get_histogram(vec, name) {
|
||||
h.observe(duration_to_f64(duration))
|
||||
}
|
||||
}
|
||||
|
||||
/// Stops a timer created with `start_timer(..)`.
|
||||
pub fn stop_timer(timer: Option<HistogramTimer>) {
|
||||
if let Some(t) = timer {
|
||||
t.observe_duration()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_counter(counter: &Result<IntCounter>) {
|
||||
if let Ok(counter) = counter {
|
||||
counter.inc();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_counter_by(counter: &Result<IntCounter>, value: u64) {
|
||||
if let Ok(counter) = counter {
|
||||
counter.inc_by(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_gauge_vec(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str], value: i64) {
|
||||
if let Some(gauge) = get_int_gauge(int_gauge_vec, name) {
|
||||
gauge.set(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_gauge_vec(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str]) {
|
||||
if let Some(gauge) = get_int_gauge(int_gauge_vec, name) {
|
||||
gauge.inc();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dec_gauge_vec(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str]) {
|
||||
if let Some(gauge) = get_int_gauge(int_gauge_vec, name) {
|
||||
gauge.dec();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_gauge(gauge: &Result<IntGauge>, value: i64) {
|
||||
if let Ok(gauge) = gauge {
|
||||
gauge.set(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_float_gauge(gauge: &Result<Gauge>, value: f64) {
|
||||
if let Ok(gauge) = gauge {
|
||||
gauge.set(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_float_gauge_vec(gauge_vec: &Result<GaugeVec>, name: &[&str], value: f64) {
|
||||
if let Some(gauge) = get_gauge(gauge_vec, name) {
|
||||
gauge.set(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_gauge(gauge: &Result<IntGauge>) {
|
||||
if let Ok(gauge) = gauge {
|
||||
gauge.inc();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dec_gauge(gauge: &Result<IntGauge>) {
|
||||
if let Ok(gauge) = gauge {
|
||||
gauge.dec();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn maybe_set_gauge(gauge: &Result<IntGauge>, value_opt: Option<i64>) {
|
||||
if let Some(value) = value_opt {
|
||||
set_gauge(gauge, value)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn maybe_set_float_gauge(gauge: &Result<Gauge>, value_opt: Option<f64>) {
|
||||
if let Some(value) = value_opt {
|
||||
set_float_gauge(gauge, value)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the value of a `Histogram` manually.
|
||||
pub fn observe(histogram: &Result<Histogram>, value: f64) {
|
||||
if let Ok(histogram) = histogram {
|
||||
histogram.observe(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn observe_duration(histogram: &Result<Histogram>, duration: Duration) {
|
||||
if let Ok(histogram) = histogram {
|
||||
histogram.observe(duration_to_f64(duration))
|
||||
}
|
||||
}
|
||||
|
||||
fn duration_to_f64(duration: Duration) -> f64 {
|
||||
// This conversion was taken from here:
|
||||
//
|
||||
// https://docs.rs/prometheus/0.5.0/src/prometheus/histogram.rs.html#550-555
|
||||
let nanos = f64::from(duration.subsec_nanos()) / 1e9;
|
||||
duration.as_secs() as f64 + nanos
|
||||
}
|
36
common/merkle_light/Cargo.toml
Normal file
36
common/merkle_light/Cargo.toml
Normal file
@ -0,0 +1,36 @@
|
||||
[package]
|
||||
name = "merkle_light"
|
||||
version = "0.4.0"
|
||||
authors = [
|
||||
"Ivan Prisyazhnyy <john.koepi@gmail.com>"
|
||||
]
|
||||
edition = "2021"
|
||||
|
||||
description = "Light merkle tree implementation with SPV support and dependency agnostic."
|
||||
license = "BSD-3-Clause"
|
||||
homepage = "https://github.com/sitano/merkle_light"
|
||||
repository = "https://github.com/sitano/merkle_light"
|
||||
documentation = "https://sitano.github.io/merkle_light/merkle_light/index.html"
|
||||
readme = "README.md"
|
||||
keywords = ["merkle", "merkle-tree", "no_std"]
|
||||
categories = ["data-structures", "cryptography"]
|
||||
|
||||
[dependencies]
|
||||
|
||||
rayon = "1.5.3"
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = []
|
||||
|
||||
[package.metadata.release]
|
||||
sign-commit = true
|
||||
upload-doc = true
|
||||
doc-branch = "gh-pages"
|
||||
pre-release-commit-message = "Release version {{version}}."
|
||||
pro-release-commit-message = "Start next development iteration {{version}}."
|
||||
tag-message = "Release version {{version}}."
|
||||
doc-commit-message = "Update documentation."
|
||||
dev-version-ext = "pre"
|
113
common/merkle_light/README.md
Normal file
113
common/merkle_light/README.md
Normal file
@ -0,0 +1,113 @@
|
||||
# merkle
|
||||
|
||||
[![Build Status](https://travis-ci.org/sitano/merkle_light.svg?branch=master&style=flat)](https://travis-ci.org/sitano/merkle_light)
|
||||
[![Issues](http://img.shields.io/github/issues/sitano/merkle.svg?style=flat)](https://github.com/sitano/merkle_light/issues)
|
||||
![License](https://img.shields.io/badge/license-bsd3-brightgreen.svg?style=flat)
|
||||
[![Crates.io](https://img.shields.io/crates/v/merkle_light.svg)](https://crates.io/crates/merkle_light)
|
||||
|
||||
*merkle* is a lightweight Rust implementation of a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree).
|
||||
|
||||
## Features
|
||||
|
||||
- external dependency agnostic
|
||||
- `core::hash::Hasher` compatibility
|
||||
- standard types hasher implementations
|
||||
- `#[derive(Hashable)]` support for simple struct
|
||||
- customizable merkle leaf/node hashing algorithm
|
||||
- support for custom hash types (e.g. [u8; 16], [u64; 4], [u128; 2], struct)
|
||||
- customizable hashing algorithm
|
||||
- linear memory layout, no nodes on heap
|
||||
- buildable from iterator, objects or hashes
|
||||
- certificate transparency style merkle hashing support
|
||||
- SPV included
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation is [available](https://sitano.github.io/merkle_light/merkle_light/index.html).
|
||||
|
||||
# Examples
|
||||
|
||||
* `test_sip.rs`: algorithm implementation example for std sip hasher, u64 hash items
|
||||
* `test_xor128.rs`: custom hash example xor128
|
||||
* `test_cmh.rs`: custom merkle hasher implementation example
|
||||
* `crypto_bitcoin_mt.rs`: bitcoin merkle tree using crypto lib
|
||||
* `crypto_chaincore_mt.rs`: chain core merkle tree using crypto lib
|
||||
* `ring_bitcoin_mt.rs`: bitcoin merkle tree using ring lib
|
||||
|
||||
# Quick start
|
||||
|
||||
```
|
||||
extern crate crypto;
|
||||
extern crate merkle_light;
|
||||
|
||||
use std::fmt;
|
||||
use std::hash::Hasher;
|
||||
use std::iter::FromIterator;
|
||||
use crypto::sha3::{Sha3, Sha3Mode};
|
||||
use crypto::digest::Digest;
|
||||
use merkle_light::hash::{Algorithm, Hashable};
|
||||
use merkle_light::merkle::MerkleTree;
|
||||
|
||||
pub struct ExampleAlgorithm(Sha3);
|
||||
|
||||
impl ExampleAlgorithm {
|
||||
pub fn new() -> ExampleAlgorithm {
|
||||
ExampleAlgorithm(Sha3::new(Sha3Mode::Sha3_256))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ExampleAlgorithm {
|
||||
fn default() -> ExampleAlgorithm {
|
||||
ExampleAlgorithm::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for ExampleAlgorithm {
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.input(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Algorithm<[u8; 32]> for ExampleAlgorithm {
|
||||
#[inline]
|
||||
fn hash(&mut self) -> [u8; 32] {
|
||||
let mut h = [0u8; 32];
|
||||
self.0.result(&mut h);
|
||||
h
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
self.0.reset();
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut h1 = [0u8; 32];
|
||||
let mut h2 = [0u8; 32];
|
||||
let mut h3 = [0u8; 32];
|
||||
h1[0] = 0x11;
|
||||
h2[0] = 0x22;
|
||||
h3[0] = 0x33;
|
||||
|
||||
let t: MerkleTree<[u8; 32], ExampleAlgorithm> = MerkleTree::from_iter(vec![h1, h2, h3]);
|
||||
println!("{:?}", t.root());
|
||||
}
|
||||
```
|
||||
|
||||
## Bug Reporting
|
||||
|
||||
Please report bugs either as pull requests or as issues in [the issue
|
||||
tracker](https://github.com/sitano/merkle_light). *merkle* has a
|
||||
**full disclosure** vulnerability policy. **Please do NOT attempt to report
|
||||
any security vulnerability in this code privately to anybody.**
|
||||
|
||||
## License
|
||||
|
||||
See [LICENSE](LICENSE).
|
170
common/merkle_light/benches/crypto_sha512.rs
Normal file
170
common/merkle_light/benches/crypto_sha512.rs
Normal file
@ -0,0 +1,170 @@
|
||||
//! cargo bench --features "crypto_bench" --verbose
|
||||
#![cfg(feature = "crypto_bench")]
|
||||
#![feature(test)]
|
||||
#![feature(rand)]
|
||||
#![feature(crypto)]
|
||||
|
||||
mod hash512;
|
||||
|
||||
extern crate crypto;
|
||||
extern crate merkle_light;
|
||||
extern crate rand;
|
||||
extern crate test;
|
||||
|
||||
use crypto::digest::Digest;
|
||||
use crypto::sha2::Sha512;
|
||||
use hash512::Hash512;
|
||||
use merkle_light::hash::{Algorithm, Hashable};
|
||||
use merkle_light::merkle::MerkleTree;
|
||||
use rand::Rng;
|
||||
use std::hash::Hasher;
|
||||
use std::iter::FromIterator;
|
||||
use test::Bencher;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct A(Sha512);
|
||||
|
||||
impl A {
|
||||
fn new() -> A {
|
||||
A(Sha512::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for A {
|
||||
fn default() -> Self {
|
||||
A::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for A {
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.input(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Algorithm<Hash512> for A {
|
||||
#[inline]
|
||||
fn hash(&mut self) -> Hash512 {
|
||||
let mut h = [0u8; 64];
|
||||
self.0.result(&mut h);
|
||||
Hash512(h)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
self.0.reset();
|
||||
}
|
||||
}
|
||||
|
||||
fn tree_5() -> Vec<Hash512> {
|
||||
["one", "two", "three", "four"]
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let mut a = A::new();
|
||||
Hashable::hash(x, &mut a);
|
||||
a.hash()
|
||||
})
|
||||
.collect::<Vec<Hash512>>()
|
||||
}
|
||||
|
||||
fn tree_160() -> Vec<Hash512> {
|
||||
let mut values = vec![vec![0u8; 256]; 160];
|
||||
let mut rng = rand::IsaacRng::new_unseeded();
|
||||
|
||||
for mut v in &mut values {
|
||||
rng.fill_bytes(&mut v);
|
||||
}
|
||||
|
||||
values
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let mut a = A::new();
|
||||
a.write(x.as_ref());
|
||||
a.hash()
|
||||
})
|
||||
.collect::<Vec<Hash512>>()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512(b: &mut Bencher) {
|
||||
let mut h = [0u8; 64];
|
||||
b.iter(|| {
|
||||
let mut x = Sha512::new();
|
||||
x.input("12345".as_ref());
|
||||
x.result(&mut h);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_5(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
b.iter(|| MerkleTree::<Hash512, A>::from_iter(values.clone()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_5_proof(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
let tree: MerkleTree<Hash512, A> = MerkleTree::from_iter(values.clone());
|
||||
|
||||
b.iter(|| {
|
||||
for i in 0..values.len() {
|
||||
let proof = tree.gen_proof(i);
|
||||
test::black_box(proof);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_5_proof_check(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
let tree: MerkleTree<Hash512, A> = MerkleTree::from_iter(values.clone());
|
||||
let proofs = (0..values.len())
|
||||
.map(|i| tree.gen_proof(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
b.iter(|| {
|
||||
for proof in &proofs {
|
||||
test::black_box(proof.validate::<A>());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_160(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
b.iter(|| MerkleTree::<Hash512, A>::from_iter(values.clone()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_160_proof(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
let tree: MerkleTree<Hash512, A> = MerkleTree::from_iter(values.clone());
|
||||
|
||||
b.iter(|| {
|
||||
for i in 0..values.len() {
|
||||
let proof = tree.gen_proof(i);
|
||||
test::black_box(proof);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_crypto_sha512_from_data_160_proof_check(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
let tree: MerkleTree<Hash512, A> = MerkleTree::from_iter(values.clone());
|
||||
let proofs = (0..values.len())
|
||||
.map(|i| tree.gen_proof(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
b.iter(|| {
|
||||
for proof in &proofs {
|
||||
test::black_box(proof.validate::<A>());
|
||||
}
|
||||
});
|
||||
}
|
58
common/merkle_light/benches/hash512/hash512.rs
Normal file
58
common/merkle_light/benches/hash512/hash512.rs
Normal file
@ -0,0 +1,58 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct Hash512(pub [u8; 64]);
|
||||
|
||||
impl Default for Hash512 {
|
||||
fn default() -> Self {
|
||||
Hash512([0u8; 64])
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for Hash512 {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for Hash512 {
|
||||
#[inline]
|
||||
fn partial_cmp(&self, other: &Hash512) -> Option<Ordering> {
|
||||
PartialOrd::partial_cmp(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn lt(&self, other: &Hash512) -> bool {
|
||||
PartialOrd::lt(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn le(&self, other: &Hash512) -> bool {
|
||||
PartialOrd::le(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn ge(&self, other: &Hash512) -> bool {
|
||||
PartialOrd::ge(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gt(&self, other: &Hash512) -> bool {
|
||||
PartialOrd::gt(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for Hash512 {
|
||||
#[inline]
|
||||
fn cmp(&self, other: &Hash512) -> Ordering {
|
||||
Ord::cmp(&&self.0[..], &&other.0[..])
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Hash512 {
|
||||
fn eq(&self, other: &Hash512) -> bool {
|
||||
self.0.as_ref() == other.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Hash512 {}
|
3
common/merkle_light/benches/hash512/mod.rs
Normal file
3
common/merkle_light/benches/hash512/mod.rs
Normal file
@ -0,0 +1,3 @@
mod hash512;

pub use hash512::hash512::Hash512;
168
common/merkle_light/benches/ring_sha512.rs
Normal file
168
common/merkle_light/benches/ring_sha512.rs
Normal file
@ -0,0 +1,168 @@
|
||||
//! cargo bench --features "crypto_bench" --verbose
|
||||
#![cfg(feature = "crypto_bench")]
|
||||
#![feature(test)]
|
||||
#![feature(rand)]
|
||||
|
||||
mod hash512;
|
||||
mod ringx;
|
||||
|
||||
extern crate merkle_light;
|
||||
extern crate rand;
|
||||
extern crate test;
|
||||
|
||||
use hash512::Hash512;
|
||||
use merkle_light::hash::{Algorithm, Hashable};
|
||||
use merkle_light::merkle::MerkleTree;
|
||||
use rand::Rng;
|
||||
use ringx::Context;
|
||||
use ringx::SHA512;
|
||||
use std::hash::Hasher;
|
||||
use std::iter::FromIterator;
|
||||
use test::Bencher;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct B(Context);
|
||||
|
||||
impl B {
|
||||
fn new() -> B {
|
||||
B(Context::new(&SHA512))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for B {
|
||||
fn default() -> Self {
|
||||
B::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for B {
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.update(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl Algorithm<Hash512> for B {
|
||||
#[inline]
|
||||
fn hash(&mut self) -> Hash512 {
|
||||
let mut h = [0u8; 64];
|
||||
h.copy_from_slice(self.0.finish().as_ref());
|
||||
Hash512(h)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
self.0.reset();
|
||||
}
|
||||
}
|
||||
|
||||
fn tree_5() -> Vec<Hash512> {
|
||||
["one", "two", "three", "four"]
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let mut a = B::new();
|
||||
Hashable::hash(x, &mut a);
|
||||
a.hash()
|
||||
})
|
||||
.collect::<Vec<Hash512>>()
|
||||
}
|
||||
|
||||
fn tree_160() -> Vec<Hash512> {
|
||||
let mut values = vec![vec![0u8; 256]; 160];
|
||||
let mut rng = rand::IsaacRng::new_unseeded();
|
||||
|
||||
for mut v in &mut values {
|
||||
rng.fill_bytes(&mut v);
|
||||
}
|
||||
|
||||
values
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let mut a = B::new();
|
||||
a.write(x.as_ref());
|
||||
a.hash()
|
||||
})
|
||||
.collect::<Vec<Hash512>>()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let mut x = Context::new(&SHA512);
|
||||
x.update("12345".as_ref());
|
||||
x.finish();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_5(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
b.iter(|| MerkleTree::<Hash512, B>::from_iter(values.clone()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_5_proof(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
let tree: MerkleTree<Hash512, B> = MerkleTree::from_iter(values.clone());
|
||||
|
||||
b.iter(|| {
|
||||
for i in 0..values.len() {
|
||||
let proof = tree.gen_proof(i);
|
||||
test::black_box(proof);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_5_proof_check(b: &mut Bencher) {
|
||||
let values = tree_5();
|
||||
let tree: MerkleTree<Hash512, B> = MerkleTree::from_iter(values.clone());
|
||||
let proofs = (0..values.len())
|
||||
.map(|i| tree.gen_proof(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
b.iter(|| {
|
||||
for proof in &proofs {
|
||||
test::black_box(proof.validate::<B>());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_160(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
b.iter(|| MerkleTree::<Hash512, B>::from_iter(values.clone()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_160_proof(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
let tree: MerkleTree<Hash512, B> = MerkleTree::from_iter(values.clone());
|
||||
|
||||
b.iter(|| {
|
||||
for i in 0..values.len() {
|
||||
let proof = tree.gen_proof(i);
|
||||
test::black_box(proof);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_ringx_sha512_from_data_160_proof_check(b: &mut Bencher) {
|
||||
let values = tree_160();
|
||||
let tree: MerkleTree<Hash512, B> = MerkleTree::from_iter(values.clone());
|
||||
let proofs = (0..values.len())
|
||||
.map(|i| tree.gen_proof(i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
b.iter(|| {
|
||||
for proof in &proofs {
|
||||
test::black_box(proof.validate::<B>());
|
||||
}
|
||||
});
|
||||
}
|
26
common/merkle_light/benches/ringx/init.rs
Normal file
26
common/merkle_light/benches/ringx/init.rs
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2016 Brian Smith.
|
||||
//
|
||||
// Permission to use, copy, modify, and/or distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
|
||||
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
#[inline(always)]
|
||||
pub fn init_once() {
|
||||
#[cfg(not(target_os = "ios"))]
|
||||
{
|
||||
use std;
|
||||
extern "C" {
|
||||
fn GFp_cpuid_setup();
|
||||
}
|
||||
static INIT: std::sync::Once = std::sync::ONCE_INIT;
|
||||
INIT.call_once(|| unsafe { GFp_cpuid_setup() });
|
||||
}
|
||||
}
|
567
common/merkle_light/benches/ringx/mod.rs
Normal file
567
common/merkle_light/benches/ringx/mod.rs
Normal file
@ -0,0 +1,567 @@
|
||||
//! Improvement version of [`ring::digest::Context`].
|
||||
//!
|
||||
//! [`Context.finish`] now has `(&mut self)` instead of `(mut self)`.
|
||||
//! [`Context`] acquired state `reset` thing.
|
||||
|
||||
#![cfg(feature = "crypto_bench")]
|
||||
#![allow(dead_code)]
|
||||
|
||||
extern crate rand;
|
||||
extern crate ring;
|
||||
extern crate test;
|
||||
|
||||
mod init;
|
||||
|
||||
use std::fmt;
|
||||
|
||||
// XXX: Replace with `const fn` when `const fn` is stable:
|
||||
// https://github.com/rust-lang/rust/issues/24111
|
||||
#[cfg(target_endian = "little")]
|
||||
macro_rules! u32x2 {
|
||||
( $first:expr, $second:expr ) => {
|
||||
((($second as u64) << 32) | ($first as u64))
|
||||
};
|
||||
}
|
||||
|
||||
/// A context for multi-step (Init-Update-Finish) digest calculations.
|
||||
///
|
||||
/// C analog: `EVP_MD_CTX`.
|
||||
pub struct Context {
|
||||
state: State,
|
||||
|
||||
// Note that SHA-512 has a 128-bit input bit counter, but this
|
||||
// implementation only supports up to 2^64-1 input bits for all algorithms,
|
||||
// so a 64-bit counter is more than sufficient.
|
||||
completed_data_blocks: u64,
|
||||
|
||||
// TODO: More explicitly force 64-bit alignment for |pending|.
|
||||
pending: [u8; MAX_BLOCK_LEN],
|
||||
num_pending: usize,
|
||||
|
||||
/// The context's algorithm.
|
||||
pub algorithm: &'static Algorithm,
|
||||
}
|
||||
|
||||
impl Context {
|
||||
/// Constructs a new context.
|
||||
///
|
||||
/// C analogs: `EVP_DigestInit`, `EVP_DigestInit_ex`
|
||||
pub fn new(algorithm: &'static Algorithm) -> Context {
|
||||
init::init_once();
|
||||
|
||||
Context {
|
||||
algorithm,
|
||||
state: algorithm.initial_state,
|
||||
completed_data_blocks: 0,
|
||||
pending: [0u8; MAX_BLOCK_LEN],
|
||||
num_pending: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the digest with all the data in `data`. `update` may be called
|
||||
/// zero or more times until `finish` is called. It must not be called
|
||||
/// after `finish` has been called.
|
||||
///
|
||||
/// C analog: `EVP_DigestUpdate`
|
||||
pub fn update(&mut self, data: &[u8]) {
|
||||
if data.len() < self.algorithm.block_len - self.num_pending {
|
||||
self.pending[self.num_pending..(self.num_pending + data.len())].copy_from_slice(data);
|
||||
self.num_pending += data.len();
|
||||
return;
|
||||
}
|
||||
|
||||
let mut remaining = data;
|
||||
if self.num_pending > 0 {
|
||||
let to_copy = self.algorithm.block_len - self.num_pending;
|
||||
self.pending[self.num_pending..self.algorithm.block_len]
|
||||
.copy_from_slice(&data[..to_copy]);
|
||||
|
||||
unsafe {
|
||||
(self.algorithm.block_data_order)(&mut self.state, self.pending.as_ptr(), 1);
|
||||
}
|
||||
self.completed_data_blocks = self.completed_data_blocks.checked_add(1).unwrap();
|
||||
|
||||
remaining = &remaining[to_copy..];
|
||||
self.num_pending = 0;
|
||||
}
|
||||
|
||||
let num_blocks = remaining.len() / self.algorithm.block_len;
|
||||
let num_to_save_for_later = remaining.len() % self.algorithm.block_len;
|
||||
if num_blocks > 0 {
|
||||
unsafe {
|
||||
(self.algorithm.block_data_order)(&mut self.state, remaining.as_ptr(), num_blocks);
|
||||
}
|
||||
self.completed_data_blocks = self
|
||||
.completed_data_blocks
|
||||
.checked_add(polyfill::slice::u64_from_usize(num_blocks))
|
||||
.unwrap();
|
||||
}
|
||||
if num_to_save_for_later > 0 {
|
||||
self.pending[..num_to_save_for_later]
|
||||
.copy_from_slice(&remaining[(remaining.len() - num_to_save_for_later)..]);
|
||||
self.num_pending = num_to_save_for_later;
|
||||
}
|
||||
}
|
||||
|
||||
/// Finalizes the digest calculation and returns the digest value. `finish`
|
||||
/// consumes the context so it cannot be (mis-)used after `finish` has been
|
||||
/// called.
|
||||
///
|
||||
/// C analogs: `EVP_DigestFinal`, `EVP_DigestFinal_ex`
|
||||
pub fn finish(&mut self) -> Digest {
|
||||
// We know |num_pending < self.algorithm.block_len|, because we would
|
||||
// have processed the block otherwise.
|
||||
|
||||
let mut padding_pos = self.num_pending;
|
||||
self.pending[padding_pos] = 0x80;
|
||||
padding_pos += 1;
|
||||
|
||||
if padding_pos > self.algorithm.block_len - self.algorithm.len_len {
|
||||
polyfill::slice::fill(&mut self.pending[padding_pos..self.algorithm.block_len], 0);
|
||||
unsafe {
|
||||
(self.algorithm.block_data_order)(&mut self.state, self.pending.as_ptr(), 1);
|
||||
}
|
||||
// We don't increase |self.completed_data_blocks| because the
|
||||
// padding isn't data, and so it isn't included in the data length.
|
||||
padding_pos = 0;
|
||||
}
|
||||
|
||||
polyfill::slice::fill(
|
||||
&mut self.pending[padding_pos..(self.algorithm.block_len - 8)],
|
||||
0,
|
||||
);
|
||||
|
||||
// Output the length, in bits, in big endian order.
|
||||
let mut completed_data_bits: u64 = self
|
||||
.completed_data_blocks
|
||||
.checked_mul(polyfill::slice::u64_from_usize(self.algorithm.block_len))
|
||||
.unwrap()
|
||||
.checked_add(polyfill::slice::u64_from_usize(self.num_pending))
|
||||
.unwrap()
|
||||
.checked_mul(8)
|
||||
.unwrap();
|
||||
|
||||
for b in (&mut self.pending[(self.algorithm.block_len - 8)..self.algorithm.block_len])
|
||||
.into_iter()
|
||||
.rev()
|
||||
{
|
||||
*b = completed_data_bits as u8;
|
||||
completed_data_bits /= 0x100;
|
||||
}
|
||||
unsafe {
|
||||
(self.algorithm.block_data_order)(&mut self.state, self.pending.as_ptr(), 1);
|
||||
}
|
||||
|
||||
Digest {
|
||||
algorithm: self.algorithm,
|
||||
value: (self.algorithm.format_output)(&self.state),
|
||||
}
|
||||
}
|
||||
|
||||
/// The algorithm that this context is using.
|
||||
#[inline(always)]
|
||||
pub fn algorithm(&self) -> &'static Algorithm {
|
||||
self.algorithm
|
||||
}
|
||||
|
||||
/// Reset context state.
|
||||
pub fn reset(&mut self) {
|
||||
self.state = self.algorithm.initial_state;
|
||||
self.pending = [0u8; MAX_BLOCK_LEN];
|
||||
self.completed_data_blocks = 0;
|
||||
self.num_pending = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: This should just be `#[derive(Clone)]` but that doesn't work because
|
||||
// `[u8; 128]` doesn't implement `Clone`.
|
||||
impl Clone for Context {
|
||||
fn clone(&self) -> Context {
|
||||
Context {
|
||||
state: self.state,
|
||||
pending: self.pending,
|
||||
completed_data_blocks: self.completed_data_blocks,
|
||||
num_pending: self.num_pending,
|
||||
algorithm: self.algorithm,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the digest of `data` using the given digest algorithm.
|
||||
///
|
||||
/// C analog: `EVP_Digest`
|
||||
///
|
||||
/// # Examples:
|
||||
///
|
||||
/// ```
|
||||
/// # #[cfg(feature = "use_heap")]
|
||||
/// # fn main() {
|
||||
/// use ring::{digest, test};
|
||||
///
|
||||
/// let expected_hex =
|
||||
/// "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b";
|
||||
/// let expected: Vec<u8> = test::from_hex(expected_hex).unwrap();
|
||||
/// let actual = digest::digest(&digest::SHA256, b"hello, world");
|
||||
///
|
||||
/// assert_eq!(&expected, &actual.as_ref());
|
||||
/// # }
|
||||
///
|
||||
/// # #[cfg(not(feature = "use_heap"))]
|
||||
/// # fn main() { }
|
||||
/// ```
|
||||
pub fn digest(algorithm: &'static Algorithm, data: &[u8]) -> Digest {
|
||||
let mut ctx = Context::new(algorithm);
|
||||
ctx.update(data);
|
||||
ctx.finish()
|
||||
}
|
||||
|
||||
/// A calculated digest value.
|
||||
///
|
||||
/// Use `as_ref` to get the value as a `&[u8]`.
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct Digest {
|
||||
value: Output,
|
||||
algorithm: &'static Algorithm,
|
||||
}
|
||||
|
||||
impl Digest {
|
||||
/// The algorithm that was used to calculate the digest value.
|
||||
#[inline(always)]
|
||||
pub fn algorithm(&self) -> &'static Algorithm {
|
||||
self.algorithm
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for Digest {
|
||||
#[inline(always)]
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
&(polyfill::slice::u64_as_u8(&self.value))[..self.algorithm.output_len]
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Digest {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "{:?}:", self.algorithm)?;
|
||||
for byte in self.as_ref() {
|
||||
write!(fmt, "{:02x}", byte)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A digest algorithm.
|
||||
///
|
||||
/// C analog: `EVP_MD`
|
||||
pub struct Algorithm {
|
||||
/// C analog: `EVP_MD_size`
|
||||
pub output_len: usize,
|
||||
|
||||
/// The size of the chaining value of the digest function, in bytes. For
|
||||
/// non-truncated algorithms (SHA-1, SHA-256, SHA-512), this is equal to
|
||||
/// `output_len`. For truncated algorithms (e.g. SHA-384, SHA-512/256),
|
||||
/// this is equal to the length before truncation. This is mostly helpful
|
||||
/// for determining the size of an HMAC key that is appropriate for the
|
||||
/// digest algorithm.
|
||||
pub chaining_len: usize,
|
||||
|
||||
/// C analog: `EVP_MD_block_size`
|
||||
pub block_len: usize,
|
||||
|
||||
/// The length of the length in the padding.
|
||||
pub len_len: usize,
|
||||
|
||||
pub block_data_order: unsafe extern "C" fn(state: &mut State, data: *const u8, num: usize),
|
||||
pub format_output: fn(input: &State) -> Output,
|
||||
|
||||
pub initial_state: State,
|
||||
|
||||
pub id: AlgorithmID,
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub enum AlgorithmID {
|
||||
SHA256,
|
||||
SHA512,
|
||||
SHA512_256,
|
||||
}
|
||||
|
||||
impl PartialEq for Algorithm {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.id == other.id
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Algorithm {}
|
||||
|
||||
impl fmt::Debug for Algorithm {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
// This would have to change if/when we add other algorithms with the
|
||||
// same lengths.
|
||||
let (n, suffix) =
|
||||
if self.output_len == SHA512_256_OUTPUT_LEN && self.block_len == SHA512_BLOCK_LEN {
|
||||
(512, "_256")
|
||||
} else if self.output_len == 20 {
|
||||
(1, "")
|
||||
} else {
|
||||
(self.output_len * 8, "")
|
||||
};
|
||||
write!(fmt, "SHA{}{}", n, suffix)
|
||||
}
|
||||
}
|
||||
|
||||
/// SHA-256 as specified in [FIPS 180-4].
|
||||
///
|
||||
/// [FIPS 180-4]: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
|
||||
pub static SHA256: Algorithm = Algorithm {
|
||||
output_len: SHA256_OUTPUT_LEN,
|
||||
chaining_len: SHA256_OUTPUT_LEN,
|
||||
block_len: 512 / 8,
|
||||
len_len: 64 / 8,
|
||||
block_data_order: GFp_sha256_block_data_order,
|
||||
format_output: sha256_format_output,
|
||||
initial_state: [
|
||||
u32x2!(0x6a09e667u32, 0xbb67ae85u32),
|
||||
u32x2!(0x3c6ef372u32, 0xa54ff53au32),
|
||||
u32x2!(0x510e527fu32, 0x9b05688cu32),
|
||||
u32x2!(0x1f83d9abu32, 0x5be0cd19u32),
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
],
|
||||
id: AlgorithmID::SHA256,
|
||||
};
|
||||
|
||||
/// SHA-512 as specified in [FIPS 180-4].
|
||||
///
|
||||
/// [FIPS 180-4]: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
|
||||
pub static SHA512: Algorithm = Algorithm {
|
||||
output_len: SHA512_OUTPUT_LEN,
|
||||
chaining_len: SHA512_OUTPUT_LEN,
|
||||
block_len: SHA512_BLOCK_LEN,
|
||||
len_len: SHA512_LEN_LEN,
|
||||
block_data_order: GFp_sha512_block_data_order,
|
||||
format_output: sha512_format_output,
|
||||
initial_state: [
|
||||
0x6a09e667f3bcc908,
|
||||
0xbb67ae8584caa73b,
|
||||
0x3c6ef372fe94f82b,
|
||||
0xa54ff53a5f1d36f1,
|
||||
0x510e527fade682d1,
|
||||
0x9b05688c2b3e6c1f,
|
||||
0x1f83d9abfb41bd6b,
|
||||
0x5be0cd19137e2179,
|
||||
],
|
||||
id: AlgorithmID::SHA512,
|
||||
};
|
||||
|
||||
/// SHA-512/256 as specified in [FIPS 180-4].
|
||||
///
|
||||
/// This is *not* the same as just truncating the output of SHA-512, as
|
||||
/// SHA-512/256 has its own initial state distinct from SHA-512's initial
|
||||
/// state.
|
||||
///
|
||||
/// [FIPS 180-4]: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
|
||||
pub static SHA512_256: Algorithm = Algorithm {
|
||||
output_len: SHA512_256_OUTPUT_LEN,
|
||||
chaining_len: SHA512_OUTPUT_LEN,
|
||||
block_len: SHA512_BLOCK_LEN,
|
||||
len_len: SHA512_LEN_LEN,
|
||||
block_data_order: GFp_sha512_block_data_order,
|
||||
format_output: sha512_format_output,
|
||||
initial_state: [
|
||||
0x22312194fc2bf72c,
|
||||
0x9f555fa3c84c64c2,
|
||||
0x2393b86b6f53b151,
|
||||
0x963877195940eabd,
|
||||
0x96283ee2a88effe3,
|
||||
0xbe5e1e2553863992,
|
||||
0x2b0199fc2c85b8aa,
|
||||
0x0eb72ddc81c52ca2,
|
||||
],
|
||||
id: AlgorithmID::SHA512_256,
|
||||
};
|
||||
|
||||
// We use u64 to try to ensure 64-bit alignment/padding.
|
||||
pub type State = [u64; MAX_CHAINING_LEN / 8];
|
||||
|
||||
pub type Output = [u64; MAX_OUTPUT_LEN / 8];
|
||||
|
||||
/// The maximum block length (`Algorithm::block_len`) of all the algorithms in
|
||||
/// this module.
|
||||
pub const MAX_BLOCK_LEN: usize = 1024 / 8;
|
||||
|
||||
/// The maximum output length (`Algorithm::output_len`) of all the algorithms
|
||||
/// in this module.
|
||||
pub const MAX_OUTPUT_LEN: usize = 512 / 8;
|
||||
|
||||
/// The maximum chaining length (`Algorithm::chaining_len`) of all the
|
||||
/// algorithms in this module.
|
||||
pub const MAX_CHAINING_LEN: usize = MAX_OUTPUT_LEN;
|
||||
|
||||
mod polyfill {
|
||||
pub mod slice {
|
||||
use std::slice::from_raw_parts;
|
||||
|
||||
// https://internals.rust-lang.org/t/
|
||||
// safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871
|
||||
#[inline(always)]
|
||||
pub fn u64_as_u32(src: &[u64]) -> &[u32] {
|
||||
unsafe { from_raw_parts(src.as_ptr() as *const u32, src.len() * 2) }
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn u64_from_usize(x: usize) -> u64 {
|
||||
x as u64
|
||||
}
|
||||
|
||||
// https://internals.rust-lang.org/t/
|
||||
// stabilizing-basic-functions-on-arrays-and-slices/2868
|
||||
#[inline(always)]
|
||||
pub fn fill(dest: &mut [u8], value: u8) {
|
||||
for d in dest {
|
||||
*d = value;
|
||||
}
|
||||
}
|
||||
|
||||
// https://internals.rust-lang.org/t/
|
||||
// safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871
|
||||
#[inline(always)]
|
||||
pub fn u64_as_u8(src: &[u64]) -> &[u8] {
|
||||
unsafe { from_raw_parts(src.as_ptr() as *const u8, src.len() * 8) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
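// Sanity sketch for the slice-reinterpretation helpers above (hypothetical test):
// one u64 reads back as two u32 words or eight bytes.
#[cfg(test)]
mod polyfill_tests {
    use super::polyfill;

    #[test]
    fn u64_slice_views() {
        let words = [0x0102_0304_0506_0708u64];
        assert_eq!(polyfill::slice::u64_as_u32(&words).len(), 2);
        assert_eq!(polyfill::slice::u64_as_u8(&words).len(), 8);
        assert_eq!(polyfill::slice::u64_from_usize(7), 7u64);
    }
}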
pub fn sha256_format_output(input: &State) -> Output {
|
||||
let input = &polyfill::slice::u64_as_u32(input)[..8];
|
||||
[
|
||||
u32x2!(input[0].to_be(), input[1].to_be()),
|
||||
u32x2!(input[2].to_be(), input[3].to_be()),
|
||||
u32x2!(input[4].to_be(), input[5].to_be()),
|
||||
u32x2!(input[6].to_be(), input[7].to_be()),
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
]
|
||||
}
|
||||
|
||||
pub fn sha512_format_output(input: &State) -> Output {
|
||||
[
|
||||
input[0].to_be(),
|
||||
input[1].to_be(),
|
||||
input[2].to_be(),
|
||||
input[3].to_be(),
|
||||
input[4].to_be(),
|
||||
input[5].to_be(),
|
||||
input[6].to_be(),
|
||||
input[7].to_be(),
|
||||
]
|
||||
}
|
||||
|
||||
/// The length of the output of SHA-256, in bytes.
|
||||
pub const SHA256_OUTPUT_LEN: usize = 256 / 8;
|
||||
|
||||
/// The length of the output of SHA-512, in bytes.
|
||||
pub const SHA512_OUTPUT_LEN: usize = 512 / 8;
|
||||
|
||||
/// The length of the output of SHA-512/256, in bytes.
|
||||
pub const SHA512_256_OUTPUT_LEN: usize = 256 / 8;
|
||||
|
||||
/// The length of a block for SHA-512-based algorithms, in bytes.
|
||||
const SHA512_BLOCK_LEN: usize = 1024 / 8;
|
||||
|
||||
/// The length of the length field for SHA-512-based algorithms, in bytes.
|
||||
const SHA512_LEN_LEN: usize = 128 / 8;
|
||||
|
||||
extern "C" {
|
||||
fn GFp_sha256_block_data_order(state: &mut State, data: *const u8, num: usize);
|
||||
fn GFp_sha512_block_data_order(state: &mut State, data: *const u8, num: usize);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test_util {
|
||||
use super::*;
|
||||
|
||||
pub static ALL_ALGORITHMS: [&'static Algorithm; 3] = [&SHA256, &SHA512, &SHA512_256];
|
||||
}
|
||||
|
||||
/*
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
mod max_input {
|
||||
use super::super::super::digest;
|
||||
|
||||
macro_rules! max_input_tests {
|
||||
( $algorithm_name:ident ) => {
|
||||
#[allow(non_snake_case)]
|
||||
mod $algorithm_name {
|
||||
use super::super::super::super::digest;
|
||||
|
||||
#[test]
|
||||
fn max_input_test() {
|
||||
super::max_input_test(&digest::$algorithm_name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn too_long_input_test_block() {
|
||||
super::too_long_input_test_block(
|
||||
&digest::$algorithm_name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn too_long_input_test_byte() {
|
||||
super::too_long_input_test_byte(
|
||||
&digest::$algorithm_name);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn max_input_test(alg: &'static digest::Algorithm) {
|
||||
let mut context = nearly_full_context(alg);
|
||||
let next_input = vec![0u8; alg.block_len - 1];
|
||||
context.update(&next_input);
|
||||
let _ = context.finish(); // no panic
|
||||
}
|
||||
|
||||
fn too_long_input_test_block(alg: &'static digest::Algorithm) {
|
||||
let mut context = nearly_full_context(alg);
|
||||
let next_input = vec![0u8; alg.block_len];
|
||||
context.update(&next_input);
|
||||
let _ = context.finish(); // should panic
|
||||
}
|
||||
|
||||
fn too_long_input_test_byte(alg: &'static digest::Algorithm) {
|
||||
let mut context = nearly_full_context(alg);
|
||||
let next_input = vec![0u8; alg.block_len - 1];
|
||||
context.update(&next_input); // no panic
|
||||
context.update(&[0]);
|
||||
let _ = context.finish(); // should panic
|
||||
}
|
||||
|
||||
fn nearly_full_context(alg: &'static digest::Algorithm)
|
||||
-> digest::Context {
|
||||
// All implementations currently support up to 2^64-1 bits
|
||||
// of input; according to the spec, SHA-384 and SHA-512
|
||||
// support up to 2^128-1, but that's not implemented yet.
|
||||
let max_bytes = 1u64 << (64 - 3);
|
||||
let max_blocks = max_bytes / (alg.block_len as u64);
|
||||
digest::Context {
|
||||
algorithm: alg,
|
||||
state: alg.initial_state,
|
||||
completed_data_blocks: max_blocks - 1,
|
||||
pending: [0u8; digest::MAX_BLOCK_LEN],
|
||||
num_pending: 0,
|
||||
}
|
||||
}
|
||||
|
||||
max_input_tests!(SHA256);
|
||||
max_input_tests!(SHA512);
|
||||
}
|
||||
}*/
|
3
common/merkle_light/clippy.toml
Normal file
@ -0,0 +1,3 @@
|
||||
disallowed-names = [
|
||||
"unreadable_literal"
|
||||
]
|
4
common/merkle_light/examples/no_std.rs
Normal file
@ -0,0 +1,4 @@
|
||||
// This is just for no_std testing
|
||||
#[allow(unused_imports)]
|
||||
|
||||
fn main() {}
|
135
common/merkle_light/src/hash.rs
Normal file
@ -0,0 +1,135 @@
|
||||
//! Hash infrastructure for items in Merkle Tree.
|
||||
|
||||
use core::hash::Hasher;
|
||||
|
||||
/// A hashable type.
|
||||
///
|
||||
/// Types implementing `Hashable` are able to be [`hash`]ed with an instance of
|
||||
/// [`Hasher`].
|
||||
///
|
||||
/// ## Implementing `Hashable`
|
||||
///
|
||||
/// You can derive `Hashable` with `#[derive(Hashable)]` if all fields implement `Hashable`.
|
||||
/// The resulting hash will be the combination of the values from calling
|
||||
/// [`hash`] on each field.
|
||||
///
|
||||
/// ```text
|
||||
/// #[macro_use]
|
||||
/// extern crate merkle_light_derive;
|
||||
/// extern crate merkle_light;
|
||||
///
|
||||
/// use merkle_light::hash::Hashable;
|
||||
///
|
||||
/// fn main() {
|
||||
/// #[derive(Hashable)]
|
||||
/// struct Foo {
|
||||
/// name: String,
|
||||
/// country: String,
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// If you need more control over how a value is hashed, you can of course
|
||||
/// implement the `Hashable` trait yourself:
|
||||
///
|
||||
/// ```
|
||||
/// extern crate merkle_light;
|
||||
///
|
||||
/// use merkle_light::hash::Hashable;
|
||||
/// use std::hash::Hasher;
|
||||
/// use std::collections::hash_map::DefaultHasher;
|
||||
///
|
||||
/// fn main() {
|
||||
/// struct Person {
|
||||
/// id: u32,
|
||||
/// name: String,
|
||||
/// phone: u64,
|
||||
/// }
|
||||
///
|
||||
/// impl<H: Hasher> Hashable<H> for Person {
|
||||
/// fn hash(&self, state: &mut H) {
|
||||
/// self.id.hash(state);
|
||||
/// self.name.hash(state);
|
||||
/// self.phone.hash(state);
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let foo = Person{
|
||||
/// id: 1,
|
||||
/// name: String::from("blah"),
|
||||
/// phone: 2,
|
||||
/// };
|
||||
///
|
||||
/// let mut hr = DefaultHasher::new();
|
||||
/// foo.hash(&mut hr);
|
||||
/// assert_eq!(hr.finish(), 7101638158313343130)
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ## `Hashable` and `Eq`
|
||||
///
|
||||
/// When implementing both `Hashable` and [`Eq`], it is important that the following
|
||||
/// property holds:
|
||||
///
|
||||
/// ```text
|
||||
/// k1 == k2 -> hash(k1) == hash(k2)
|
||||
/// ```
|
||||
///
|
||||
/// In other words, if two keys are equal, their hashes must also be equal.
|
||||
pub trait Hashable<H: Hasher> {
|
||||
/// Feeds this value into the given [`Hasher`].
|
||||
///
|
||||
/// [`Hasher`]: trait.Hasher.html
|
||||
fn hash(&self, state: &mut H);
|
||||
|
||||
/// Feeds a slice of this type into the given [`Hasher`].
|
||||
///
|
||||
/// [`Hasher`]: trait.Hasher.html
|
||||
fn hash_slice(data: &[Self], state: &mut H)
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
for piece in data {
|
||||
piece.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait for hashing an arbitrary stream of bytes for calculating merkle tree
|
||||
/// nodes.
|
||||
///
|
||||
/// `T` is a hash item that must have a known size at compile time, be globally
/// ordered, and have a default value that acts as the neutral element of the hash
/// space. The neutral element is interpreted as 0 or nil and is required for
/// evaluating the Merkle tree.
|
||||
///
|
||||
/// [`Algorithm`] breaks the [`Hasher`] contract at `finish()`, but that is intended.
|
||||
/// This trait extends [`Hasher`] with `hash -> T` and `reset` state methods,
|
||||
/// plus implements default behavior of evaluation of MT interior nodes.
|
||||
pub trait Algorithm<T>: Hasher + Default
|
||||
where
|
||||
T: Clone + AsRef<[u8]>,
|
||||
{
|
||||
/// Returns the hash value for the data stream written so far.
|
||||
fn hash(&mut self) -> T;
|
||||
|
||||
/// Reset Hasher state.
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
*self = Self::default();
|
||||
}
|
||||
|
||||
/// Returns the hash value for an MT leaf. This default implementation hashes the
/// raw leaf bytes; an implementation may prepend a domain prefix such as 0x00.
|
||||
#[inline]
|
||||
fn leaf(&mut self, leaf: T) -> T {
|
||||
self.write(leaf.as_ref());
|
||||
self.hash()
|
||||
}
|
||||
|
||||
/// Returns the hash value for an MT interior node. This default implementation
/// hashes the two child hashes concatenated; an implementation may prepend a
/// domain prefix such as 0x01.
|
||||
#[inline]
|
||||
fn node(&mut self, left: T, right: T) -> T {
|
||||
self.write(left.as_ref());
|
||||
self.write(right.as_ref());
|
||||
self.hash()
|
||||
}
|
||||
}
|
188
common/merkle_light/src/hash_impl.rs
Normal file
@ -0,0 +1,188 @@
|
||||
extern crate alloc;
|
||||
|
||||
use crate::hash::Hashable;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use core::hash::Hasher;
|
||||
use core::mem;
|
||||
use core::slice;
|
||||
|
||||
macro_rules! impl_write {
|
||||
($(($ty:ident, $meth:ident),)*) => {$(
|
||||
impl<H: Hasher> Hashable<H> for $ty {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.$meth(*self)
|
||||
}
|
||||
|
||||
#[allow(trivial_casts, unsafe_code)]
|
||||
fn hash_slice(data: &[$ty], state: &mut H) {
|
||||
let newlen = data.len() * mem::size_of::<$ty>();
|
||||
let ptr = data.as_ptr() as *const u8;
|
||||
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
|
||||
}
|
||||
}
|
||||
)*}
|
||||
}
|
||||
|
||||
impl_write! {
|
||||
(u8, write_u8),
|
||||
(u16, write_u16),
|
||||
(u32, write_u32),
|
||||
(u64, write_u64),
|
||||
(usize, write_usize),
|
||||
(i8, write_i8),
|
||||
(i16, write_i16),
|
||||
(i32, write_i32),
|
||||
(i64, write_i64),
|
||||
(isize, write_isize),
|
||||
// unstable: (u128, write_u128),
|
||||
// unstable: (i128, write_i128),
|
||||
}
|
||||
|
||||
macro_rules! impl_array {
|
||||
($ty:ident $($N:expr)+) => {$(
|
||||
impl<H: Hasher> Hashable<H> for [$ty; $N] {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write(self.as_ref())
|
||||
}
|
||||
|
||||
#[allow(trivial_casts, unsafe_code)]
|
||||
fn hash_slice(data: &[[$ty; $N]], state: &mut H) {
|
||||
let newlen = data.len() * mem::size_of::<[$ty; $N]>();
|
||||
let ptr = data.as_ptr() as *const u8;
|
||||
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
|
||||
}
|
||||
}
|
||||
)*}
|
||||
}
|
||||
|
||||
impl_array! { u8
|
||||
1 2 3 4 5 6 7 8 9 10
|
||||
11 12 13 14 15 16 17 18 19 20
|
||||
21 22 23 24 25 26 27 28 29 30
|
||||
31 32 33 34 35 36 37 38 39 40
|
||||
41 42 43 44 45 46 47 48 49 50
|
||||
51 52 53 54 55 56 57 58 59 60
|
||||
61 62 63 64
|
||||
}
|
||||
|
||||
impl<H: Hasher> Hashable<H> for Vec<u8> {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write(self.as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> Hashable<H> for bool {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write_u8(*self as u8)
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> Hashable<H> for char {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write_u32(*self as u32)
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> Hashable<H> for str {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write(self.as_bytes());
|
||||
// empty str nope: state.write_u8(0xff)
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> Hashable<H> for String {
|
||||
fn hash(&self, state: &mut H) {
|
||||
state.write(self.as_bytes());
|
||||
// empty str nope: state.write_u8(0xff)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_hash_tuple {
|
||||
() => (
|
||||
impl<H: Hasher> Hashable<H> for () {
|
||||
fn hash(&self, _: &mut H) {}
|
||||
}
|
||||
);
|
||||
|
||||
( $($name:ident)+) => (
|
||||
impl<Z: Hasher, $($name: Hashable<Z>),*> Hashable<Z> for ($($name,)*)
|
||||
where
|
||||
last_type!($($name,)+): ?Sized
|
||||
{
|
||||
#[allow(non_snake_case)]
|
||||
fn hash(&self, state: &mut Z) {
|
||||
let ($(ref $name,)*) = *self;
|
||||
$($name.hash(state);)*
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
macro_rules! last_type {
|
||||
($a:ident,) => { $a };
|
||||
($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
|
||||
}
|
||||
|
||||
impl_hash_tuple! {}
|
||||
impl_hash_tuple! { A }
|
||||
impl_hash_tuple! { A B }
|
||||
impl_hash_tuple! { A B C }
|
||||
impl_hash_tuple! { A B C D }
|
||||
impl_hash_tuple! { A B C D E }
|
||||
impl_hash_tuple! { A B C D E F }
|
||||
impl_hash_tuple! { A B C D E F G }
|
||||
impl_hash_tuple! { A B C D E F G H }
|
||||
impl_hash_tuple! { A B C D E F G H I }
|
||||
impl_hash_tuple! { A B C D E F G H I J }
|
||||
impl_hash_tuple! { A B C D E F G H I J K }
|
||||
impl_hash_tuple! { A B C D E F G H I J K L }
|
||||
|
||||
impl<H: Hasher, T: Hashable<H>> Hashable<H> for [T] {
|
||||
fn hash(&self, state: &mut H) {
|
||||
self.len().hash(state);
|
||||
Hashable::hash_slice(self, state)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, H: Hasher, T: ?Sized + Hashable<H>> Hashable<H> for &'a T {
|
||||
fn hash(&self, state: &mut H) {
|
||||
(**self).hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, H: Hasher, T: ?Sized + Hashable<H>> Hashable<H> for &'a mut T {
|
||||
fn hash(&self, state: &mut H) {
|
||||
(**self).hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher, T: ?Sized> Hashable<H> for *const T {
|
||||
#[allow(trivial_casts, unsafe_code)]
|
||||
fn hash(&self, state: &mut H) {
|
||||
if mem::size_of::<Self>() == mem::size_of::<usize>() {
|
||||
// Thin pointer
|
||||
state.write_usize(*self as *const () as usize);
|
||||
} else {
|
||||
// Fat pointer
|
||||
let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) };
|
||||
state.write_usize(a);
|
||||
state.write_usize(b);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher, T: ?Sized> Hashable<H> for *mut T {
|
||||
#[allow(trivial_casts, unsafe_code)]
|
||||
fn hash(&self, state: &mut H) {
|
||||
if mem::size_of::<Self>() == mem::size_of::<usize>() {
|
||||
// Thin pointer
|
||||
state.write_usize(*self as *const () as usize);
|
||||
} else {
|
||||
// Fat pointer
|
||||
let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) };
|
||||
state.write_usize(a);
|
||||
state.write_usize(b);
|
||||
}
|
||||
}
|
||||
}
|
178
common/merkle_light/src/lib.rs
Normal file
@ -0,0 +1,178 @@
|
||||
//! Light _Merkle Tree_ implementation.
//!
//! The Merkle tree (MT) is implemented as a full binary tree allocated as a vec
//! of statically sized hashes to give the hashes better locality. The MT is
//! specialized over the hashing algorithm and the hash item. The [`Hashable`]
//! trait is compatible with `std::hash::Hasher` and supports custom hash
//! algorithms. The implementation does not depend on any external crypto
//! libraries and tries to be as performant as possible.
|
||||
//!
|
||||
//! By default, this tree implementation uses the encoding scheme from
//! _Certificate Transparency_, [RFC 6962](https://tools.ietf.org/html/rfc6962),
//! although the encoding of leaves and nodes can be overridden:
|
||||
//!
|
||||
//! ```text
|
||||
//! MTH({d(0)}) = ALG(0x00 || d(0)).
|
||||
//! For n > 1, let k be the largest power of two smaller than n (i.e.,
|
||||
//! k < n <= 2k). The Merkle tree Hash of an n-element list D[n] is then
|
||||
//! defined recursively as
|
||||
//! MTH(D[n]) = ALG(0x01 || MTH(D[0:k]) || MTH(D[k:n])),
|
||||
//! ```
|
||||
//!
|
||||
//! See also: <https://en.wikipedia.org/wiki/Merkle_tree>
|
||||
//!
|
||||
//! # Implementation choices
|
||||
//!
|
||||
//! The main idea is that all of the code should be specialized at compile time with
//! a minimum of allocation calls, hashes should be fixed-size arrays whose length is
//! known at compile time, the hash algorithm should be a trait that does not depend
//! on any external cryptographic libraries, and the library itself should mimic the
//! std Rust API.
|
||||
//!
|
||||
//! The standard way in Rust is to hash objects with a `std::hash::Hasher`, and that
//! is the main reason behind the choice of abstractions:
|
||||
//!
|
||||
//! `Object : Hashable<H> -> Hasher + Algorithm <- Merkle Tree`
|
||||
//!
|
||||
//! The custom [`merkle::hash::Hashable`] trait allows implementations to differ from
//! the [`std::collections`]-related hashes, supports different implementations for
//! different hashing algorithms / schemas, and conforms to the object-safety trait
//! rules.
|
||||
//!
|
||||
//! [`Algorithm`] complements [`Hasher`] to be reusable and follows the idea
|
||||
//! that the result hash is a mapping of the data stream.
|
||||
//!
|
||||
//! [`Algorithm.hash`] had to change its signature from `&self` to `&mut self` because
//! most cryptographic digest algorithms leave their internal state unusable after
//! finalization. The `ring` library, though, exposes interfaces that are incompatible
//! with a `start-update-finish-reset` lifecycle: it requires either cloning its state
//! on finalization, or `Cell`-ing it via unsafe code.
|
||||
//!
|
||||
//! Going back to [`Algorithm.write(&mut self, &[u8])`] instead of `write(T)` would
//! allow the [`Hasher`] constraint on the [`Algorithm`] trait to be relaxed, even
//! though the two still work well together.
|
||||
//!
|
||||
//! # Interface
|
||||
//!
|
||||
//! ```text
|
||||
//! - build_tree (items) -> tree
|
||||
//! - get_root -> hash
|
||||
//! - gen_proof -> proof
|
||||
//! - validate_proof (proof, leaf, root) -> bool
|
||||
//! ```
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! [`test_cmh.rs`]: custom merkle hasher implementation example
|
||||
//! [`crypto_bitcoin_mt.rs`]: bitcoin merkle tree using crypto lib
|
||||
//! [`crypto_chaincore_mt.rs`]: chain core merkle tree using crypto lib
|
||||
//! [`ring_bitcoin_mt.rs`]: bitcoin merkle tree using ring lib
|
||||
//!
|
||||
//! # Quick start
|
||||
//!
|
||||
//! ```
|
||||
//! #[cfg(feature = "chaincore")]
|
||||
//! extern crate crypto;
|
||||
//! extern crate merkle_light;
|
||||
//!
|
||||
//! #[cfg(feature = "chaincore")]
|
||||
//! mod example {
|
||||
//! use std::fmt;
|
||||
//! use std::hash::Hasher;
|
||||
//! use std::iter::FromIterator;
|
||||
//! use crypto::sha3::{Sha3, Sha3Mode};
|
||||
//! use crypto::digest::Digest;
|
||||
//! use merkle_light::hash::{Algorithm, Hashable};
|
||||
//!
|
||||
//! pub struct ExampleAlgorithm(Sha3);
|
||||
//!
|
||||
//! impl ExampleAlgorithm {
|
||||
//! pub fn new() -> ExampleAlgorithm {
|
||||
//! ExampleAlgorithm(Sha3::new(Sha3Mode::Sha3_256))
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl Default for ExampleAlgorithm {
|
||||
//! fn default() -> ExampleAlgorithm {
|
||||
//! ExampleAlgorithm::new()
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl Hasher for ExampleAlgorithm {
|
||||
//! #[inline]
|
||||
//! fn write(&mut self, msg: &[u8]) {
|
||||
//! self.0.input(msg)
|
||||
//! }
|
||||
//!
|
||||
//! #[inline]
|
||||
//! fn finish(&self) -> u64 {
|
||||
//! unimplemented!()
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl Algorithm<[u8; 32]> for ExampleAlgorithm {
|
||||
//! #[inline]
|
||||
//! fn hash(&mut self) -> [u8; 32] {
|
||||
//! let mut h = [0u8; 32];
|
||||
//! self.0.result(&mut h);
|
||||
//! h
|
||||
//! }
|
||||
//!
|
||||
//! #[inline]
|
||||
//! fn reset(&mut self) {
|
||||
//! self.0.reset();
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! fn main() {
|
||||
//! #[cfg(feature = "chaincore")]
|
||||
//! {
|
||||
//! use example::ExampleAlgorithm;
|
||||
//! use merkle_light::merkle::MerkleTree;
|
||||
//! use std::iter::FromIterator;
|
||||
//!
|
||||
//! let mut h1 = [0u8; 32];
|
||||
//! let mut h2 = [0u8; 32];
|
||||
//! let mut h3 = [0u8; 32];
|
||||
//! h1[0] = 0x11;
|
||||
//! h2[0] = 0x22;
|
||||
//! h3[0] = 0x33;
|
||||
//!
|
||||
//! let t: MerkleTree<[u8; 32], ExampleAlgorithm> = MerkleTree::from_iter(vec![h1, h2, h3]);
|
||||
//! println!("{:?}", t.root());
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
#![deny(
|
||||
missing_docs,
|
||||
unused_qualifications,
|
||||
missing_debug_implementations,
|
||||
missing_copy_implementations,
|
||||
trivial_casts,
|
||||
trivial_numeric_casts,
|
||||
unsafe_code,
|
||||
unstable_features,
|
||||
unused_import_braces
|
||||
)]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
#![cfg_attr(feature = "nightly", allow(unstable_features))]
|
||||
|
||||
/// Hash infrastructure for items in Merkle tree.
|
||||
pub mod hash;
|
||||
|
||||
/// Common implementations for [`Hashable`].
|
||||
mod hash_impl;
|
||||
|
||||
/// Merkle tree inclusion proof
|
||||
pub mod proof;
|
||||
|
||||
/// Merkle tree abstractions, implementation and algorithms.
|
||||
pub mod merkle;
|
||||
|
||||
/// Tests data.
|
||||
#[cfg(test)]
|
||||
mod test_item;
|
||||
|
||||
/// Tests for Merkle Hasher Customization
|
||||
#[cfg(test)]
|
||||
mod test_cmh;
|
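// End-to-end sketch of the interface listed in the crate docs above (build the tree,
// read the root, generate an inclusion proof, validate it), as it might look from a
// downstream crate. Illustrative only: it assumes `RawLeafSha3Algorithm` from
// `common/merkle_tree` (added later in this commit) as the `Algorithm` implementation.
fn merkle_interface_sketch() {
    use merkle_light::merkle::MerkleTree;
    use merkle_tree::RawLeafSha3Algorithm;

    let data: Vec<[u8; 32]> = (0u8..5).map(|i| [i; 32]).collect();
    let mt: MerkleTree<[u8; 32], RawLeafSha3Algorithm> = MerkleTree::new(data);
    let root = mt.root();
    let proof = mt.gen_proof(2); // inclusion proof for the third leaf
    assert_eq!(proof.root(), root);
    assert!(proof.validate::<RawLeafSha3Algorithm>());
}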
305
common/merkle_light/src/merkle.rs
Normal file
@ -0,0 +1,305 @@
|
||||
extern crate alloc;
|
||||
|
||||
use crate::hash::{Algorithm, Hashable};
|
||||
use crate::proof::Proof;
|
||||
use alloc::vec::Vec;
|
||||
use core::iter::FromIterator;
|
||||
use core::marker::PhantomData;
|
||||
use core::ops;
|
||||
use rayon::prelude::*;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::Debug;
|
||||
|
||||
/// Merkle Tree.
|
||||
///
|
||||
/// All leafs and nodes are stored in a linear array (vec).
|
||||
///
|
||||
/// A merkle tree is a tree in which every non-leaf node is the hash of its
|
||||
/// children nodes. A diagram depicting how it works:
|
||||
///
|
||||
/// ```text
|
||||
/// root = h1234 = h(h12 + h34)
|
||||
/// / \
|
||||
/// h12 = h(h1 + h2) h34 = h(h3 + h4)
|
||||
/// / \ / \
|
||||
/// h1 = h(tx1) h2 = h(tx2) h3 = h(tx3) h4 = h(tx4)
|
||||
/// ```
|
||||
///
|
||||
/// In memory layout:
|
||||
///
|
||||
/// ```text
|
||||
/// [h1 h2 h3 h4 h12 h34 root]
|
||||
/// ```
|
||||
///
|
||||
/// Merkle root is always the last element in the array.
|
||||
///
|
||||
/// The number of inputs is not always a power of two, in which case the tree is not
/// the perfectly balanced structure shown above. In that case, a layer with an odd
/// number of nodes is padded with a NULL (default) node, and a parent whose right
/// child is NULL becomes a link node that refers to the hash of its left child (via
/// the link map) instead of hashing the pair, so empty nodes never contribute to a
/// hash.
|
||||
///
|
||||
/// TODO: Ord
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct MerkleTree<T: Ord + Clone + AsRef<[u8]> + Sync + Send, A: Algorithm<T>> {
|
||||
data: Vec<T>,
|
||||
leafs: usize,
|
||||
height: usize,
|
||||
link_map: BTreeMap<usize, usize>,
|
||||
_a: PhantomData<A>,
|
||||
}
|
||||
|
||||
impl<T: Ord + Clone + Debug + Default + AsRef<[u8]> + Sync + Send, A: Algorithm<T>>
|
||||
MerkleTree<T, A>
|
||||
{
|
||||
/// Creates new merkle from a sequence of hashes.
|
||||
pub fn new<I: IntoIterator<Item = T>>(data: I) -> MerkleTree<T, A> {
|
||||
Self::from_iter(data)
|
||||
}
|
||||
|
||||
/// Creates new merkle tree from a list of hashable objects.
|
||||
pub fn from_data<O: Hashable<A>, I: IntoIterator<Item = O>>(data: I) -> MerkleTree<T, A> {
|
||||
let mut a = A::default();
|
||||
Self::from_iter(data.into_iter().map(|x| {
|
||||
a.reset();
|
||||
x.hash(&mut a);
|
||||
a.hash()
|
||||
}))
|
||||
}
|
||||
|
||||
fn build(&mut self) {
|
||||
let mut width = self.leafs;
|
||||
|
||||
// build tree
|
||||
let mut layer_start: usize = 0;
|
||||
let mut layer_end: usize = width;
|
||||
while width > 1 {
|
||||
// if there is odd num of elements, fill in a NULL.
|
||||
if width & 1 == 1 {
|
||||
self.data.push(Self::null_node());
|
||||
width += 1;
|
||||
layer_end += 1;
|
||||
}
|
||||
|
||||
let layer: Vec<_> = (layer_start..layer_end)
|
||||
.into_par_iter()
|
||||
.step_by(2)
|
||||
.map(|i| {
|
||||
let mut a = A::default();
|
||||
// If the right child is not NULL, the left child is ensured to be not NULL.
|
||||
let mut link_map_update = None;
|
||||
let h = if self.data[i + 1] != Self::null_node() {
|
||||
a.node(self.data[i].clone(), self.data[i + 1].clone())
|
||||
} else {
|
||||
// If a child is NULL, the parent should be a linking node to the actual node hash.
|
||||
let parent_index = (i - layer_start) / 2 + layer_end;
|
||||
if self.data[i] == Self::null_node() {
|
||||
// If both are NULL, the left child must be a linking node.
|
||||
let linked_to = *self.link_map.get(&i).unwrap();
|
||||
link_map_update = Some((parent_index, linked_to, Some(i)));
|
||||
Self::null_node()
|
||||
} else {
|
||||
match self.link_map.get(&(i + 1)) {
|
||||
// Right child is linked to a hash, so we just compute the parent hash.
|
||||
Some(index) => {
|
||||
assert_ne!(self.data[*index], Self::null_node());
|
||||
a.node(self.data[i].clone(), self.data[*index].clone())
|
||||
}
|
||||
// Right child is NULL, so link the parent to the left child which has a hash stored.
|
||||
None => {
|
||||
link_map_update = Some((parent_index, i, None));
|
||||
Self::null_node()
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
(h, link_map_update)
|
||||
})
|
||||
.collect();
|
||||
for (node, maybe_link_map_update) in layer {
|
||||
self.data.push(node);
|
||||
if let Some((from, to, maybe_remove)) = maybe_link_map_update {
|
||||
self.link_map.insert(from, to);
|
||||
if let Some(remove) = maybe_remove {
|
||||
self.link_map.remove(&remove);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
layer_start = layer_end;
|
||||
width >>= 1;
|
||||
layer_end += width;
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate merkle tree inclusion proof for leaf `i`
|
||||
pub fn gen_proof(&self, i: usize) -> Proof<T> {
|
||||
if self.leafs == 1 {
|
||||
assert_eq!(i, 0);
|
||||
return Proof::new(vec![self.root()], vec![]);
|
||||
}
|
||||
|
||||
assert!(i < self.leafs); // i in [0 .. self.leafs)
|
||||
|
||||
let mut lemma: Vec<T> = Vec::with_capacity(self.height + 1); // path + root
|
||||
let mut path: Vec<bool> = Vec::with_capacity(self.height - 1); // path - 1
|
||||
|
||||
let mut base = 0;
|
||||
let mut j = i;
|
||||
|
||||
// level 1 width
|
||||
let mut width = self.leafs;
|
||||
if width & 1 == 1 {
|
||||
width += 1;
|
||||
}
|
||||
|
||||
lemma.push(self.data[j].clone());
|
||||
while base + 1 < self.len() {
|
||||
let proof_hash_index = if j & 1 == 0 {
|
||||
// j is left
|
||||
let right_index = base + j + 1;
|
||||
if self.data[right_index] == Self::null_node() {
|
||||
match self.link_map.get(&right_index) {
|
||||
// A link node, so the proof uses the linked hash.
|
||||
Some(index) => {
|
||||
assert_ne!(self.data[*index], Self::null_node());
|
||||
Some(*index)
|
||||
}
|
||||
// A NULL node, just skip.
|
||||
None => None,
|
||||
}
|
||||
} else {
|
||||
Some(right_index)
|
||||
}
|
||||
} else {
|
||||
// j is right
|
||||
Some(base + j - 1)
|
||||
};
|
||||
if let Some(index) = proof_hash_index {
|
||||
lemma.push(self.data[index].clone());
|
||||
path.push(j & 1 == 0);
|
||||
}
|
||||
|
||||
base += width;
|
||||
width >>= 1;
|
||||
if width & 1 == 1 {
|
||||
width += 1;
|
||||
}
|
||||
j >>= 1;
|
||||
}
|
||||
|
||||
// root is final
|
||||
lemma.push(self.root());
|
||||
Proof::new(lemma, path)
|
||||
}
|
||||
|
||||
/// Returns merkle root
|
||||
pub fn root(&self) -> T {
|
||||
self.data[self.data.len() - 1].clone()
|
||||
}
|
||||
|
||||
/// Returns number of elements in the tree.
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
|
||||
/// Returns `true` if the vector contains no elements.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.data.is_empty()
|
||||
}
|
||||
|
||||
/// Returns height of the tree
|
||||
pub fn height(&self) -> usize {
|
||||
self.height
|
||||
}
|
||||
|
||||
/// Returns original number of elements the tree was built upon.
|
||||
pub fn leafs(&self) -> usize {
|
||||
self.leafs
|
||||
}
|
||||
|
||||
/// Extracts a slice containing the entire vector.
|
||||
///
|
||||
/// Equivalent to `&s[..]`.
|
||||
pub fn as_slice(&self) -> &[T] {
|
||||
self
|
||||
}
|
||||
|
||||
fn null_node() -> T {
|
||||
T::default()
|
||||
}
|
||||
}
|
||||
|
||||
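// Sketch of the in-memory layout described in the struct docs above, for four leaves
// (illustrative; seen from a downstream crate with `RawLeafSha3Algorithm` from
// `common/merkle_tree` as the hasher): the vec holds [h1 h2 h3 h4 h12 h34 root] and
// the root is always the last element.
fn layout_sketch() {
    use merkle_light::merkle::MerkleTree;
    use merkle_tree::RawLeafSha3Algorithm;

    let leaves: Vec<[u8; 32]> = (0u8..4).map(|i| [i; 32]).collect();
    let mt: MerkleTree<[u8; 32], RawLeafSha3Algorithm> = MerkleTree::new(leaves);
    assert_eq!(mt.leafs(), 4);
    assert_eq!(mt.len(), 7); // 4 leaves + 2 interior nodes + 1 root
    assert_eq!(mt.root(), mt.as_slice()[mt.len() - 1]);
}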
impl<T: Ord + Clone + Debug + Default + AsRef<[u8]> + Sync + Send, A: Algorithm<T>> FromIterator<T>
|
||||
for MerkleTree<T, A>
|
||||
{
|
||||
/// Creates new merkle tree from an iterator over hashable objects.
|
||||
fn from_iter<I: IntoIterator<Item = T>>(into: I) -> Self {
|
||||
let iter = into.into_iter();
|
||||
let mut data: Vec<T> = match iter.size_hint().1 {
|
||||
Some(e) => {
|
||||
let pow = next_pow2(e);
|
||||
let size = 2 * pow - 1;
|
||||
Vec::with_capacity(size)
|
||||
}
|
||||
None => Vec::new(),
|
||||
};
|
||||
|
||||
// leafs
|
||||
let mut a = A::default();
|
||||
for item in iter {
|
||||
a.reset();
|
||||
data.push(a.leaf(item));
|
||||
}
|
||||
|
||||
let leafs = data.len();
|
||||
let pow = next_pow2(leafs);
|
||||
let size = 2 * pow - 1;
|
||||
|
||||
// assert!(leafs > 1);
|
||||
|
||||
let mut mt: MerkleTree<T, A> = MerkleTree {
|
||||
data,
|
||||
leafs,
|
||||
height: log2_pow2(size + 1),
|
||||
link_map: Default::default(),
|
||||
_a: PhantomData,
|
||||
};
|
||||
|
||||
mt.build();
|
||||
mt
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Ord + Clone + AsRef<[u8]> + Sync + Send, A: Algorithm<T>> ops::Deref for MerkleTree<T, A> {
|
||||
type Target = [T];
|
||||
|
||||
fn deref(&self) -> &[T] {
|
||||
self.data.deref()
|
||||
}
|
||||
}
|
||||
|
||||
/// `next_pow2` returns next highest power of two from a given number if
|
||||
/// it is not already a power of two.
|
||||
///
|
||||
/// [](http://locklessinc.com/articles/next_pow2/)
|
||||
/// [](https://stackoverflow.com/questions/466204/rounding-up-to-next-power-of-2/466242#466242)
|
||||
pub fn next_pow2(mut n: usize) -> usize {
|
||||
n -= 1;
|
||||
n |= n >> 1;
|
||||
n |= n >> 2;
|
||||
n |= n >> 4;
|
||||
n |= n >> 8;
|
||||
n |= n >> 16;
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
{
|
||||
n |= n >> 32;
|
||||
}
|
||||
n + 1
|
||||
}
|
||||
|
||||
/// find power of 2 of a number which is power of 2
|
||||
pub fn log2_pow2(n: usize) -> usize {
|
||||
n.trailing_zeros() as usize
|
||||
}
|
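// Quick worked examples for the two helpers above (hypothetical test): `next_pow2`
// rounds up to the next power of two, `log2_pow2` takes the exact log2 of one.
#[cfg(test)]
mod pow2_tests {
    use super::{log2_pow2, next_pow2};

    #[test]
    fn pow2_helpers() {
        assert_eq!(next_pow2(1), 1);
        assert_eq!(next_pow2(5), 8);
        assert_eq!(next_pow2(8), 8);
        assert_eq!(log2_pow2(8), 3);
    }
}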
80
common/merkle_light/src/proof.rs
Normal file
@ -0,0 +1,80 @@
|
||||
extern crate alloc;
|
||||
|
||||
use crate::hash::Algorithm;
|
||||
use alloc::vec::Vec;
|
||||
|
||||
/// Merkle tree inclusion proof for data element, for which item = Leaf(Hash(Data Item)).
|
||||
///
|
||||
/// Lemma layout:
|
||||
///
|
||||
/// ```text
|
||||
/// [ item h1x h2y h3z ... root ]
|
||||
/// ```
|
||||
///
|
||||
/// Proof validation folds the lemma: the running hash is combined with each sibling
/// according to `path`, and the result must match the root hash.
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Proof<T: Eq + Clone + AsRef<[u8]>> {
|
||||
lemma: Vec<T>,
|
||||
path: Vec<bool>,
|
||||
}
|
||||
|
||||
impl<T: Eq + Clone + AsRef<[u8]>> Proof<T> {
|
||||
/// Creates new MT inclusion proof
|
||||
pub fn new(hash: Vec<T>, path: Vec<bool>) -> Proof<T> {
|
||||
if hash.len() > 2 {
|
||||
assert_eq!(hash.len() - 2, path.len());
|
||||
} else {
|
||||
assert_eq!(hash.len(), 1);
|
||||
assert_eq!(path, vec![]);
|
||||
}
|
||||
Proof { lemma: hash, path }
|
||||
}
|
||||
|
||||
/// Return proof target leaf
|
||||
pub fn item(&self) -> T {
|
||||
self.lemma.first().unwrap().clone()
|
||||
}
|
||||
|
||||
/// Return tree root
|
||||
pub fn root(&self) -> T {
|
||||
self.lemma.last().unwrap().clone()
|
||||
}
|
||||
|
||||
/// Verifies MT inclusion proof
|
||||
pub fn validate<A: Algorithm<T>>(&self) -> bool {
|
||||
let size = self.lemma.len();
|
||||
|
||||
// Special case for a single node.
|
||||
if size == 1 && self.path == vec![] {
|
||||
return true;
|
||||
}
|
||||
|
||||
if size < 2 {
|
||||
return false;
|
||||
}
|
||||
|
||||
let mut h = self.item();
|
||||
let mut a = A::default();
|
||||
|
||||
for i in 1..size - 1 {
|
||||
a.reset();
|
||||
h = if self.path[i - 1] {
|
||||
a.node(h, self.lemma[i].clone())
|
||||
} else {
|
||||
a.node(self.lemma[i].clone(), h)
|
||||
};
|
||||
}
|
||||
|
||||
h == self.root()
|
||||
}
|
||||
|
||||
/// Returns the path of this proof.
|
||||
pub fn path(&self) -> &[bool] {
|
||||
&self.path
|
||||
}
|
||||
|
||||
/// Returns the lemma of this proof.
|
||||
pub fn lemma(&self) -> &[T] {
|
||||
&self.lemma
|
||||
}
|
||||
}
|
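// Sketch of the lemma layout described above (illustrative; seen from a downstream
// crate with `RawLeafSha3Algorithm` from `common/merkle_tree` as the hasher): the
// first lemma entry is the target leaf, the last is the root, and `path` holds one
// left/right flag per level in between.
fn proof_layout_sketch() {
    use merkle_light::merkle::MerkleTree;
    use merkle_tree::RawLeafSha3Algorithm;

    let leaves: Vec<[u8; 32]> = (0u8..4).map(|i| [i; 32]).collect();
    let mt: MerkleTree<[u8; 32], RawLeafSha3Algorithm> = MerkleTree::new(leaves);
    let proof = mt.gen_proof(1);
    assert_eq!(proof.item(), mt.as_slice()[1]);
    assert_eq!(proof.root(), mt.root());
    assert_eq!(proof.lemma().len(), proof.path().len() + 2);
}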
76
common/merkle_light/src/test_cmh.rs
Normal file
@ -0,0 +1,76 @@
|
||||
#![cfg(test)]
|
||||
|
||||
use crate::hash::{Algorithm, Hashable};
|
||||
use crate::merkle::MerkleTree;
|
||||
use crate::test_item::Item;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::Hasher;
|
||||
use std::iter::FromIterator;
|
||||
|
||||
/// Custom merkle hash util test
|
||||
#[derive(Debug, Clone, Default)]
|
||||
struct CMH(DefaultHasher);
|
||||
|
||||
impl CMH {
|
||||
pub fn new() -> CMH {
|
||||
CMH(DefaultHasher::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for CMH {
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.write(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
self.0.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Algorithm<Item> for CMH {
|
||||
#[inline]
|
||||
fn hash(&mut self) -> Item {
|
||||
Item(self.finish())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
*self = CMH::default()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn leaf(&mut self, leaf: Item) -> Item {
|
||||
Item(leaf.0 & 0xff)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn node(&mut self, left: Item, right: Item) -> Item {
|
||||
self.write(&[1u8]);
|
||||
self.write(left.as_ref());
|
||||
self.write(&[2u8]);
|
||||
self.write(right.as_ref());
|
||||
Item(self.hash().0 & 0xffff)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_merkle_hasher() {
|
||||
let mut a = CMH::new();
|
||||
let mt: MerkleTree<Item, CMH> = MerkleTree::from_iter([1, 2, 3, 4, 5].iter().map(|x| {
|
||||
a.reset();
|
||||
x.hash(&mut a);
|
||||
a.hash()
|
||||
}));
|
||||
|
||||
assert_eq!(
|
||||
mt.as_slice()
|
||||
.iter()
|
||||
.take(mt.leafs())
|
||||
.filter(|&&x| x.0 > 255)
|
||||
.count(),
|
||||
0
|
||||
);
|
||||
assert_eq!(mt.as_slice().iter().filter(|&&x| x.0 > 65535).count(), 0);
|
||||
}
|
39
common/merkle_light/src/test_item.rs
Normal file
@ -0,0 +1,39 @@
|
||||
#![cfg(test)]
|
||||
#![allow(unsafe_code)]
|
||||
|
||||
use crate::hash::{Algorithm, Hashable};
|
||||
use std::mem;
|
||||
use std::slice;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default, Debug)]
|
||||
pub struct Item(pub u64);
|
||||
|
||||
impl AsRef<[u8]> for Item {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
unsafe { slice::from_raw_parts(mem::transmute(&self.0), 8) }
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<u64> for Item {
|
||||
fn eq(&self, other: &u64) -> bool {
|
||||
self.0 == *other
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u64> for Item {
|
||||
fn from(x: u64) -> Self {
|
||||
Item(x)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Item> for u64 {
|
||||
fn from(val: Item) -> Self {
|
||||
val.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: Algorithm<Item>> Hashable<A> for Item {
|
||||
fn hash(&self, state: &mut A) {
|
||||
state.write_u64(self.0)
|
||||
}
|
||||
}
|
13
common/merkle_tree/Cargo.toml
Normal file
@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "merkle_tree"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
merkle_light = { path = "../../common/merkle_light" }
|
||||
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.3"
|
88
common/merkle_tree/src/lib.rs
Normal file
@ -0,0 +1,88 @@
|
||||
use merkle_light::hash::Algorithm;
|
||||
use std::hash::Hasher;
|
||||
use tiny_keccak::{Hasher as KeccakHasher, Keccak};
|
||||
|
||||
// TODO: Option here is only used for compatibility with `tiny_keccak` and `merkle_light`.
|
||||
#[derive(Clone)]
|
||||
pub struct RawLeafSha3Algorithm(Option<Keccak>);
|
||||
|
||||
impl RawLeafSha3Algorithm {
|
||||
fn new() -> RawLeafSha3Algorithm {
|
||||
RawLeafSha3Algorithm(Some(Keccak::v256()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RawLeafSha3Algorithm {
|
||||
fn default() -> RawLeafSha3Algorithm {
|
||||
RawLeafSha3Algorithm::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for RawLeafSha3Algorithm {
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.as_mut().unwrap().update(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
pub type CryptoSHA256Hash = [u8; 32];
|
||||
|
||||
impl Algorithm<CryptoSHA256Hash> for RawLeafSha3Algorithm {
|
||||
#[inline]
|
||||
fn hash(&mut self) -> CryptoSHA256Hash {
|
||||
let mut h = [0u8; 32];
|
||||
self.0.take().unwrap().finalize(&mut h);
|
||||
h
|
||||
}
|
||||
|
||||
fn leaf(&mut self, leaf: CryptoSHA256Hash) -> CryptoSHA256Hash {
|
||||
// Leave the leaf node untouched so we can save the subtree root
|
||||
// just as the leaf node for the top tree.
|
||||
// `LEAF` is prepended for `Chunk` hash computation.
|
||||
leaf
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn node(&mut self, left: CryptoSHA256Hash, right: CryptoSHA256Hash) -> CryptoSHA256Hash {
|
||||
self.write(left.as_ref());
|
||||
self.write(right.as_ref());
|
||||
self.hash()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::RawLeafSha3Algorithm;
|
||||
use merkle_light::{hash::Algorithm, merkle::MerkleTree};
|
||||
use std::hash::Hasher;
|
||||
|
||||
#[test]
|
||||
fn test_root() {
|
||||
let results = [
|
||||
[
|
||||
86, 124, 71, 168, 121, 121, 77, 212, 137, 162, 16, 222, 193, 125, 49, 204, 89, 25,
|
||||
188, 66, 125, 19, 141, 113, 106, 129, 7, 224, 37, 226, 219, 203,
|
||||
],
|
||||
[
|
||||
41, 66, 83, 171, 49, 203, 249, 13, 187, 190, 247, 85, 167, 95, 241, 96, 29, 167,
|
||||
144, 227, 92, 54, 95, 83, 14, 124, 26, 28, 169, 4, 220, 248,
|
||||
],
|
||||
];
|
||||
for (test_index, n_chunk) in [6, 7].into_iter().enumerate() {
|
||||
let mut data = Vec::with_capacity(n_chunk);
|
||||
for _ in 0..n_chunk {
|
||||
let mut a = RawLeafSha3Algorithm::default();
|
||||
a.write(&[0; 256]);
|
||||
data.push(a.hash());
|
||||
}
|
||||
let mt = MerkleTree::<_, RawLeafSha3Algorithm>::new(data);
|
||||
println!("{:?} {}", mt.root(), hex::encode(mt.root()));
|
||||
assert_eq!(results[test_index], mt.root());
|
||||
}
|
||||
}
|
||||
}
|
6
common/spec/Cargo.toml
Normal file
@ -0,0 +1,6 @@
|
||||
[package]
|
||||
name = "zgs_spec"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
18
common/spec/src/lib.rs
Normal file
@ -0,0 +1,18 @@
|
||||
pub const KB: usize = 1024;
|
||||
pub const MB: usize = 1024 * KB;
|
||||
pub const GB: usize = 1024 * MB;
|
||||
pub const TB: usize = 1024 * GB;
|
||||
|
||||
pub const BYTES_PER_SECTOR: usize = 256;
|
||||
pub const BYTES_PER_SEAL: usize = 4 * KB;
|
||||
pub const BYTES_PER_SCRATCHPAD: usize = 64 * KB;
|
||||
pub const BYTES_PER_LOAD: usize = 256 * KB;
|
||||
pub const BYTES_PER_PRICING: usize = 8 * GB;
|
||||
pub const BYTES_PER_MAX_MINING_RANGE: usize = 8 * TB;
|
||||
|
||||
pub const SECTORS_PER_LOAD: usize = BYTES_PER_LOAD / BYTES_PER_SECTOR;
|
||||
pub const SECTORS_PER_SEAL: usize = BYTES_PER_SEAL / BYTES_PER_SECTOR;
|
||||
pub const SECTORS_PER_PRICING: usize = BYTES_PER_PRICING / BYTES_PER_SECTOR;
|
||||
pub const SECTORS_PER_MAX_MINING_RANGE: usize = BYTES_PER_MAX_MINING_RANGE / BYTES_PER_SECTOR;
|
||||
|
||||
pub const SEALS_PER_LOAD: usize = BYTES_PER_LOAD / BYTES_PER_SEAL;
|
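// Worked values implied by the constants above (hypothetical test): a 256 KB load is
// 1024 sectors or 64 seals, and a 4 KB seal is 16 sectors.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn derived_constants() {
        assert_eq!(SECTORS_PER_LOAD, 1024);
        assert_eq!(SECTORS_PER_SEAL, 16);
        assert_eq!(SEALS_PER_LOAD, 64);
    }
}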
13
common/task_executor/Cargo.toml
Normal file
@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "task_executor"
|
||||
version = "0.1.0"
|
||||
authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
exit-future = "0.2.0"
|
||||
futures = "0.3.21"
|
||||
lazy_static = "1.4.0"
|
||||
lighthouse_metrics = { path = "../lighthouse_metrics" }
|
||||
tokio = { version = "1.19.2", features = ["rt"] }
|
||||
tracing = "0.1.35"
|
369
common/task_executor/src/lib.rs
Normal file
@ -0,0 +1,369 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
mod metrics;
|
||||
pub mod test_utils;
|
||||
|
||||
use futures::channel::mpsc::Sender;
|
||||
use futures::prelude::*;
|
||||
use std::sync::Weak;
|
||||
use tokio::runtime::{Handle, Runtime};
|
||||
|
||||
pub use tokio::task::JoinHandle;
|
||||
|
||||
/// Provides a reason when node is shut down.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ShutdownReason {
|
||||
/// The node shut down successfully.
|
||||
Success(&'static str),
|
||||
/// The node shut down due to an error condition.
|
||||
Failure(&'static str),
|
||||
}
|
||||
|
||||
impl ShutdownReason {
|
||||
pub fn message(&self) -> &'static str {
|
||||
match self {
|
||||
ShutdownReason::Success(msg) => msg,
|
||||
ShutdownReason::Failure(msg) => msg,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Provides a `Handle` by either:
|
||||
///
|
||||
/// 1. Holding a `Weak<Runtime>` and calling `Runtime::handle`.
|
||||
/// 2. Directly holding a `Handle` and cloning it.
|
||||
///
|
||||
/// This enum allows the `TaskExecutor` to work in production where a `Weak<Runtime>` is directly
|
||||
/// accessible and in testing where the `Runtime` is hidden outside our scope.
|
||||
#[derive(Clone)]
|
||||
pub enum HandleProvider {
|
||||
Runtime(Weak<Runtime>),
|
||||
Handle(Handle),
|
||||
}
|
||||
|
||||
impl From<Handle> for HandleProvider {
|
||||
fn from(handle: Handle) -> Self {
|
||||
HandleProvider::Handle(handle)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Weak<Runtime>> for HandleProvider {
|
||||
fn from(weak_runtime: Weak<Runtime>) -> Self {
|
||||
HandleProvider::Runtime(weak_runtime)
|
||||
}
|
||||
}
|
||||
|
||||
impl HandleProvider {
|
||||
/// Returns a `Handle` to a `Runtime`.
|
||||
///
|
||||
/// May return `None` if the weak reference to the `Runtime` has been dropped (this generally
|
||||
/// means Lighthouse is shutting down).
|
||||
pub fn handle(&self) -> Option<Handle> {
|
||||
match self {
|
||||
HandleProvider::Runtime(weak_runtime) => weak_runtime
|
||||
.upgrade()
|
||||
.map(|runtime| runtime.handle().clone()),
|
||||
HandleProvider::Handle(handle) => Some(handle.clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
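// Sketch of the two constructions described above (illustrative only, mirroring how
// `TestRuntime` in test_utils.rs builds its runtime): a `HandleProvider` can be made
// either from a `Weak<Runtime>` or from a `Handle`.
fn handle_provider_sketch() {
    let runtime = std::sync::Arc::new(
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .unwrap(),
    );
    let from_weak: HandleProvider = std::sync::Arc::downgrade(&runtime).into();
    let from_handle: HandleProvider = runtime.handle().clone().into();
    assert!(from_weak.handle().is_some());
    assert!(from_handle.handle().is_some());
}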
/// A wrapper over a runtime handle which can spawn async and blocking tasks.
|
||||
#[derive(Clone)]
|
||||
pub struct TaskExecutor {
|
||||
/// The handle to the runtime on which tasks are spawned
|
||||
handle_provider: HandleProvider,
|
||||
/// The receiver exit future which on receiving shuts down the task
|
||||
exit: exit_future::Exit,
|
||||
/// Sender given to tasks, so that if they encounter a state in which execution cannot
|
||||
/// continue they can request that everything shuts down.
|
||||
///
|
||||
/// The task must provide a reason for shutting down.
|
||||
signal_tx: Sender<ShutdownReason>,
|
||||
}
|
||||
|
||||
impl TaskExecutor {
|
||||
/// Create a new task executor.
|
||||
///
|
||||
/// ## Note
|
||||
///
|
||||
/// This function should only be used during testing. In production, prefer to obtain an
|
||||
/// instance of `Self` via a `environment::RuntimeContext` (see the `lighthouse/environment`
|
||||
/// crate).
|
||||
pub fn new<T: Into<HandleProvider>>(
|
||||
handle: T,
|
||||
exit: exit_future::Exit,
|
||||
signal_tx: Sender<ShutdownReason>,
|
||||
) -> Self {
|
||||
Self {
|
||||
handle_provider: handle.into(),
|
||||
exit,
|
||||
signal_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Clones the task executor adding a service name.
|
||||
pub fn clone_with_name(&self) -> Self {
|
||||
TaskExecutor {
|
||||
handle_provider: self.handle_provider.clone(),
|
||||
exit: self.exit.clone(),
|
||||
signal_tx: self.signal_tx.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// A convenience wrapper for `Self::spawn` which ignores a `Result` as long as both `Ok`/`Err`
|
||||
/// are of type `()`.
|
||||
///
|
||||
/// The purpose of this function is to create a compile error if some function which previously
|
||||
/// returned `()` starts returning something else. Such a case may otherwise result in
|
||||
/// accidental error suppression.
|
||||
pub fn spawn_ignoring_error(
|
||||
&self,
|
||||
task: impl Future<Output = Result<(), ()>> + Send + 'static,
|
||||
name: &'static str,
|
||||
) {
|
||||
self.spawn(task.map(|_| ()), name)
|
||||
}
|
||||
|
||||
/// Spawn a task to monitor the completion of another task.
|
||||
///
|
||||
/// If the other task exits by panicking, then the monitor task will shut down the executor.
|
||||
fn spawn_monitor<R: Send>(
|
||||
&self,
|
||||
task_handle: impl Future<Output = Result<R, tokio::task::JoinError>> + Send + 'static,
|
||||
name: &'static str,
|
||||
) {
|
||||
let mut shutdown_sender = self.shutdown_sender();
|
||||
|
||||
if let Some(handle) = self.handle() {
|
||||
handle.spawn(async move {
|
||||
let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]);
|
||||
if let Err(join_error) = task_handle.await {
|
||||
if let Ok(panic) = join_error.try_into_panic() {
|
||||
let message = panic.downcast_ref::<&str>().unwrap_or(&"<none>");
|
||||
|
||||
error!(
|
||||
task_name = name,
|
||||
message = message,
|
||||
advice = "Please check above for a backtrace and notify \
|
||||
the developers",
|
||||
"Task panic. This is a bug!",
|
||||
);
|
||||
let _ = shutdown_sender
|
||||
.try_send(ShutdownReason::Failure("Panic (fatal error)"));
|
||||
}
|
||||
}
|
||||
drop(timer);
|
||||
});
|
||||
} else {
|
||||
debug!("Couldn't spawn monitor task. Runtime shutting down")
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a future on the tokio runtime.
|
||||
///
|
||||
/// The future is wrapped in an `exit_future::Exit`. The task is cancelled when the corresponding
|
||||
/// exit_future `Signal` is fired/dropped.
|
||||
///
|
||||
/// The future is monitored via another spawned future to ensure that it doesn't panic. In case
|
||||
/// of a panic, the executor will be shut down via `self.signal_tx`.
|
||||
///
|
||||
/// This function generates prometheus metrics on number of tasks and task duration.
|
||||
pub fn spawn(&self, task: impl Future<Output = ()> + Send + 'static, name: &'static str) {
|
||||
if let Some(task_handle) = self.spawn_handle(task, name) {
|
||||
self.spawn_monitor(task_handle, name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a future on the tokio runtime. This function does not wrap the task in an `exit_future::Exit`
|
||||
/// like [spawn](#method.spawn).
|
||||
/// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to
|
||||
/// ensure that the task gets canceled appropriately.
|
||||
/// This function generates prometheus metrics on number of tasks and task duration.
|
||||
///
|
||||
/// This is useful in cases where the future to be spawned needs to do additional cleanup work when
|
||||
/// the task is completed/canceled (e.g. writing local variables to disk) or the task is created from
|
||||
/// some framework which does its own cleanup (e.g. a hyper server).
|
||||
pub fn spawn_without_exit(
|
||||
&self,
|
||||
task: impl Future<Output = ()> + Send + 'static,
|
||||
name: &'static str,
|
||||
) {
|
||||
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
||||
let int_gauge_1 = int_gauge.clone();
|
||||
let future = task.then(move |_| {
|
||||
int_gauge_1.dec();
|
||||
futures::future::ready(())
|
||||
});
|
||||
|
||||
int_gauge.inc();
|
||||
if let Some(handle) = self.handle() {
|
||||
handle.spawn(future);
|
||||
} else {
|
||||
debug!("Couldn't spawn task. Runtime shutting down");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a blocking task on a dedicated tokio thread pool wrapped in an exit future.
|
||||
/// This function generates prometheus metrics on number of tasks and task duration.
|
||||
pub fn spawn_blocking<F>(&self, task: F, name: &'static str)
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
if let Some(task_handle) = self.spawn_blocking_handle(task, name) {
|
||||
self.spawn_monitor(task_handle, name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit` returning an optional
|
||||
/// join handle to the future.
|
||||
/// The task is canceled when the corresponding exit_future `Signal` is fired/dropped.
|
||||
///
|
||||
/// This function generates prometheus metrics on number of tasks and task duration.
|
||||
pub fn spawn_handle<R: Send + 'static>(
|
||||
&self,
|
||||
task: impl Future<Output = R> + Send + 'static,
|
||||
name: &'static str,
|
||||
) -> Option<tokio::task::JoinHandle<Option<R>>> {
|
||||
let exit = self.exit.clone();
|
||||
|
||||
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
||||
// Task is shutdown before it completes if `exit` receives
|
||||
let int_gauge_1 = int_gauge.clone();
|
||||
let future = future::select(Box::pin(task), exit).then(move |either| {
|
||||
let result = match either {
|
||||
future::Either::Left((value, _)) => {
|
||||
trace!(task = name, "Async task completed");
|
||||
Some(value)
|
||||
}
|
||||
future::Either::Right(_) => {
|
||||
debug!(task = name, "Async task shutdown, exit received");
|
||||
None
|
||||
}
|
||||
};
|
||||
int_gauge_1.dec();
|
||||
futures::future::ready(result)
|
||||
});
|
||||
|
||||
int_gauge.inc();
|
||||
if let Some(handle) = self.handle() {
|
||||
Some(handle.spawn(future))
|
||||
} else {
|
||||
debug!("Couldn't spawn task. Runtime shutting down");
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a blocking task on a dedicated tokio thread pool wrapped in an exit future returning
|
||||
/// a join handle to the future.
|
||||
/// If the runtime doesn't exist, this will return None.
|
||||
/// The Future returned behaves like the standard JoinHandle which can return an error if the
|
||||
/// task failed.
|
||||
/// This function generates prometheus metrics on number of tasks and task duration.
|
||||
pub fn spawn_blocking_handle<F, R>(
|
||||
&self,
|
||||
task: F,
|
||||
name: &'static str,
|
||||
) -> Option<impl Future<Output = Result<R, tokio::task::JoinError>>>
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
{
|
||||
let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]);
|
||||
metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]);
|
||||
|
||||
let join_handle = if let Some(handle) = self.handle() {
|
||||
handle.spawn_blocking(task)
|
||||
} else {
|
||||
debug!("Couldn't spawn task. Runtime shutting down");
|
||||
return None;
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
let result = match join_handle.await {
|
||||
Ok(result) => {
|
||||
trace!(task = name, "Blocking task completed");
|
||||
Ok(result)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!(error = %e, "Blocking task ended unexpectedly");
|
||||
Err(e)
|
||||
}
|
||||
};
|
||||
drop(timer);
|
||||
metrics::dec_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]);
|
||||
result
|
||||
};
|
||||
|
||||
Some(future)
|
||||
}
|
||||
|
||||
/// Block the current (non-async) thread on the completion of some future.
|
||||
///
|
||||
/// ## Warning
|
||||
///
|
||||
/// This method is "dangerous" since calling it from an async thread will result in a panic! Any
|
||||
/// use of this outside of testing should be very deeply considered as Lighthouse has been
|
||||
/// burned by this function in the past.
|
||||
///
|
||||
/// Determining what is an "async thread" is rather challenging; just because a function isn't
|
||||
/// marked as `async` doesn't mean it's not being called from an `async` function or there isn't
|
||||
/// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to
|
||||
/// @paulhauner if you plan to use this function in production. He has put metrics in here to
|
||||
/// track any use of it, so don't think you can pull a sneaky one on him.
|
||||
pub fn block_on_dangerous<F: Future>(
|
||||
&self,
|
||||
future: F,
|
||||
name: &'static str,
|
||||
) -> Option<F::Output> {
|
||||
let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]);
|
||||
metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]);
|
||||
let handle = self.handle()?;
|
||||
let exit = self.exit.clone();
|
||||
|
||||
debug!(task = name, "Starting block_on task");
|
||||
|
||||
handle.block_on(async {
|
||||
let output = tokio::select! {
|
||||
output = future => {
|
||||
debug!(
|
||||
task = name,
|
||||
"Completed block_on task"
|
||||
);
|
||||
Some(output)
|
||||
},
|
||||
_ = exit => {
|
||||
debug!(
|
||||
task = name,
|
||||
"Cancelled block_on task"
|
||||
|
||||
);
|
||||
None
|
||||
}
|
||||
};
|
||||
metrics::dec_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]);
|
||||
drop(timer);
|
||||
output
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns a `Handle` to the current runtime.
|
||||
pub fn handle(&self) -> Option<Handle> {
|
||||
self.handle_provider.handle()
|
||||
}
|
||||
|
||||
/// Returns a copy of the `exit_future::Exit`.
|
||||
pub fn exit(&self) -> exit_future::Exit {
|
||||
self.exit.clone()
|
||||
}
|
||||
|
||||
/// Get a channel to request shutting down.
|
||||
pub fn shutdown_sender(&self) -> Sender<ShutdownReason> {
|
||||
self.signal_tx.clone()
|
||||
}
|
||||
}
|
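// Minimal usage sketch (illustrative, test-only), relying on the `TestRuntime` helper
// defined in test_utils.rs below. Futures passed to `spawn` are wrapped in the exit
// future and monitored for panics as described above.
fn spawn_sketch() {
    let rt = crate::test_utils::TestRuntime::default();
    rt.task_executor
        .spawn(async { /* async work goes here */ }, "example_task");
    rt.task_executor
        .spawn_blocking(|| { /* blocking work goes here */ }, "example_blocking");
}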
36
common/task_executor/src/metrics.rs
Normal file
@ -0,0 +1,36 @@
|
||||
/// Handles async task metrics
|
||||
use lazy_static::lazy_static;
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref ASYNC_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"async_tasks_count",
|
||||
"Total number of async tasks spawned using spawn",
|
||||
&["async_task_count"]
|
||||
);
|
||||
pub static ref BLOCKING_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"blocking_tasks_count",
|
||||
"Total number of async tasks spawned using spawn_blocking",
|
||||
&["blocking_task_count"]
|
||||
);
|
||||
pub static ref BLOCKING_TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"blocking_tasks_histogram",
|
||||
"Time taken by blocking tasks",
|
||||
&["blocking_task_hist"]
|
||||
);
|
||||
pub static ref BLOCK_ON_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"block_on_tasks_count",
|
||||
"Total number of block_on_dangerous tasks spawned",
|
||||
&["name"]
|
||||
);
|
||||
pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"block_on_tasks_histogram",
|
||||
"Time taken by block_on_dangerous tasks",
|
||||
&["name"]
|
||||
);
|
||||
pub static ref TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec(
|
||||
"async_tasks_time_histogram",
|
||||
"Time taken by async tasks",
|
||||
&["async_task_hist"]
|
||||
);
|
||||
}
|
51
common/task_executor/src/test_utils.rs
Normal file
@ -0,0 +1,51 @@
|
||||
use crate::TaskExecutor;
|
||||
use std::sync::Arc;
|
||||
use tokio::runtime;
|
||||
|
||||
/// Whilst the `TestRuntime` is not necessarily useful in itself, it provides the necessary
|
||||
/// components for creating a `TaskExecutor` during tests.
|
||||
///
|
||||
/// May create its own runtime or use an existing one.
|
||||
///
|
||||
/// ## Warning
|
||||
///
|
||||
/// This struct should never be used in production, only testing.
|
||||
pub struct TestRuntime {
|
||||
runtime: Option<Arc<tokio::runtime::Runtime>>,
|
||||
_runtime_shutdown: exit_future::Signal,
|
||||
pub task_executor: TaskExecutor,
|
||||
}
|
||||
|
||||
impl Default for TestRuntime {
|
||||
/// If called *inside* an existing runtime, instantiates `Self` using a handle to that runtime. If
|
||||
/// called *outside* any existing runtime, create a new `Runtime` and keep it alive until the
|
||||
/// `Self` is dropped.
|
||||
fn default() -> Self {
|
||||
let (runtime_shutdown, exit) = exit_future::signal();
|
||||
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
||||
|
||||
let runtime = Arc::new(
|
||||
runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap(),
|
||||
);
|
||||
let handle = runtime.handle().clone();
|
||||
|
||||
let task_executor = TaskExecutor::new(handle, exit, shutdown_tx);
|
||||
|
||||
Self {
|
||||
runtime: Some(runtime),
|
||||
_runtime_shutdown: runtime_shutdown,
|
||||
task_executor,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestRuntime {
|
||||
fn drop(&mut self) {
|
||||
if let Some(runtime) = self.runtime.take() {
|
||||
Arc::try_unwrap(runtime).unwrap().shutdown_background()
|
||||
}
|
||||
}
|
||||
}
|
8
common/unused_port/Cargo.toml
Normal file
@ -0,0 +1,8 @@
|
||||
[package]
|
||||
name = "unused_port"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
55
common/unused_port/src/lib.rs
Normal file
@ -0,0 +1,55 @@
|
||||
use std::net::{TcpListener, UdpSocket};
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum Transport {
|
||||
Tcp,
|
||||
Udp,
|
||||
}
|
||||
|
||||
/// A convenience function for `unused_port(Transport::Tcp)`.
|
||||
pub fn unused_tcp_port() -> Result<u16, String> {
|
||||
unused_port(Transport::Tcp)
|
||||
}
|
||||
|
||||
/// A convenience function for `unused_port(Transport::Udp)`.
|
||||
pub fn unused_udp_port() -> Result<u16, String> {
|
||||
unused_port(Transport::Udp)
|
||||
}
|
||||
|
||||
/// A bit of a hack to find an unused port.
|
||||
///
|
||||
/// Does not guarantee that the given port is unused after the function exits, just that it was
|
||||
/// unused before the function started (i.e., it does not reserve a port).
|
||||
///
|
||||
/// ## Notes
|
||||
///
|
||||
/// It is possible that users are unable to bind to the ports returned by this function as the OS
|
||||
/// has a buffer period where it doesn't allow binding to the same port even after the socket is
|
||||
/// closed. We might have to use the SO_REUSEADDR socket option from the `net2` crate in that case.
|
||||
pub fn unused_port(transport: Transport) -> Result<u16, String> {
|
||||
let local_addr = match transport {
|
||||
Transport::Tcp => {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| {
|
||||
format!("Failed to create TCP listener to find unused port: {:?}", e)
|
||||
})?;
|
||||
listener.local_addr().map_err(|e| {
|
||||
format!(
|
||||
"Failed to read TCP listener local_addr to find unused port: {:?}",
|
||||
e
|
||||
)
|
||||
})?
|
||||
}
|
||||
Transport::Udp => {
|
||||
let socket = UdpSocket::bind("127.0.0.1:0")
|
||||
.map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?;
|
||||
socket.local_addr().map_err(|e| {
|
||||
format!(
|
||||
"Failed to read UDP socket local_addr to find unused port: {:?}",
|
||||
e
|
||||
)
|
||||
})?
|
||||
}
|
||||
};
|
||||
|
||||
Ok(local_addr.port())
|
||||
}
|
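// --- Illustration only: NOT part of the original commit. ---
// A minimal sketch of the intended usage: probe for a free port, then bind to it
// right away. Since the port is not reserved, a race with another process is
// still possible, which is why this remains a bit of a hack.
#[cfg(test)]
mod usage_example {
    use super::*;

    #[test]
    fn returned_tcp_port_is_usually_bindable() {
        let port = unused_tcp_port().expect("should find an unused TCP port");
        // The port was free when probed, though nothing holds it for us.
        std::net::TcpListener::bind(("127.0.0.1", port))
            .expect("should bind to the reported port");
    }
}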
9
common/zgs_seal/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
||||
[package]
|
||||
name = "zgs_seal"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
ethereum-types = "0.14"
|
||||
zgs_spec = {path = "../spec"}
|
||||
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
|
62
common/zgs_seal/src/lib.rs
Normal file
@ -0,0 +1,62 @@
|
||||
use ethereum_types::H256;
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
pub fn compute_first_mask_seed(
|
||||
miner_id: &H256,
|
||||
context_digest: &H256,
|
||||
start_sector: u64,
|
||||
) -> [u8; 96] {
|
||||
let mut output = [0u8; 96];
|
||||
output[0..32].copy_from_slice(&miner_id.0);
|
||||
output[32..64].copy_from_slice(&context_digest.0);
|
||||
output[88..96].clone_from_slice(&start_sector.to_be_bytes());
|
||||
output
|
||||
}
|
||||
|
||||
fn keccak(input: impl AsRef<[u8]>) -> [u8; 32] {
|
||||
let mut hasher = Keccak::v256();
|
||||
let mut output = [0u8; 32];
|
||||
hasher.update(input.as_ref());
|
||||
hasher.finalize(&mut output);
|
||||
output
|
||||
}
|
||||
|
||||
pub fn seal_with_mask(data: &mut [u8], first_mask: [u8; 32]) {
|
||||
assert!(data.len() % 32 == 0);
|
||||
let mut mask = first_mask;
|
||||
for word in data.chunks_mut(32) {
|
||||
word.iter_mut().zip(mask.iter()).for_each(|(x, y)| *x ^= *y);
|
||||
mask = keccak(&*word);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unseal_with_mask(data: &mut [u8], first_mask: [u8; 32]) {
|
||||
assert!(data.len() % 32 == 0);
|
||||
|
||||
let mut mask = first_mask;
|
||||
data.chunks_exact_mut(32).for_each(|x| {
|
||||
let next_mask = keccak(&*x);
|
||||
x.iter_mut()
|
||||
.zip(mask.iter())
|
||||
.for_each(|(x, mask)| *x ^= *mask);
|
||||
mask = next_mask;
|
||||
})
|
||||
}
|
||||
|
||||
pub fn seal_with_mask_seed(data: &mut [u8], first_mask_seed: impl AsRef<[u8]>) {
|
||||
seal_with_mask(data, keccak(first_mask_seed))
|
||||
}
|
||||
|
||||
pub fn unseal_with_mask_seed(data: &mut [u8], first_mask_seed: impl AsRef<[u8]>) {
|
||||
unseal_with_mask(data, keccak(first_mask_seed))
|
||||
}
|
||||
|
||||
pub fn seal(data: &mut [u8], miner_id: &H256, context_digest: &H256, start_sector: u64) {
|
||||
let first_mask_seed = compute_first_mask_seed(miner_id, context_digest, start_sector);
|
||||
seal_with_mask_seed(data, first_mask_seed)
|
||||
}
|
||||
|
||||
pub fn unseal(data: &mut [u8], miner_id: &H256, context_digest: &H256, start_sector: u64) {
|
||||
let first_mask_seed = compute_first_mask_seed(miner_id, context_digest, start_sector);
|
||||
unseal_with_mask_seed(data, first_mask_seed)
|
||||
}
|
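// --- Illustration only: NOT part of the original commit. ---
// A round-trip sketch of the sealing scheme above: sealing XORs each 32-byte word
// with a rolling keccak mask (the mask for word i+1 is the keccak of the *sealed*
// word i), so unsealing recomputes the next mask from the still-sealed word before
// XORing the current mask away.
#[cfg(test)]
mod round_trip {
    use super::*;
    use ethereum_types::H256;

    #[test]
    fn seal_then_unseal_restores_data() {
        let miner_id = H256::zero();
        let context_digest = H256::zero();
        let original: Vec<u8> = (0u8..128).collect(); // 4 words of 32 bytes
        let mut data = original.clone();

        seal(&mut data, &miner_id, &context_digest, 42);
        assert_ne!(data, original, "sealing should change the data");

        unseal(&mut data, &miner_id, &context_digest, 42);
        assert_eq!(data, original, "unsealing should restore the data");
    }
}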
14
common/zgs_version/Cargo.toml
Normal file
@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "zgs_version"
|
||||
version = "0.1.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
git-version = "0.3.5"
|
||||
target_info = "0.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
regex = "1.5.6"
|
47
common/zgs_version/src/lib.rs
Normal file
@ -0,0 +1,47 @@
|
||||
use git_version::git_version;
|
||||
use target_info::Target;
|
||||
|
||||
/// Returns the current version of this build.
|
||||
///
|
||||
/// A plus-sign (`+`) is appended to the git commit if the tree is dirty.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// `zgs/v0.0.1-67da032+`
|
||||
pub const VERSION: &str = git_version!(
|
||||
args = [
|
||||
"--always",
|
||||
"--dirty=+",
|
||||
"--abbrev=7",
|
||||
// NOTE: using --match instead of --exclude for compatibility with old Git
|
||||
"--match=thiswillnevermatchlol"
|
||||
],
|
||||
prefix = "zgs/v0.0.1-",
|
||||
fallback = "unknown"
|
||||
);
|
||||
|
||||
/// Returns `VERSION`, but with platform information appended to the end.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// `zgs/v0.0.1-67da032+/x86_64-linux`
|
||||
pub fn version_with_platform() -> String {
|
||||
format!("{}/{}-{}", VERSION, Target::arch(), Target::os())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use regex::Regex;
|
||||
|
||||
#[test]
|
||||
fn version_formatting() {
|
||||
let re =
|
||||
Regex::new(r"^zgs/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?-[[:xdigit:]]{7}\+?$").unwrap();
|
||||
assert!(
|
||||
re.is_match(VERSION),
|
||||
"version doesn't match regex: {}",
|
||||
VERSION
|
||||
);
|
||||
}
|
||||
}
|
70
doc/install.md
Normal file
@ -0,0 +1,70 @@
|
||||
# Install
|
||||
ZeroGStorage requires Rust 1.71.0 and Go to build.
|
||||
|
||||
## Install Rust
|
||||
|
||||
We recommend installing Rust through [rustup](https://www.rustup.rs/).
|
||||
|
||||
* Linux
|
||||
|
||||
Install Rust
|
||||
```shell
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
rustup install 1.71.0
|
||||
```
|
||||
|
||||
Other dependencies
|
||||
* Ubuntu
|
||||
```shell
|
||||
sudo apt-get install clang cmake build-essential
|
||||
```
|
||||
|
||||
* Mac
|
||||
|
||||
Install Rust
|
||||
```shell
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
rustup install 1.71.0
|
||||
```
|
||||
|
||||
```shell
|
||||
brew install llvm cmake
|
||||
```
|
||||
|
||||
* Windows
|
||||
Download and run the rustup installer from [this link](https://static.rust-lang.org/rustup/dist/i686-pc-windows-gnu/rustup-init.exe).
|
||||
Install LLVM; pre-built binaries can be downloaded from [this link](https://releases.llvm.org/download.html).
|
||||
|
||||
## Install Go
|
||||
* Linux
|
||||
```shell
|
||||
# Download the Go installer
|
||||
wget https://go.dev/dl/go1.19.3.linux-amd64.tar.gz
|
||||
|
||||
# Extract the archive
|
||||
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.19.3.linux-amd64.tar.gz
|
||||
|
||||
# Add /usr/local/go/bin to the PATH environment variable by adding the following line to your ~/.profile.
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
```
|
||||
|
||||
* Mac
|
||||
|
||||
Download the Go installer from https://go.dev/dl/go1.19.3.darwin-amd64.pkg.
|
||||
Open the package file you downloaded and follow the prompts to install Go.
|
||||
|
||||
* Windows
|
||||
Download the Go installer from https://go.dev/dl/go1.19.3.windows-amd64.msi.
|
||||
Open the MSI file you downloaded and follow the prompts to install Go.
|
||||
|
||||
|
||||
## Build from source
|
||||
```shell
|
||||
# Download code
|
||||
$ git clone https://github.com/zero-gravity-labs/zerog-storage-rust.git
|
||||
$ cd zerog-storage-rust
|
||||
$ git submodule update --init
|
||||
|
||||
# Build in release mode
|
||||
$ cargo build --release
|
||||
```
|
91
doc/run.md
Normal file
@ -0,0 +1,91 @@
|
||||
# Run
|
||||
|
||||
## Deploy contract: Token, Flow and Mine contracts
|
||||
|
||||
### Setup Environment
|
||||
|
||||
Install the dependencies: Node.js, Yarn, and Hardhat.
|
||||
|
||||
* Linux
|
||||
* Ubuntu
|
||||
```shell
|
||||
# node >=12.18
|
||||
sudo apt install npm
|
||||
sudo npm install --global yarn
|
||||
sudo npm install --global hardhat
|
||||
```
|
||||
|
||||
* Mac
|
||||
```shell
|
||||
brew install node
|
||||
sudo npm install --global yarn
|
||||
sudo npm install --global hardhat
|
||||
```
|
||||
|
||||
* Windows
|
||||
Download and install node from [here](https://nodejs.org/en/download/)
|
||||
```shell
|
||||
npm install --global yarn
|
||||
npm install --global hardhat
|
||||
```
|
||||
|
||||
### Download contract source code
|
||||
```shell
|
||||
git clone https://github.com/zero-gravity-labs/zerog-storage-contracts.git
|
||||
cd zerog-storage-contracts
|
||||
```
|
||||
|
||||
Add the target network to your hardhat.config.js, e.g.
|
||||
```shell
|
||||
# example
|
||||
networks: {
|
||||
targetNetwork: {
|
||||
url: "******",
|
||||
accounts: [
|
||||
"******",
|
||||
],
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
### Compile
|
||||
```shell
|
||||
yarn
|
||||
yarn compile
|
||||
```
|
||||
|
||||
### Deploy contract
|
||||
```shell
|
||||
npx hardhat run scripts/deploy.ts --network targetNetwork
|
||||
```
|
||||
|
||||
Keep the deployed contract addresses.
|
||||
|
||||
## Run ZeroGStorage
|
||||
Update the config file run/config.toml as required:
|
||||
|
||||
```shell
|
||||
# p2p port
|
||||
network_libp2p_port
|
||||
|
||||
# rpc endpoint
|
||||
rpc_listen_address
|
||||
|
||||
# peer nodes
|
||||
network_libp2p_nodes
|
||||
|
||||
# flow contract address
|
||||
log_contract_address
|
||||
|
||||
# mine contract address
|
||||
mine_contract_address
|
||||
|
||||
# layer one blockchain rpc endpoint
|
||||
blockchain_rpc_endpoint
|
||||
```
|
||||
|
||||
Run node
|
||||
```shell
|
||||
cd run
|
||||
../target/release/zgs_node --config config.toml
|
||||
```
|
1
node/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/target
|
42
node/Cargo.toml
Normal file
@ -0,0 +1,42 @@
|
||||
[package]
|
||||
name = "zgs_node"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
build = "build.rs"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "=1.0.58", features = ["backtrace"] }
|
||||
clap = { version = "3.2.5", features = ["cargo"] }
|
||||
ctrlc = "3.2.2"
|
||||
error-chain = "0.12.4"
|
||||
ethereum-types = "0.14"
|
||||
exit-future = "0.2.0"
|
||||
futures = "0.3.21"
|
||||
file_location_cache = { path = "file_location_cache" }
|
||||
zgs_version = { path = "../common/zgs_version" }
|
||||
log_entry_sync = { path = "./log_entry_sync" }
|
||||
miner = { path = "./miner" }
|
||||
network = { path = "./network" }
|
||||
router = { path = "./router" }
|
||||
rpc = { path = "./rpc" }
|
||||
shared_types = { path = "./shared_types" }
|
||||
storage = { path = "./storage" }
|
||||
storage-async = { path = "./storage-async" }
|
||||
sync = { path = "./sync" }
|
||||
task_executor = { path = "../common/task_executor" }
|
||||
tokio = { version = "1.19.2", features = ["full"] }
|
||||
tokio-stream = { version = "0.1.9", features = ["sync"] }
|
||||
toml = "0.5.9"
|
||||
tracing = "0.1.35"
|
||||
tracing-subscriber = { version = "0.3.11", features = ["env-filter"] }
|
||||
tracing-appender = { version = "0.2.2" }
|
||||
chunk_pool = { path = "./chunk_pool" }
|
||||
itertools = "0.10.5"
|
||||
serde = { version = "1.0.137", features = ["derive"] }
|
||||
duration-str = "0.5.1"
|
||||
config = "0.13.1"
|
||||
|
||||
[dependencies.libp2p]
|
||||
version = "0.45.1"
|
||||
default-features = true
|
||||
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]
|
13
node/build.rs
Normal file
@ -0,0 +1,13 @@
|
||||
use std::process::Command;
|
||||
|
||||
fn main() {
|
||||
println!("cargo:rerun-if-changed=../zerog-storage-client");
|
||||
|
||||
let status = Command::new("go")
|
||||
.current_dir("../zerog-storage-client")
|
||||
.args(vec!["build", "-o", "../target"])
|
||||
.status()
|
||||
.unwrap();
|
||||
|
||||
println!("build zerog-storage-client with status {}", status);
|
||||
}
|
15
node/chunk_pool/Cargo.toml
Normal file
@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "chunk_pool"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "=1.0.58", features = ["backtrace"] }
|
||||
shared_types = { path = "../shared_types" }
|
||||
storage-async = { path = "../storage-async" }
|
||||
log_entry_sync = { path = "../log_entry_sync" }
|
||||
network = { path = "../network" }
|
||||
tokio = { version = "1.19.2", features = ["sync"] }
|
||||
async-lock = "2.5.0"
|
||||
hashlink = "0.8.0"
|
||||
tracing = "0.1.35"
|
93
node/chunk_pool/src/handler.rs
Normal file
@ -0,0 +1,93 @@
|
||||
use super::mem_pool::MemoryChunkPool;
|
||||
use crate::mem_pool::FileID;
|
||||
use anyhow::Result;
|
||||
use network::NetworkMessage;
|
||||
use shared_types::ChunkArray;
|
||||
use std::sync::Arc;
|
||||
use storage_async::Store;
|
||||
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
|
||||
|
||||
/// Handles the cached file once it is uploaded completely and verified on the blockchain.
|
||||
/// Generally, the file will be persisted into log store.
|
||||
pub struct ChunkPoolHandler {
|
||||
receiver: UnboundedReceiver<FileID>,
|
||||
mem_pool: Arc<MemoryChunkPool>,
|
||||
log_store: Store,
|
||||
sender: UnboundedSender<NetworkMessage>,
|
||||
}
|
||||
|
||||
impl ChunkPoolHandler {
|
||||
pub(crate) fn new(
|
||||
receiver: UnboundedReceiver<FileID>,
|
||||
mem_pool: Arc<MemoryChunkPool>,
|
||||
log_store: Store,
|
||||
sender: UnboundedSender<NetworkMessage>,
|
||||
) -> Self {
|
||||
ChunkPoolHandler {
|
||||
receiver,
|
||||
mem_pool,
|
||||
log_store,
|
||||
sender,
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes memory cached chunks into the store and finalizes the transaction.
|
||||
/// Note, a separate thread should be spawned to call this method.
|
||||
pub async fn handle(&mut self) -> Result<bool> {
|
||||
let id = match self.receiver.recv().await {
|
||||
Some(id) => id,
|
||||
None => return Ok(false),
|
||||
};
|
||||
|
||||
debug!(?id, "Received task to finalize transaction");
|
||||
|
||||
// TODO(qhz): remove from memory pool after transaction finalized,
|
||||
// when store support to write chunks with reference.
|
||||
if let Some(file) = self.mem_pool.remove_cached_file(&id.root).await {
|
||||
// If there are still cached chunks, write them into the store
|
||||
let mut segments: Vec<ChunkArray> = file.segments.into_values().collect();
|
||||
while let Some(seg) = segments.pop() {
|
||||
if !self
|
||||
.log_store
|
||||
.put_chunks_with_tx_hash(id.tx_id.seq, id.tx_id.hash, seg)
|
||||
.await?
|
||||
{
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self
|
||||
.log_store
|
||||
.finalize_tx_with_hash(id.tx_id.seq, id.tx_id.hash)
|
||||
.await?
|
||||
{
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
debug!(?id, "Transaction finalized");
|
||||
|
||||
// always remove file from pool after transaction finalized
|
||||
self.mem_pool.remove_file(&id.root).await;
|
||||
|
||||
let msg = NetworkMessage::AnnounceLocalFile { tx_id: id.tx_id };
|
||||
if let Err(e) = self.sender.send(msg) {
|
||||
error!(
|
||||
"Failed to send NetworkMessage::AnnounceLocalFile message, tx_seq={}, err={}",
|
||||
id.tx_id.seq, e
|
||||
);
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
info!("Worker started to finalize transactions");
|
||||
|
||||
loop {
|
||||
if let Err(e) = self.handle().await {
|
||||
warn!("Failed to write chunks or finalize transaction, {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
38
node/chunk_pool/src/lib.rs
Normal file
@ -0,0 +1,38 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
mod handler;
|
||||
mod mem_pool;
|
||||
|
||||
pub use handler::ChunkPoolHandler;
|
||||
pub use mem_pool::{FileID, MemoryChunkPool, SegmentInfo};
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct Config {
|
||||
pub write_window_size: usize,
|
||||
pub max_cached_chunks_all: usize,
|
||||
pub max_writings: usize,
|
||||
pub expiration_time_secs: u64,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn expiration_time(&self) -> Duration {
|
||||
Duration::from_secs(self.expiration_time_secs)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unbounded(
|
||||
config: Config,
|
||||
log_store: storage_async::Store,
|
||||
network_send: tokio::sync::mpsc::UnboundedSender<network::NetworkMessage>,
|
||||
) -> (Arc<MemoryChunkPool>, ChunkPoolHandler) {
|
||||
let (sender, receiver) = tokio::sync::mpsc::unbounded_channel();
|
||||
|
||||
let mem_pool = Arc::new(MemoryChunkPool::new(config, log_store.clone(), sender));
|
||||
let handler = ChunkPoolHandler::new(receiver, mem_pool.clone(), log_store, network_send);
|
||||
|
||||
(mem_pool, handler)
|
||||
}
|
158
node/chunk_pool/src/mem_pool/chunk_cache.rs
Normal file
@ -0,0 +1,158 @@
|
||||
use super::FileID;
|
||||
use crate::{Config, SegmentInfo};
|
||||
use anyhow::{bail, Result};
|
||||
use hashlink::LinkedHashMap;
|
||||
use shared_types::{bytes_to_chunks, ChunkArray, DataRoot, Transaction, CHUNK_SIZE};
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Used to cache chunks in memory pool and persist into db once log entry
|
||||
/// retrieved from blockchain.
|
||||
pub struct MemoryCachedFile {
|
||||
pub id: FileID,
|
||||
/// Memory cached segments, keyed by segment index.
|
||||
pub segments: HashMap<usize, ChunkArray>,
|
||||
/// Total number of chunks for the cached file, which is updated from the log entry.
|
||||
pub total_chunks: usize,
|
||||
/// Used for garbage collection. It is updated when a new segment is uploaded.
|
||||
expired_at: Instant,
|
||||
/// Number of chunks that are currently cached for this file.
|
||||
pub cached_chunk_num: usize,
|
||||
}
|
||||
|
||||
impl MemoryCachedFile {
|
||||
fn new(root: DataRoot, timeout: Duration) -> Self {
|
||||
MemoryCachedFile {
|
||||
id: FileID {
|
||||
root,
|
||||
tx_id: Default::default(),
|
||||
},
|
||||
segments: HashMap::default(),
|
||||
total_chunks: 0,
|
||||
expired_at: Instant::now().add(timeout),
|
||||
cached_chunk_num: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates file with transaction once log entry retrieved from blockchain.
|
||||
/// so that the memory cached segments can be written into the database.
|
||||
pub fn update_with_tx(&mut self, tx: &Transaction) {
|
||||
self.total_chunks = bytes_to_chunks(tx.size as usize);
|
||||
self.id.tx_id = tx.id();
|
||||
}
|
||||
|
||||
fn update_expiration_time(&mut self, timeout: Duration) {
|
||||
self.expired_at = Instant::now().add(timeout);
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
fn is_completed(&self) -> bool {
|
||||
self.total_chunks > 0 && self.cached_chunk_num >= self.total_chunks
|
||||
}
|
||||
|
||||
fn should_flush(&self) -> bool {
|
||||
self.total_chunks > 0 && self.cached_chunk_num > 0
|
||||
}
|
||||
}
|
||||
|
||||
/// ChunkPoolCache is used to cache small files whose log entry has not been retrieved
|
||||
/// from the L1 blockchain yet.
|
||||
pub struct ChunkPoolCache {
|
||||
config: Config,
|
||||
/// All cached files.
|
||||
/// Note, file root is used as key instead of `tx_seq`, since log entry
|
||||
/// has not been retrieved yet.
|
||||
files: LinkedHashMap<DataRoot, MemoryCachedFile>,
|
||||
/// Total number of chunks that cached in the memory pool.
|
||||
pub total_chunks: usize,
|
||||
}
|
||||
|
||||
impl ChunkPoolCache {
|
||||
pub fn new(config: Config) -> Self {
|
||||
ChunkPoolCache {
|
||||
config,
|
||||
files: LinkedHashMap::default(),
|
||||
total_chunks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_file(&self, root: &DataRoot) -> Option<&MemoryCachedFile> {
|
||||
self.files.get(root)
|
||||
}
|
||||
|
||||
pub fn get_file_mut(&mut self, root: &DataRoot) -> Option<&mut MemoryCachedFile> {
|
||||
self.files.get_mut(root)
|
||||
}
|
||||
|
||||
pub fn remove_file(&mut self, root: &DataRoot) -> Option<MemoryCachedFile> {
|
||||
let file = self.files.remove(root)?;
|
||||
self.update_total_chunks_when_remove_file(&file);
|
||||
Some(file)
|
||||
}
|
||||
|
||||
/// Remove files that no new segment uploaded for a long time.
|
||||
///
|
||||
/// Note, when log sync is delayed, files may also be garbage collected even if the
|
||||
/// entire file is uploaded, because it is hard to tell whether log sync is delayed
|
||||
/// or the user uploaded an invalid file, e.g. for attack purposes.
|
||||
///
|
||||
/// Once garbage collected, the user can simply upload the entire file again,
|
||||
/// which is fast enough due to the small file size.
|
||||
fn garbage_collect(&mut self) {
|
||||
let now = Instant::now();
|
||||
|
||||
while let Some((_, file)) = self.files.front() {
|
||||
if file.expired_at > now {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some((r, f)) = self.files.pop_front() {
|
||||
self.update_total_chunks_when_remove_file(&f);
|
||||
debug!("Garbage collected for file {}", r);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_total_chunks_when_remove_file(&mut self, file: &MemoryCachedFile) {
|
||||
assert!(self.total_chunks >= file.cached_chunk_num);
|
||||
self.total_chunks -= file.cached_chunk_num;
|
||||
}
|
||||
|
||||
/// Caches the specified segment in memory.
|
||||
///
|
||||
/// Returns whether there are cached segments and the log entry has also been retrieved.
|
||||
pub fn cache_segment(&mut self, seg_info: SegmentInfo) -> Result<bool> {
|
||||
// always GC at first
|
||||
self.garbage_collect();
|
||||
|
||||
let file = self
|
||||
.files
|
||||
.entry(seg_info.root)
|
||||
.or_insert_with(|| MemoryCachedFile::new(seg_info.root, self.config.expiration_time()));
|
||||
|
||||
// Segment already cached in memory. Directly return OK
|
||||
if file.segments.contains_key(&seg_info.seg_index) {
|
||||
return Ok(file.should_flush());
|
||||
}
|
||||
|
||||
// Otherwise, just cache segment in memory
|
||||
let num_chunks = seg_info.seg_data.len() / CHUNK_SIZE;
|
||||
|
||||
// Limits the cached chunks in the memory pool.
|
||||
if self.total_chunks + num_chunks > self.config.max_cached_chunks_all {
|
||||
bail!(
|
||||
"exceeds the maximum cached chunks of whole pool: {}",
|
||||
self.config.max_cached_chunks_all
|
||||
);
|
||||
}
|
||||
|
||||
// Cache segment and update the counter for cached chunks.
|
||||
self.total_chunks += num_chunks;
|
||||
file.cached_chunk_num += num_chunks;
|
||||
file.update_expiration_time(self.config.expiration_time());
|
||||
file.segments.insert(seg_info.seg_index, seg_info.into());
|
||||
|
||||
Ok(file.should_flush())
|
||||
}
|
||||
}
|
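// --- Illustration only: NOT part of the original commit. ---
// A sketch of the cache flow described above: a segment can be cached before the
// log entry is retrieved, but nothing is flushable until `update_with_tx` fills in
// `total_chunks`. The `Config` values below are arbitrary.
#[cfg(test)]
mod cache_flow_example {
    use super::*;

    #[test]
    fn segment_cached_but_not_flushable_before_log_entry() {
        let config = Config {
            write_window_size: 4,
            max_cached_chunks_all: 1024,
            max_writings: 4,
            expiration_time_secs: 3600,
        };
        let mut cache = ChunkPoolCache::new(config);

        let should_flush = cache
            .cache_segment(SegmentInfo {
                root: DataRoot::default(),
                seg_data: vec![0u8; CHUNK_SIZE * 2],
                seg_index: 0,
                chunks_per_segment: 16,
            })
            .unwrap();

        // `total_chunks` of the file is still unknown (no log entry yet).
        assert!(!should_flush);
        assert_eq!(cache.total_chunks, 2);
    }
}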
332
node/chunk_pool/src/mem_pool/chunk_pool_inner.rs
Normal file
@ -0,0 +1,332 @@
|
||||
use super::chunk_cache::{ChunkPoolCache, MemoryCachedFile};
|
||||
use super::chunk_write_control::ChunkPoolWriteCtrl;
|
||||
use super::FileID;
|
||||
use crate::Config;
|
||||
use anyhow::{bail, Result};
|
||||
use async_lock::Mutex;
|
||||
use log_entry_sync::LogSyncEvent;
|
||||
use shared_types::{
|
||||
bytes_to_chunks, compute_segment_size, ChunkArray, DataRoot, Transaction, CHUNK_SIZE,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use storage_async::Store;
|
||||
use tokio::sync::broadcast::{error::RecvError, Receiver};
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
struct Inner {
|
||||
config: Config,
|
||||
segment_cache: ChunkPoolCache,
|
||||
write_control: ChunkPoolWriteCtrl,
|
||||
}
|
||||
|
||||
impl Inner {
|
||||
fn new(config: Config) -> Self {
|
||||
Inner {
|
||||
config,
|
||||
segment_cache: ChunkPoolCache::new(config),
|
||||
write_control: ChunkPoolWriteCtrl::new(config),
|
||||
}
|
||||
}
|
||||
|
||||
fn after_flush_cache(&mut self) {
|
||||
assert!(self.write_control.total_writings > 0);
|
||||
self.write_control.total_writings -= 1;
|
||||
}
|
||||
|
||||
/// Return the tx seq and all segments that belong to the root.
|
||||
fn get_all_cached_segments_to_write(
|
||||
&mut self,
|
||||
root: &DataRoot,
|
||||
) -> Result<(FileID, Vec<ChunkArray>)> {
|
||||
// Limits the number of writing threads.
|
||||
if self.write_control.total_writings >= self.config.max_writings {
|
||||
bail!("too many data writing: {}", self.config.max_writings);
|
||||
}
|
||||
|
||||
let file = match self.segment_cache.remove_file(root) {
|
||||
Some(f) => f,
|
||||
None => bail!("file not found to write into store {:?}", root),
|
||||
};
|
||||
let id = file.id;
|
||||
let segs = file.segments.into_values().collect();
|
||||
|
||||
self.write_control.total_writings += 1;
|
||||
|
||||
Ok((id, segs))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SegmentInfo {
|
||||
pub root: DataRoot,
|
||||
pub seg_data: Vec<u8>,
|
||||
pub seg_index: usize,
|
||||
pub chunks_per_segment: usize,
|
||||
}
|
||||
|
||||
impl From<SegmentInfo> for ChunkArray {
|
||||
fn from(seg_info: SegmentInfo) -> Self {
|
||||
let start_index = seg_info.seg_index * seg_info.chunks_per_segment;
|
||||
ChunkArray {
|
||||
data: seg_info.seg_data,
|
||||
start_index: start_index as u64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Caches data chunks in memory before the entire file is uploaded to the storage node
|
||||
/// and the data root is verified on the blockchain.
|
||||
pub struct MemoryChunkPool {
|
||||
inner: Mutex<Inner>,
|
||||
log_store: Store,
|
||||
sender: UnboundedSender<FileID>,
|
||||
}
|
||||
|
||||
impl MemoryChunkPool {
|
||||
pub(crate) fn new(config: Config, log_store: Store, sender: UnboundedSender<FileID>) -> Self {
|
||||
MemoryChunkPool {
|
||||
inner: Mutex::new(Inner::new(config)),
|
||||
log_store,
|
||||
sender,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn validate_segment_size(&self, segment: &Vec<u8>) -> Result<()> {
|
||||
if segment.is_empty() {
|
||||
bail!("data is empty");
|
||||
}
|
||||
|
||||
if segment.len() % CHUNK_SIZE != 0 {
|
||||
bail!("invalid data length");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cache_chunks(&self, seg_info: SegmentInfo) -> Result<()> {
|
||||
let root = seg_info.root;
|
||||
debug!("cache_chunks, root={:?} index={}", root, seg_info.seg_index);
|
||||
let should_flush = self
|
||||
.inner
|
||||
.lock()
|
||||
.await
|
||||
.segment_cache
|
||||
.cache_segment(seg_info)?;
|
||||
|
||||
// store and finalize the cached file if completed
|
||||
if should_flush {
|
||||
debug!("cache_chunk: flush cached chunks");
|
||||
self.write_all_cached_chunks_and_finalize(root).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn write_chunks(
|
||||
&self,
|
||||
seg_info: SegmentInfo,
|
||||
file_id: FileID,
|
||||
file_size: usize,
|
||||
) -> Result<()> {
|
||||
let total_chunks = bytes_to_chunks(file_size);
|
||||
|
||||
debug!(
|
||||
"Begin to write segment, root={}, segment_size={}, segment_index={}",
|
||||
seg_info.root,
|
||||
seg_info.seg_data.len(),
|
||||
seg_info.seg_index,
|
||||
);
|
||||
|
||||
// Write the segment within the sliding window
|
||||
let (total_segments, _) = compute_segment_size(total_chunks, seg_info.chunks_per_segment);
|
||||
self.inner.lock().await.write_control.write_segment(
|
||||
file_id,
|
||||
seg_info.seg_index,
|
||||
total_segments,
|
||||
)?;
|
||||
|
||||
// Write memory cached segments into store.
|
||||
// TODO(qhz): error handling
|
||||
// 1. Push the failed segment back to front. (enhance store to return Err(ChunkArray))
|
||||
// 2. Put the incomplete segments back to the memory pool.
|
||||
let seg = ChunkArray {
|
||||
data: seg_info.seg_data,
|
||||
start_index: (seg_info.seg_index * seg_info.chunks_per_segment) as u64,
|
||||
};
|
||||
|
||||
match self
|
||||
.log_store
|
||||
.put_chunks_with_tx_hash(file_id.tx_id.seq, file_id.tx_id.hash, seg)
|
||||
.await
|
||||
{
|
||||
Ok(true) => {}
|
||||
Ok(false) => {
|
||||
self.inner
|
||||
.lock()
|
||||
.await
|
||||
.write_control
|
||||
.on_write_failed(&seg_info.root, seg_info.seg_index);
|
||||
// remove the file if transaction reverted
|
||||
self.inner
|
||||
.lock()
|
||||
.await
|
||||
.write_control
|
||||
.remove_file(&seg_info.root);
|
||||
bail!("Transaction reverted, please upload again");
|
||||
}
|
||||
Err(e) => {
|
||||
self.inner
|
||||
.lock()
|
||||
.await
|
||||
.write_control
|
||||
.on_write_failed(&seg_info.root, seg_info.seg_index);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
let all_uploaded = self
|
||||
.inner
|
||||
.lock()
|
||||
.await
|
||||
.write_control
|
||||
.on_write_succeeded(&seg_info.root, seg_info.seg_index);
|
||||
|
||||
// Notify to finalize transaction asynchronously.
|
||||
if all_uploaded {
|
||||
if let Err(e) = self.sender.send(file_id) {
|
||||
// Channel receiver will not be dropped until program exit.
|
||||
bail!("channel send error: {}", e);
|
||||
}
|
||||
debug!("Queue to finalize transaction for file {}", seg_info.root);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Updates the cached file info when log entry retrieved from blockchain.
|
||||
pub async fn update_file_info(&self, tx: &Transaction) -> Result<bool> {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Do nothing if file not uploaded yet.
|
||||
let file = match inner.segment_cache.get_file_mut(&tx.data_merkle_root) {
|
||||
Some(f) => f,
|
||||
None => return Ok(false),
|
||||
};
|
||||
|
||||
// Update the file info with transaction.
|
||||
file.update_with_tx(tx);
|
||||
|
||||
// File partially uploaded and it's up to user thread
|
||||
// to write chunks into store and finalize transaction.
|
||||
if file.cached_chunk_num < file.total_chunks {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Otherwise, notify to write all memory cached chunks and finalize transaction.
|
||||
let file_id = FileID {
|
||||
root: tx.data_merkle_root,
|
||||
tx_id: tx.id(),
|
||||
};
|
||||
if let Err(e) = self.sender.send(file_id) {
|
||||
// Channel receiver will not be dropped until program exit.
|
||||
bail!("channel send error: {}", e);
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
pub async fn monitor_log_entry(chunk_pool: Arc<Self>, mut receiver: Receiver<LogSyncEvent>) {
|
||||
info!("Start to monitor log entry");
|
||||
|
||||
loop {
|
||||
match receiver.recv().await {
|
||||
Ok(LogSyncEvent::ReorgDetected { .. }) => {}
|
||||
Ok(LogSyncEvent::Reverted { .. }) => {}
|
||||
Ok(LogSyncEvent::TxSynced { tx }) => {
|
||||
if let Err(_e) = chunk_pool.update_file_info(&tx).await {
|
||||
error!(
|
||||
"Failed to update file info. tx seq={}, tx_root={}",
|
||||
tx.seq, tx.data_merkle_root
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(RecvError::Closed) => {
|
||||
// program terminated
|
||||
info!("Completed to monitor log entry");
|
||||
return;
|
||||
}
|
||||
Err(RecvError::Lagged(lagged)) => {
|
||||
error!(%lagged, "Lagged messages: (Lagged)");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn remove_cached_file(&self, root: &DataRoot) -> Option<MemoryCachedFile> {
|
||||
self.inner.lock().await.segment_cache.remove_file(root)
|
||||
}
|
||||
|
||||
pub(crate) async fn remove_file(&self, root: &DataRoot) -> bool {
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner.segment_cache.remove_file(root).is_some()
|
||||
|| inner.write_control.remove_file(root).is_some()
|
||||
}
|
||||
|
||||
pub async fn check_already_has_cache(&self, root: &DataRoot) -> bool {
|
||||
self.inner
|
||||
.lock()
|
||||
.await
|
||||
.segment_cache
|
||||
.get_file(root)
|
||||
.is_some()
|
||||
}
|
||||
|
||||
async fn write_all_cached_chunks_and_finalize(&self, root: DataRoot) -> Result<()> {
|
||||
let (file, mut segments) = self
|
||||
.inner
|
||||
.lock()
|
||||
.await
|
||||
.get_all_cached_segments_to_write(&root)?;
|
||||
|
||||
while let Some(seg) = segments.pop() {
|
||||
// TODO(qhz): error handling
|
||||
// 1. Push the failed segment back to front. (enhance store to return Err(ChunkArray))
|
||||
// 2. Put the incomplete segments back to the memory pool.
|
||||
match self
|
||||
.log_store
|
||||
.put_chunks_with_tx_hash(file.tx_id.seq, file.tx_id.hash, seg)
|
||||
.await
|
||||
{
|
||||
Ok(true) => {}
|
||||
Ok(false) => {
|
||||
self.inner.lock().await.after_flush_cache();
|
||||
bail!("Transaction reverted, please upload again");
|
||||
}
|
||||
Err(e) => {
|
||||
self.inner.lock().await.after_flush_cache();
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.inner.lock().await.after_flush_cache();
|
||||
|
||||
if let Err(e) = self.sender.send(file) {
|
||||
// Channel receiver will not be dropped until program exit.
|
||||
bail!("channel send error: {}", e);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_uploaded_seg_num(&self, root: &DataRoot) -> (usize, bool) {
|
||||
let inner = self.inner.lock().await;
|
||||
|
||||
if let Some(file) = inner.segment_cache.get_file(root) {
|
||||
(file.cached_chunk_num, true)
|
||||
} else if let Some(file) = inner.write_control.get_file(root) {
|
||||
(file.uploaded_seg_num(), false)
|
||||
} else {
|
||||
(0, false)
|
||||
}
|
||||
}
|
||||
}
|
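// --- Illustration only: NOT part of the original commit. ---
// A sketch of the `From<SegmentInfo>` conversion above: a segment lands at chunk
// offset `seg_index * chunks_per_segment` within the file.
#[cfg(test)]
mod segment_info_example {
    use super::*;

    #[test]
    fn segment_info_maps_to_chunk_array_offset() {
        let seg = SegmentInfo {
            root: DataRoot::default(),
            seg_data: vec![0u8; CHUNK_SIZE],
            seg_index: 3,
            chunks_per_segment: 1024,
        };
        let arr: ChunkArray = seg.into();
        assert_eq!(arr.start_index, 3 * 1024);
        assert_eq!(arr.data.len(), CHUNK_SIZE);
    }
}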
200
node/chunk_pool/src/mem_pool/chunk_write_control.rs
Normal file
@ -0,0 +1,200 @@
|
||||
use super::FileID;
|
||||
use crate::Config;
|
||||
use anyhow::{bail, Result};
|
||||
use shared_types::DataRoot;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// The segment status in sliding window
|
||||
#[derive(PartialEq, Eq, Debug)]
|
||||
enum SlotStatus {
|
||||
Writing, // segment in writing
|
||||
Finished, // segment uploaded in store
|
||||
}
|
||||
|
||||
/// Sliding window is used to control the concurrent uploading process of a file.
|
||||
/// Bounded window allows segments to be uploaded concurrently, while having a capacity
|
||||
/// limit on writing threads per file. Meanwhile, the left_boundary field records
|
||||
/// how many segments have been uploaded.
|
||||
struct CtrlWindow {
|
||||
size: usize,
|
||||
left_boundary: usize,
|
||||
slots: HashMap<usize, SlotStatus>,
|
||||
}
|
||||
|
||||
impl CtrlWindow {
|
||||
fn new(size: usize) -> Self {
|
||||
CtrlWindow {
|
||||
size,
|
||||
left_boundary: 0,
|
||||
slots: HashMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the specified slot by `index` has been already uploaded.
|
||||
/// Note, this function does not check the right boundary.
|
||||
fn check_duplicate(&self, index: usize) -> bool {
|
||||
index < self.left_boundary || self.slots.contains_key(&index)
|
||||
}
|
||||
|
||||
/// Should call check_duplicate and handle the duplicated case before calling this function.
|
||||
/// This function assumes that there are no duplicate slots in the window.
|
||||
fn start_writing(&mut self, index: usize) -> Result<()> {
|
||||
assert!(index >= self.left_boundary);
|
||||
|
||||
if index >= self.left_boundary + self.size {
|
||||
bail!(
|
||||
"index exceeds window limit, index = {}, left_boundary = {}, window_size = {}",
|
||||
index,
|
||||
self.left_boundary,
|
||||
self.size
|
||||
);
|
||||
}
|
||||
|
||||
assert!(!self.slots.contains_key(&index));
|
||||
self.slots.insert(index, SlotStatus::Writing);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rollback_writing(&mut self, index: usize) {
|
||||
let slot_status = self.slots.remove(&index);
|
||||
assert_eq!(slot_status, Some(SlotStatus::Writing));
|
||||
}
|
||||
|
||||
fn finish_writing(&mut self, index: usize) {
|
||||
let old_status = self.slots.insert(index, SlotStatus::Finished);
|
||||
assert_eq!(old_status, Some(SlotStatus::Writing));
|
||||
|
||||
// move forward if leftmost slot completed
|
||||
let mut left_boundary = self.left_boundary;
|
||||
while let Some(&SlotStatus::Finished) = self.slots.get(&left_boundary) {
|
||||
self.slots.remove(&left_boundary);
|
||||
left_boundary += 1;
|
||||
}
|
||||
|
||||
self.left_boundary = left_boundary;
|
||||
}
|
||||
}
|
||||
|
||||
/// To track the file uploading progress.
|
||||
pub struct FileWriteCtrl {
|
||||
pub id: FileID,
|
||||
total_segments: usize,
|
||||
window: CtrlWindow,
|
||||
}
|
||||
|
||||
impl FileWriteCtrl {
|
||||
fn new(id: FileID, total_segments: usize, window_size: usize) -> Self {
|
||||
FileWriteCtrl {
|
||||
id,
|
||||
total_segments,
|
||||
window: CtrlWindow::new(window_size),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uploaded_seg_num(&self) -> usize {
|
||||
self.window.left_boundary
|
||||
}
|
||||
}
|
||||
|
||||
/// ChunkPoolWriteCtrl is used to track uploading progress for all files,
|
||||
/// and limits the maximum number of threads to write segments into store.
|
||||
pub struct ChunkPoolWriteCtrl {
|
||||
config: Config,
|
||||
/// Windows to control writing processes of files
|
||||
files: HashMap<DataRoot, FileWriteCtrl>,
|
||||
/// Total number of threads that are writing chunks into store.
|
||||
pub total_writings: usize,
|
||||
}
|
||||
|
||||
impl ChunkPoolWriteCtrl {
|
||||
pub fn new(config: Config) -> Self {
|
||||
ChunkPoolWriteCtrl {
|
||||
files: HashMap::default(),
|
||||
total_writings: 0,
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_file(&self, root: &DataRoot) -> Option<&FileWriteCtrl> {
|
||||
self.files.get(root)
|
||||
}
|
||||
|
||||
pub fn remove_file(&mut self, root: &DataRoot) -> Option<FileWriteCtrl> {
|
||||
self.files.remove(root)
|
||||
}
|
||||
|
||||
pub fn write_segment(
|
||||
&mut self,
|
||||
id: FileID,
|
||||
seg_index: usize,
|
||||
total_segments: usize,
|
||||
) -> Result<()> {
|
||||
let file_ctrl = self.files.entry(id.root).or_insert_with(|| {
|
||||
FileWriteCtrl::new(id, total_segments, self.config.write_window_size)
|
||||
});
|
||||
|
||||
// ensure the tx_id has not changed during file uploading
|
||||
if file_ctrl.id != id {
|
||||
self.files.remove(&id.root);
|
||||
bail!("Transaction reverted when uploading segments, please try again");
|
||||
}
|
||||
|
||||
if file_ctrl.total_segments != total_segments {
|
||||
bail!(
|
||||
"file size in segment doesn't match with file size declared in previous segment. Previous total segments:{}, current total segments:{}s",
|
||||
file_ctrl.total_segments,
|
||||
total_segments
|
||||
);
|
||||
}
|
||||
|
||||
// Segment already uploaded.
|
||||
if file_ctrl.window.check_duplicate(seg_index) {
|
||||
bail!("segment has already been uploaded or is being uploaded");
|
||||
}
|
||||
|
||||
// Limits the number of writing threads.
|
||||
if self.total_writings >= self.config.max_writings {
|
||||
bail!("too many data writing: {}", self.config.max_writings);
|
||||
}
|
||||
|
||||
file_ctrl.window.start_writing(seg_index)?;
|
||||
|
||||
self.total_writings += 1;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn on_write_succeeded(&mut self, root: &DataRoot, seg_index: usize) -> bool {
|
||||
let file_ctrl = match self.files.get_mut(root) {
|
||||
Some(w) => w,
|
||||
None => return false,
|
||||
};
|
||||
|
||||
file_ctrl.window.finish_writing(seg_index);
|
||||
|
||||
assert!(self.total_writings > 0);
|
||||
self.total_writings -= 1;
|
||||
|
||||
debug!(
|
||||
"Succeeded to write segment, root={}, seg_index={}, total_writings={}",
|
||||
root, seg_index, self.total_writings
|
||||
);
|
||||
|
||||
// All chunks of file written into store.
|
||||
file_ctrl.window.left_boundary >= file_ctrl.total_segments
|
||||
}
|
||||
|
||||
pub fn on_write_failed(&mut self, root: &DataRoot, seg_index: usize) {
|
||||
let file_ctrl = match self.files.get_mut(root) {
|
||||
Some(w) => w,
|
||||
None => return,
|
||||
};
|
||||
|
||||
// Roll back the segment status if the write failed
|
||||
file_ctrl.window.rollback_writing(seg_index);
|
||||
|
||||
assert!(self.total_writings > 0);
|
||||
self.total_writings -= 1;
|
||||
}
|
||||
}
|
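// --- Illustration only: NOT part of the original commit. ---
// A sketch of the sliding-window behaviour documented above: the left boundary
// (what `uploaded_seg_num` reports) only advances once the leftmost pending
// segment finishes, even if later segments finish first.
#[cfg(test)]
mod window_example {
    use super::*;

    #[test]
    fn left_boundary_advances_only_from_the_left() {
        let mut window = CtrlWindow::new(4);

        window.start_writing(0).unwrap();
        window.start_writing(1).unwrap();

        // Segment 1 finishing first does not move the boundary past pending segment 0.
        window.finish_writing(1);
        assert_eq!(window.left_boundary, 0);

        // Once segment 0 finishes, the boundary jumps over both finished slots.
        window.finish_writing(0);
        assert_eq!(window.left_boundary, 2);
    }
}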
15
node/chunk_pool/src/mem_pool/mod.rs
Normal file
@ -0,0 +1,15 @@
|
||||
mod chunk_cache;
|
||||
mod chunk_pool_inner;
|
||||
mod chunk_write_control;
|
||||
|
||||
pub use chunk_pool_inner::MemoryChunkPool;
|
||||
pub use chunk_pool_inner::SegmentInfo;
|
||||
|
||||
use shared_types::DataRoot;
|
||||
use shared_types::TxID;
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
|
||||
pub struct FileID {
|
||||
pub root: DataRoot,
|
||||
pub tx_id: TxID,
|
||||
}
|
13
node/file_location_cache/Cargo.toml
Normal file
@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "file_location_cache"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
hashlink = "0.8.0"
|
||||
network = { path = "../network" }
|
||||
parking_lot = "0.12.1"
|
||||
rand = "0.8.5"
|
||||
tracing = "0.1.35"
|
||||
priority-queue = "1.2.3"
|
||||
shared_types = { path = "../shared_types" }
|
532
node/file_location_cache/src/file_location_cache.rs
Normal file
@ -0,0 +1,532 @@
|
||||
use crate::Config;
|
||||
use network::types::SignedAnnounceFile;
|
||||
use network::PeerId;
|
||||
use parking_lot::Mutex;
|
||||
use priority_queue::PriorityQueue;
|
||||
use rand::seq::IteratorRandom;
|
||||
use shared_types::{timestamp_now, TxID};
|
||||
use std::cmp::Reverse;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Caches limited announcements of specified file from different peers.
|
||||
struct AnnouncementCache {
|
||||
/// Maximum number of announcements in cache.
|
||||
capacity: usize,
|
||||
|
||||
/// Timeout in seconds to expire the cached announcement.
|
||||
/// This is because the file may be removed from the announcing
|
||||
/// storage node.
|
||||
timeout_secs: u32,
|
||||
|
||||
/// All cached announcements that mapped from peer id to announcement.
|
||||
/// Note, only cache the latest announcement for each peer.
|
||||
items: HashMap<PeerId, SignedAnnounceFile>,
|
||||
|
||||
/// All announcements are prioritized by timestamp.
|
||||
/// The top element is the oldest announcement.
|
||||
priorities: PriorityQueue<PeerId, Reverse<u32>>,
|
||||
}
|
||||
|
||||
impl AnnouncementCache {
|
||||
fn new(capacity: usize, timeout_secs: u32) -> Self {
|
||||
assert!(capacity > 0);
|
||||
|
||||
AnnouncementCache {
|
||||
capacity,
|
||||
timeout_secs,
|
||||
items: Default::default(),
|
||||
priorities: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the priority of the oldest announcement if any.
|
||||
fn peek_priority(&self) -> Option<Reverse<u32>> {
|
||||
let (_, &Reverse(ts)) = self.priorities.peek()?;
|
||||
Some(Reverse(ts))
|
||||
}
|
||||
|
||||
/// Removes the oldest announcement if any.
|
||||
fn pop(&mut self) -> Option<SignedAnnounceFile> {
|
||||
let (peer_id, _) = self.priorities.pop()?;
|
||||
self.items.remove(&peer_id)
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.items.len()
|
||||
}
|
||||
|
||||
/// Garbage collects expired announcements.
|
||||
fn garbage_collect(&mut self) -> usize {
|
||||
let mut collected = 0;
|
||||
let now = timestamp_now();
|
||||
|
||||
while let Some((_, &Reverse(ts))) = self.priorities.peek() {
|
||||
if ts + self.timeout_secs > now {
|
||||
break;
|
||||
}
|
||||
|
||||
self.pop();
|
||||
collected += 1;
|
||||
}
|
||||
|
||||
collected
|
||||
}
|
||||
|
||||
fn do_insert_or_update(&mut self, announcement: SignedAnnounceFile) {
|
||||
let peer_id = announcement.peer_id.clone().into();
|
||||
self.priorities
|
||||
.push(peer_id, Reverse(announcement.timestamp));
|
||||
self.items.insert(peer_id, announcement);
|
||||
}
|
||||
|
||||
/// Insert the specified `announcement` into cache.
|
||||
fn insert(&mut self, announcement: SignedAnnounceFile) {
|
||||
self.garbage_collect();
|
||||
|
||||
let peer_id = announcement.peer_id.clone().into();
|
||||
|
||||
if let Some(existing) = self.items.get(&peer_id) {
|
||||
// ignore older announcement
|
||||
if announcement.timestamp <= existing.timestamp {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// insert or update
|
||||
self.do_insert_or_update(announcement);
|
||||
|
||||
// remove oldest one if capacity exceeded
|
||||
if self.items.len() > self.capacity {
|
||||
self.pop();
|
||||
}
|
||||
}
|
||||
|
||||
/// Randomly pick an announcement if any.
|
||||
fn random(&mut self) -> (Option<SignedAnnounceFile>, usize) {
|
||||
let collected = self.garbage_collect();
|
||||
|
||||
// all announcements garbage collected
|
||||
if self.items.is_empty() {
|
||||
return (None, collected);
|
||||
}
|
||||
|
||||
let chosen = self
|
||||
.items
|
||||
.iter()
|
||||
.choose(&mut rand::thread_rng())
|
||||
.map(|(_, item)| item.clone());
|
||||
|
||||
(chosen, collected)
|
||||
}
|
||||
|
||||
/// Returns all announcements.
|
||||
fn all(&mut self) -> (Vec<SignedAnnounceFile>, usize) {
|
||||
let collected = self.garbage_collect();
|
||||
let result = self.items.values().cloned().collect();
|
||||
(result, collected)
|
||||
}
|
||||
}
|
||||
|
||||
/// Caches announcements for different files.
|
||||
struct FileCache {
|
||||
/// Cache configuration.
|
||||
config: Config,
|
||||
|
||||
/// Total number of announcements cached for all files.
|
||||
total_announcements: usize,
|
||||
|
||||
/// All cached files that mapped from `tx_id` to `AnnouncementCache`.
|
||||
files: HashMap<TxID, AnnouncementCache>,
|
||||
|
||||
/// All files are prioritized by timestamp.
|
||||
/// The top element is the `AnnouncementCache` that has the oldest announcement.
|
||||
priorities: PriorityQueue<TxID, Reverse<u32>>,
|
||||
}
|
||||
|
||||
impl FileCache {
|
||||
fn new(config: Config) -> Self {
|
||||
FileCache {
|
||||
config,
|
||||
total_announcements: 0,
|
||||
files: Default::default(),
|
||||
priorities: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert the specified `announcement` into cache.
|
||||
fn insert(&mut self, announcement: SignedAnnounceFile) {
|
||||
let tx_id = announcement.tx_id;
|
||||
|
||||
let item = self.files.entry(tx_id).or_insert_with(|| {
|
||||
AnnouncementCache::new(
|
||||
self.config.max_entries_per_file,
|
||||
self.config.entry_expiration_time_secs,
|
||||
)
|
||||
});
|
||||
|
||||
assert!(self.total_announcements >= item.len());
|
||||
self.total_announcements -= item.len();
|
||||
|
||||
item.insert(announcement);
|
||||
|
||||
if let Some(priority) = item.peek_priority() {
|
||||
self.priorities.push(tx_id, priority);
|
||||
}
|
||||
|
||||
self.total_announcements += item.len();
|
||||
if self.total_announcements > self.config.max_entries_total {
|
||||
self.pop();
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes the oldest file announcement.
|
||||
fn pop(&mut self) -> Option<SignedAnnounceFile> {
|
||||
let (&tx_id, _) = self.priorities.peek()?;
|
||||
let item = self.files.get_mut(&tx_id)?;
|
||||
|
||||
let result = item.pop()?;
|
||||
|
||||
self.update_on_announcement_cache_changed(&tx_id, 1);
|
||||
|
||||
Some(result)
|
||||
}
|
||||
|
||||
/// Randomly picks an announcement of the specified file by `tx_id`.
|
||||
fn random(&mut self, tx_id: TxID) -> Option<SignedAnnounceFile> {
|
||||
let item = self.files.get_mut(&tx_id)?;
|
||||
let (result, collected) = item.random();
|
||||
self.update_on_announcement_cache_changed(&tx_id, collected);
|
||||
result
|
||||
}
|
||||
|
||||
fn update_on_announcement_cache_changed(&mut self, tx_id: &TxID, removed: usize) {
|
||||
if removed == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
assert!(self.total_announcements >= removed);
|
||||
self.total_announcements -= removed;
|
||||
|
||||
let item = match self.files.get_mut(tx_id) {
|
||||
Some(v) => v,
|
||||
None => return,
|
||||
};
|
||||
|
||||
if let Some(priority) = item.peek_priority() {
|
||||
// update priority if changed
|
||||
self.priorities.change_priority(tx_id, priority);
|
||||
} else {
|
||||
// remove entry if empty
|
||||
self.files.remove(tx_id);
|
||||
self.priorities.remove(tx_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns all the announcements of the specified file by `tx_id`.
|
||||
fn all(&mut self, tx_id: TxID) -> Option<Vec<SignedAnnounceFile>> {
|
||||
let item = self.files.get_mut(&tx_id)?;
|
||||
let (result, collected) = item.all();
|
||||
self.update_on_announcement_cache_changed(&tx_id, collected);
|
||||
Some(result)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FileLocationCache {
|
||||
cache: Mutex<FileCache>,
|
||||
}
|
||||
|
||||
impl Default for FileLocationCache {
|
||||
fn default() -> Self {
|
||||
FileLocationCache {
|
||||
cache: Mutex::new(FileCache::new(Default::default())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FileLocationCache {
|
||||
pub fn insert(&self, announcement: SignedAnnounceFile) {
|
||||
self.cache.lock().insert(announcement);
|
||||
}
|
||||
|
||||
pub fn get_one(&self, tx_id: TxID) -> Option<SignedAnnounceFile> {
|
||||
self.cache.lock().random(tx_id)
|
||||
}
|
||||
|
||||
pub fn get_all(&self, tx_id: TxID) -> Vec<SignedAnnounceFile> {
|
||||
self.cache.lock().all(tx_id).unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::cmp::Reverse;
|
||||
|
||||
use network::{types::SignedAnnounceFile, PeerId};
|
||||
use shared_types::{timestamp_now, TxID};
|
||||
|
||||
use crate::{test_util::AnnounceFileBuilder, Config};
|
||||
|
||||
use super::{AnnouncementCache, FileCache};
|
||||
|
||||
fn create_file(peer_id: Option<PeerId>, timestamp: u32) -> SignedAnnounceFile {
|
||||
let builder = AnnounceFileBuilder::default().with_timestamp(timestamp);
|
||||
if let Some(id) = peer_id {
|
||||
builder.with_peer_id(id).build()
|
||||
} else {
|
||||
builder.build()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_peek_priority() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
// empty by default
|
||||
assert_eq!(cache.peek_priority(), None);
|
||||
|
||||
// one file with timestamp `now - 5`
|
||||
let t1 = now - 5;
|
||||
cache.insert(create_file(None, t1));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(t1)));
|
||||
|
||||
// newer file with timestamp `now - 4`
|
||||
let t2 = now - 4;
|
||||
cache.insert(create_file(None, t2));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(t1)));
|
||||
|
||||
// old file with timestamp `now - 6`
|
||||
let t3 = now - 6;
|
||||
cache.insert(create_file(None, t3));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(t3)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_pop_len() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
// empty by default
|
||||
assert_eq!(cache.pop(), None);
|
||||
assert_eq!(cache.len(), 0);
|
||||
|
||||
cache.insert(create_file(None, now - 2));
|
||||
cache.insert(create_file(None, now - 3));
|
||||
cache.insert(create_file(None, now - 1));
|
||||
assert_eq!(cache.len(), 3);
|
||||
|
||||
// pop from oldest to newest
|
||||
assert_eq!(cache.pop().unwrap().timestamp, now - 3);
|
||||
assert_eq!(cache.pop().unwrap().timestamp, now - 2);
|
||||
assert_eq!(cache.pop().unwrap().timestamp, now - 1);
|
||||
assert_eq!(cache.pop(), None);
|
||||
assert_eq!(cache.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_garbage_collect() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
assert_eq!(cache.garbage_collect(), 0);
|
||||
|
||||
cache.do_insert_or_update(create_file(None, now - 5000));
|
||||
cache.do_insert_or_update(create_file(None, now - 5001));
|
||||
cache.do_insert_or_update(create_file(None, now - 2000));
|
||||
cache.do_insert_or_update(create_file(None, now + 10));
|
||||
|
||||
// gc for expired only
|
||||
assert_eq!(cache.garbage_collect(), 2);
|
||||
assert_eq!(cache.len(), 2);
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 2000)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_insert_gc() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
// prepare expired items
|
||||
cache.do_insert_or_update(create_file(None, now - 5000));
|
||||
cache.do_insert_or_update(create_file(None, now - 5001));
|
||||
|
||||
// insert with gc
|
||||
cache.insert(create_file(None, now - 1));
|
||||
|
||||
assert_eq!(cache.len(), 1);
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_insert_ignore_older() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// insert `now - 2`
|
||||
cache.insert(create_file(Some(peer_id), now - 2));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 2)));
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
// ignore for older announcement of the same peer
|
||||
cache.insert(create_file(Some(peer_id), now - 3));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 2)));
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
// however, older announcement allowed from other peer
|
||||
cache.insert(create_file(None, now - 3));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 3)));
|
||||
assert_eq!(cache.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_insert_overwrite() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
// insert `now - 2`
|
||||
cache.insert(create_file(Some(peer_id), now - 2));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 2)));
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
// overwrite with a newer item
|
||||
cache.insert(create_file(Some(peer_id), now - 1));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 1)));
|
||||
assert_eq!(cache.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_insert_cap_exceeded() {
|
||||
let mut cache = AnnouncementCache::new(3, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
cache.insert(create_file(None, now - 2));
|
||||
cache.insert(create_file(None, now - 3));
|
||||
cache.insert(create_file(None, now - 4));
|
||||
|
||||
// oldest `now - 5` will be removed
|
||||
cache.insert(create_file(None, now - 5));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 4)));
|
||||
assert_eq!(cache.len(), 3);
|
||||
|
||||
// oldest `now - 4` will be removed
|
||||
cache.insert(create_file(None, now - 1));
|
||||
assert_eq!(cache.peek_priority(), Some(Reverse(now - 3)));
|
||||
assert_eq!(cache.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_random() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
assert_eq!(cache.random().0, None);
|
||||
|
||||
cache.insert(create_file(None, now - 1));
|
||||
assert_eq!(cache.random().0.unwrap().timestamp, now - 1);
|
||||
|
||||
cache.insert(create_file(None, now - 2));
|
||||
cache.insert(create_file(None, now - 3));
|
||||
let picked = cache.random().0.unwrap().timestamp;
|
||||
assert!(picked >= now - 3 && picked < now);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_annoucement_cache_all() {
|
||||
let mut cache = AnnouncementCache::new(100, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
assert_eq!(cache.all().0, vec![]);
|
||||
|
||||
cache.insert(create_file(None, now - 1));
|
||||
cache.insert(create_file(None, now - 2));
|
||||
cache.insert(create_file(None, now - 3));
|
||||
|
||||
assert_all_files(cache.all().0, vec![now - 3, now - 2, now - 1])
|
||||
}
|
||||
|
||||
fn assert_all_files(files: Vec<SignedAnnounceFile>, sorted_timestamps: Vec<u32>) {
|
||||
let mut timestamps: Vec<u32> = files.iter().map(|f| f.timestamp).collect();
|
||||
timestamps.sort();
|
||||
assert_eq!(timestamps, sorted_timestamps);
|
||||
}
|
||||
|
||||
fn create_file_cache(total_entries: usize, file_entries: usize, timeout: u32) -> FileCache {
|
||||
FileCache::new(Config {
|
||||
max_entries_total: total_entries,
|
||||
max_entries_per_file: file_entries,
|
||||
entry_expiration_time_secs: timeout,
|
||||
})
|
||||
}
|
||||
|
||||
fn create_file_2(tx_id: TxID, peer_id: PeerId, timestamp: u32) -> SignedAnnounceFile {
|
||||
AnnounceFileBuilder::default()
|
||||
.with_tx_id(tx_id)
|
||||
.with_peer_id(peer_id)
|
||||
.with_timestamp(timestamp)
|
||||
.build()
|
||||
}
|
||||
|
||||
fn assert_file(file: &SignedAnnounceFile, tx_id: TxID, peer_id: PeerId, timestamp: u32) {
|
||||
assert_eq!(file.tx_id, tx_id);
|
||||
assert_eq!(PeerId::from(file.peer_id.clone()), peer_id);
|
||||
assert_eq!(file.timestamp, timestamp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_cache_insert_pop() {
|
||||
let mut cache = create_file_cache(100, 3, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
assert_eq!(cache.total_announcements, 0);
|
||||
|
||||
let peer1 = PeerId::random();
|
||||
let peer2 = PeerId::random();
|
||||
let tx1 = TxID::random_hash(1);
|
||||
let tx2 = TxID::random_hash(2);
|
||||
|
||||
cache.insert(create_file_2(tx1, peer1, now - 1));
|
||||
assert_eq!(cache.total_announcements, 1);
|
||||
cache.insert(create_file_2(tx2, peer1, now - 2));
|
||||
assert_eq!(cache.total_announcements, 2);
|
||||
cache.insert(create_file_2(tx1, peer2, now - 3));
|
||||
assert_eq!(cache.total_announcements, 3);
|
||||
|
||||
assert_file(&cache.pop().unwrap(), tx1, peer2, now - 3);
|
||||
assert_eq!(cache.total_announcements, 2);
|
||||
assert_file(&cache.pop().unwrap(), tx2, peer1, now - 2);
|
||||
assert_eq!(cache.total_announcements, 1);
|
||||
assert_file(&cache.pop().unwrap(), tx1, peer1, now - 1);
|
||||
assert_eq!(cache.total_announcements, 0);
|
||||
assert_eq!(cache.pop(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_cache_insert_cap_exceeded() {
|
||||
let mut cache = create_file_cache(3, 3, 3600);
|
||||
let now = timestamp_now();
|
||||
|
||||
let tx1 = TxID::random_hash(1);
|
||||
cache.insert(create_file_2(tx1, PeerId::random(), now - 7));
|
||||
cache.insert(create_file_2(tx1, PeerId::random(), now - 8));
|
||||
cache.insert(create_file_2(tx1, PeerId::random(), now - 9));
|
||||
assert_eq!(cache.total_announcements, 3);
|
||||
|
||||
// insert more files to exceed the total entries limit
|
||||
let tx2 = TxID::random_hash(2);
|
||||
cache.insert(create_file_2(tx2, PeerId::random(), now - 1));
|
||||
assert_all_files(cache.all(tx1).unwrap_or_default(), vec![now - 8, now - 7]);
|
||||
cache.insert(create_file_2(tx2, PeerId::random(), now - 2));
|
||||
assert_all_files(cache.all(tx1).unwrap_or_default(), vec![now - 7]);
|
||||
cache.insert(create_file_2(tx2, PeerId::random(), now - 3));
|
||||
assert_all_files(cache.all(tx1).unwrap_or_default(), vec![]);
|
||||
|
||||
assert_all_files(
|
||||
cache.all(tx2).unwrap_or_default(),
|
||||
vec![now - 3, now - 2, now - 1],
|
||||
);
|
||||
}
|
||||
}
|
20
node/file_location_cache/src/lib.rs
Normal file
20
node/file_location_cache/src/lib.rs
Normal file
@ -0,0 +1,20 @@
|
||||
mod file_location_cache;
|
||||
pub mod test_util;
|
||||
|
||||
pub use crate::file_location_cache::FileLocationCache;
|
||||
|
||||
pub struct Config {
|
||||
pub max_entries_total: usize,
|
||||
pub max_entries_per_file: usize,
|
||||
pub entry_expiration_time_secs: u32,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Config {
|
||||
max_entries_total: 4096,
|
||||
max_entries_per_file: 4,
|
||||
entry_expiration_time_secs: 3600,
|
||||
}
|
||||
}
|
||||
}
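// A minimal sketch (not part of the original file) of overriding the defaults
// above; the functional-update syntax relies only on the `Default` impl and
// the public fields defined in this file.
#[allow(dead_code)]
fn example_small_config() -> Config {
    Config {
        max_entries_total: 1024,
        max_entries_per_file: 2,
        ..Default::default()
    }
}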
|
47
node/file_location_cache/src/test_util.rs
Normal file
47
node/file_location_cache/src/test_util.rs
Normal file
@ -0,0 +1,47 @@
|
||||
use network::{
|
||||
libp2p::identity,
|
||||
types::{AnnounceFile, SignedAnnounceFile},
|
||||
Multiaddr, PeerId,
|
||||
};
|
||||
use shared_types::{timestamp_now, TxID};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AnnounceFileBuilder {
|
||||
tx_id: Option<TxID>,
|
||||
peer_id: Option<PeerId>,
|
||||
timestamp: Option<u32>,
|
||||
}
|
||||
|
||||
impl AnnounceFileBuilder {
|
||||
pub fn with_tx_id(mut self, tx_id: TxID) -> Self {
|
||||
self.tx_id = Some(tx_id);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_peer_id(mut self, peer_id: PeerId) -> Self {
|
||||
self.peer_id = Some(peer_id);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_timestamp(mut self, timestamp: u32) -> Self {
|
||||
self.timestamp = Some(timestamp);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> SignedAnnounceFile {
|
||||
let tx_id = self.tx_id.unwrap_or_else(|| TxID::random_hash(0));
|
||||
let peer_id = self.peer_id.unwrap_or_else(PeerId::random);
|
||||
let at: Multiaddr = "/ip4/127.0.0.1/tcp/10000".parse().unwrap();
|
||||
let timestamp = self.timestamp.unwrap_or_else(timestamp_now);
|
||||
|
||||
let msg = AnnounceFile {
|
||||
tx_id,
|
||||
peer_id: peer_id.into(),
|
||||
at: at.into(),
|
||||
timestamp,
|
||||
};
|
||||
|
||||
let keypair = identity::Keypair::generate_secp256k1();
|
||||
msg.into_signed(&keypair).unwrap()
|
||||
}
|
||||
}
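// Hedged usage sketch for the builder above, mirroring how `create_file_2`
// uses it in the cache tests; the helpers come from the imports at the top of
// this file, and the seed value is illustrative only.
#[allow(dead_code)]
fn example_announcement() -> SignedAnnounceFile {
    AnnounceFileBuilder::default()
        .with_tx_id(TxID::random_hash(7))
        .with_peer_id(PeerId::random())
        .with_timestamp(timestamp_now())
        .build()
}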
|
24
node/log_entry_sync/Cargo.toml
Normal file
24
node/log_entry_sync/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "log_entry_sync"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "=1.0.58", features = ["backtrace"] }
|
||||
append_merkle = { path = "../../common/append_merkle" }
|
||||
async-trait = "0.1.56"
|
||||
ethereum-types = "0.14"
|
||||
futures = "0.3.21"
|
||||
jsonrpsee = { version = "0.14.0", features = ["full"] }
|
||||
shared_types = { path = "../shared_types" }
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
tokio = "1.19.2"
|
||||
ethers = { version = "^2", features = ["ws"] }
|
||||
serde_json = "1.0.82"
|
||||
storage = { path = "../storage" }
|
||||
contract-interface = { path = "../../common/contract-interface" }
|
||||
futures-core = "0.3.28"
|
||||
futures-util = "0.3.28"
|
||||
thiserror = "1.0.44"
|
10
node/log_entry_sync/src/lib.rs
Normal file
10
node/log_entry_sync/src/lib.rs
Normal file
@ -0,0 +1,10 @@
|
||||
extern crate core;
|
||||
|
||||
pub(crate) mod rpc_proxy;
|
||||
mod sync_manager;
|
||||
|
||||
pub use rpc_proxy::ContractAddress;
|
||||
pub use sync_manager::{
|
||||
config::{CacheConfig, LogSyncConfig},
|
||||
LogSyncEvent, LogSyncManager,
|
||||
};
|
30
node/log_entry_sync/src/rpc_proxy/eth.rs
Normal file
30
node/log_entry_sync/src/rpc_proxy/eth.rs
Normal file
@ -0,0 +1,30 @@
|
||||
use crate::rpc_proxy::{ContractAddress, EvmRpcProxy, SubEvent, SubFilter};
|
||||
use async_trait::async_trait;
|
||||
use ethers::prelude::{Bytes, Middleware, Provider};
|
||||
use ethers::providers::Ws;
|
||||
use ethers::types::TransactionRequest;
|
||||
use jsonrpsee::core::client::Subscription;
|
||||
|
||||
pub struct EthClient {
|
||||
client: Provider<Ws>,
|
||||
}
|
||||
|
||||
impl EthClient {
|
||||
#[allow(unused)]
|
||||
pub async fn new(url: &str) -> anyhow::Result<EthClient> {
|
||||
let client = Provider::new(Ws::connect(url).await?);
|
||||
Ok(Self { client })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl EvmRpcProxy for EthClient {
|
||||
async fn call(&self, to: ContractAddress, data: Bytes) -> anyhow::Result<Bytes> {
|
||||
let request = TransactionRequest::new().to(to).data(data);
|
||||
Ok(self.client.call(&request.into(), None).await?)
|
||||
}
|
||||
|
||||
async fn sub_events(&self, _filter: SubFilter) -> Subscription<SubEvent> {
|
||||
todo!()
|
||||
}
|
||||
}
|
38
node/log_entry_sync/src/rpc_proxy/mod.rs
Normal file
38
node/log_entry_sync/src/rpc_proxy/mod.rs
Normal file
@ -0,0 +1,38 @@
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use ethereum_types::{H160, H256};
|
||||
use ethers::prelude::Bytes;
|
||||
use jsonrpsee::core::client::Subscription;
|
||||
|
||||
// TODO: Define accounts/filter/events as associated types?
|
||||
// TODO: Define an abstraction suitable for other chains.
|
||||
#[async_trait]
|
||||
pub trait EvmRpcProxy {
|
||||
async fn call(&self, to: ContractAddress, data: Bytes) -> Result<Bytes>;
|
||||
|
||||
async fn sub_events(&self, filter: SubFilter) -> Subscription<SubEvent>;
|
||||
}
|
||||
|
||||
pub type ContractAddress = H160;
|
||||
|
||||
pub type Topic = H256;
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct SubFilter {
|
||||
to: Option<ContractAddress>,
|
||||
topics: Vec<Topic>,
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct SubEvent {
|
||||
/// Address
|
||||
pub address: ContractAddress,
|
||||
|
||||
/// Topics
|
||||
pub topics: Vec<Topic>,
|
||||
|
||||
/// Data
|
||||
pub data: Bytes,
|
||||
}
|
||||
|
||||
pub(crate) mod eth;
|
65
node/log_entry_sync/src/sync_manager/config.rs
Normal file
65
node/log_entry_sync/src/sync_manager/config.rs
Normal file
@ -0,0 +1,65 @@
|
||||
use crate::rpc_proxy::ContractAddress;
|
||||
|
||||
pub struct LogSyncConfig {
|
||||
pub rpc_endpoint_url: String,
|
||||
pub contract_address: ContractAddress,
|
||||
pub cache_config: CacheConfig,
|
||||
|
||||
/// The block number where we start to sync data.
|
||||
/// This is usually the block number at which the Zgs contract was deployed.
|
||||
pub start_block_number: u64,
|
||||
/// The number of blocks needed for confirmation on the blockchain.
|
||||
/// This is used to roll back to a stable height if a reorg happens during a node restart.
|
||||
/// TODO(zz): Some blockchains have better confirmation/finalization mechanisms.
|
||||
pub confirmation_block_count: u64,
|
||||
/// Maximum number of event logs to poll at a time.
|
||||
pub log_page_size: u64,
|
||||
|
||||
// blockchain provider retry params
|
||||
// the number of retries for rate-limited responses
|
||||
pub rate_limit_retries: u32,
|
||||
// the number of retries after a connection times out
|
||||
pub timeout_retries: u32,
|
||||
// the duration to wait before retry, in ms
|
||||
pub initial_backoff: u64,
|
||||
// the duration between each paginated getLogs RPC call, in ms.
|
||||
// This is set to avoid triggering the throttling mechanism in the RPC server.
|
||||
pub recover_query_delay: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CacheConfig {
|
||||
/// The data with a size larger than this will not be cached.
|
||||
/// This is reasonable because uploading
|
||||
pub max_data_size: usize,
|
||||
pub tx_seq_ttl: usize,
|
||||
}
|
||||
|
||||
impl LogSyncConfig {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
rpc_endpoint_url: String,
|
||||
contract_address: ContractAddress,
|
||||
start_block_number: u64,
|
||||
confirmation_block_count: u64,
|
||||
cache_config: CacheConfig,
|
||||
log_page_size: u64,
|
||||
rate_limit_retries: u32,
|
||||
timeout_retries: u32,
|
||||
initial_backoff: u64,
|
||||
recover_query_delay: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
rpc_endpoint_url,
|
||||
contract_address,
|
||||
cache_config,
|
||||
start_block_number,
|
||||
confirmation_block_count,
|
||||
log_page_size,
|
||||
rate_limit_retries,
|
||||
timeout_retries,
|
||||
initial_backoff,
|
||||
recover_query_delay,
|
||||
}
|
||||
}
|
||||
}
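// Hedged example with illustrative values only (the endpoint, block numbers
// and retry settings are placeholders, not recommendations); the argument
// order follows `LogSyncConfig::new` above.
#[allow(dead_code)]
fn example_log_sync_config(contract: ContractAddress) -> LogSyncConfig {
    LogSyncConfig::new(
        "http://127.0.0.1:8545".to_string(),
        contract,
        1_000_000, // start_block_number
        6,         // confirmation_block_count
        CacheConfig {
            max_data_size: 1 << 20,
            tx_seq_ttl: 500,
        },
        1000, // log_page_size
        100,  // rate_limit_retries
        100,  // timeout_retries
        500,  // initial_backoff, in ms
        50,   // recover_query_delay, in ms
    )
}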
|
58
node/log_entry_sync/src/sync_manager/data_cache.rs
Normal file
58
node/log_entry_sync/src/sync_manager/data_cache.rs
Normal file
@ -0,0 +1,58 @@
|
||||
use crate::sync_manager::config::CacheConfig;
|
||||
use shared_types::DataRoot;
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
|
||||
struct CachedData {
|
||||
/// Used for garbage collection.
|
||||
last_seen_tx_seq: u64,
|
||||
/// Complete data for a given DataRoot.
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
pub struct DataCache {
|
||||
root_to_data: HashMap<DataRoot, CachedData>,
|
||||
config: CacheConfig,
|
||||
}
|
||||
|
||||
impl DataCache {
|
||||
pub fn new(config: CacheConfig) -> Self {
|
||||
Self {
|
||||
root_to_data: HashMap::new(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_data(&mut self, root: DataRoot, tx_seq: u64, data: Vec<u8>) -> bool {
|
||||
if data.len() > self.config.max_data_size {
|
||||
// Large data is not cached.
|
||||
return false;
|
||||
}
|
||||
// TODO: insert partial data and merge here.
|
||||
self.root_to_data
|
||||
.entry(root)
|
||||
.and_modify(|cached| {
|
||||
cached.last_seen_tx_seq = cmp::max(tx_seq, cached.last_seen_tx_seq)
|
||||
})
|
||||
.or_insert(CachedData {
|
||||
last_seen_tx_seq: tx_seq,
|
||||
data,
|
||||
});
|
||||
true
|
||||
}
|
||||
|
||||
/// Remove and return the data of a given `DataRoot`.
|
||||
/// If two completed reverted transactions have the same root and both appear later,
|
||||
/// the second one will have its data copied in `put_tx`.
|
||||
pub fn pop_data(&mut self, root: &DataRoot) -> Option<Vec<u8>> {
|
||||
self.root_to_data.remove(root).map(|e| e.data)
|
||||
}
|
||||
|
||||
/// Remove timeout data entries according to TTL.
|
||||
pub fn garbage_collect(&mut self, latest_tx_seq: u64) {
|
||||
// We won't keep much data here, so it's okay to just iterate.
|
||||
self.root_to_data.retain(|_, cached| {
|
||||
cached.last_seen_tx_seq + self.config.tx_seq_ttl as u64 >= latest_tx_seq
|
||||
})
|
||||
}
|
||||
}
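// Hedged example of the cache lifecycle above. It assumes `DataRoot` is an
// `H256`-style hash with a `zero()` constructor; with `tx_seq_ttl = 100`, data
// last seen at tx_seq 10 survives `garbage_collect(110)` (10 + 100 >= 110) and
// can still be popped afterwards.
#[cfg(test)]
mod data_cache_example {
    use super::*;

    #[test]
    fn ttl_keeps_recent_entries() {
        let mut cache = DataCache::new(CacheConfig {
            max_data_size: 1024,
            tx_seq_ttl: 100,
        });
        let root = DataRoot::zero();
        assert!(cache.add_data(root, 10, vec![1, 2, 3]));
        cache.garbage_collect(110);
        assert_eq!(cache.pop_data(&root), Some(vec![1, 2, 3]));
    }
}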
|
356
node/log_entry_sync/src/sync_manager/log_entry_fetcher.rs
Normal file
356
node/log_entry_sync/src/sync_manager/log_entry_fetcher.rs
Normal file
@ -0,0 +1,356 @@
|
||||
use crate::rpc_proxy::ContractAddress;
|
||||
use crate::sync_manager::log_query::LogQuery;
|
||||
use crate::sync_manager::{repeat_run_and_log, RETRY_WAIT_MS};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use append_merkle::{Algorithm, Sha3Algorithm};
|
||||
use contract_interface::{ZgsFlow, SubmissionNode, SubmitFilter};
|
||||
use ethers::abi::RawLog;
|
||||
use ethers::prelude::{BlockNumber, EthLogDecode, Http, Log, Middleware, Provider, U256};
|
||||
use ethers::providers::{FilterKind, HttpRateLimitRetryPolicy, RetryClient, RetryClientBuilder};
|
||||
use ethers::types::H256;
|
||||
use futures::StreamExt;
|
||||
use jsonrpsee::tracing::{debug, error, info};
|
||||
use shared_types::{DataRoot, Transaction};
|
||||
use std::collections::{BTreeMap, VecDeque};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
|
||||
|
||||
pub struct LogEntryFetcher {
|
||||
contract_address: ContractAddress,
|
||||
log_page_size: u64,
|
||||
provider: Arc<Provider<RetryClient<Http>>>,
|
||||
|
||||
confirmation_delay: u64,
|
||||
}
|
||||
|
||||
impl LogEntryFetcher {
|
||||
pub async fn new(
|
||||
url: &str,
|
||||
contract_address: ContractAddress,
|
||||
log_page_size: u64,
|
||||
confirmation_delay: u64,
|
||||
rate_limit_retries: u32,
|
||||
timeout_retries: u32,
|
||||
initial_backoff: u64,
|
||||
) -> Result<Self> {
|
||||
let provider = Arc::new(Provider::new(
|
||||
RetryClientBuilder::default()
|
||||
.rate_limit_retries(rate_limit_retries)
|
||||
.timeout_retries(timeout_retries)
|
||||
.initial_backoff(Duration::from_millis(initial_backoff))
|
||||
.build(Http::from_str(url)?, Box::new(HttpRateLimitRetryPolicy)),
|
||||
));
|
||||
// TODO: `error` types are removed from the ABI json file.
|
||||
Ok(Self {
|
||||
contract_address,
|
||||
provider,
|
||||
log_page_size,
|
||||
confirmation_delay,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn start_recover(
|
||||
&self,
|
||||
start_block_number: u64,
|
||||
end_block_number: u64,
|
||||
executor: &TaskExecutor,
|
||||
log_query_delay: Duration,
|
||||
) -> UnboundedReceiver<LogFetchProgress> {
|
||||
let provider = self.provider.clone();
|
||||
let (recover_tx, recover_rx) = tokio::sync::mpsc::unbounded_channel();
|
||||
let contract = ZgsFlow::new(self.contract_address, provider.clone());
|
||||
let log_page_size = self.log_page_size;
|
||||
|
||||
executor.spawn(
|
||||
async move {
|
||||
let mut progress = start_block_number;
|
||||
let mut filter = contract
|
||||
.submit_filter()
|
||||
.from_block(progress)
|
||||
.to_block(end_block_number)
|
||||
.filter;
|
||||
let mut stream = LogQuery::new(&provider, &filter, log_query_delay)
|
||||
.with_page_size(log_page_size);
|
||||
debug!(
|
||||
"start_recover starts, start={} end={}",
|
||||
start_block_number, end_block_number
|
||||
);
|
||||
while let Some(maybe_log) = stream.next().await {
|
||||
match maybe_log {
|
||||
Ok(log) => {
|
||||
let sync_progress =
|
||||
if log.block_hash.is_some() && log.block_number.is_some() {
|
||||
let synced_block = LogFetchProgress::SyncedBlock((
|
||||
log.block_number.unwrap().as_u64(),
|
||||
log.block_hash.unwrap(),
|
||||
));
|
||||
progress = log.block_number.unwrap().as_u64();
|
||||
Some(synced_block)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
debug!("recover: progress={:?}", sync_progress);
|
||||
|
||||
match SubmitFilter::decode_log(&RawLog {
|
||||
topics: log.topics,
|
||||
data: log.data.to_vec(),
|
||||
}) {
|
||||
Ok(event) => {
|
||||
if let Err(e) = recover_tx
|
||||
.send(submission_event_to_transaction(event))
|
||||
.and_then(|_| match sync_progress {
|
||||
Some(b) => recover_tx.send(b),
|
||||
None => Ok(()),
|
||||
})
|
||||
{
|
||||
error!("send error: e={:?}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("log decode error: e={:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("log query error: e={:?}", e);
|
||||
filter = filter.from_block(progress);
|
||||
stream = LogQuery::new(&provider, &filter, log_query_delay)
|
||||
.with_page_size(log_page_size);
|
||||
tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"log recover",
|
||||
);
|
||||
recover_rx
|
||||
}
|
||||
|
||||
pub fn start_watch(
|
||||
&self,
|
||||
start_block_number: u64,
|
||||
executor: &TaskExecutor,
|
||||
) -> UnboundedReceiver<LogFetchProgress> {
|
||||
let (watch_tx, watch_rx) = tokio::sync::mpsc::unbounded_channel();
|
||||
let contract = ZgsFlow::new(self.contract_address, self.provider.clone());
|
||||
let provider = self.provider.clone();
|
||||
let mut log_confirmation_queue = LogConfirmationQueue::new(self.confirmation_delay);
|
||||
executor.spawn(
|
||||
async move {
|
||||
let mut filter = contract
|
||||
.submit_filter()
|
||||
.from_block(start_block_number)
|
||||
.filter;
|
||||
debug!("start_watch starts, start={}", start_block_number);
|
||||
let mut filter_id =
|
||||
repeat_run_and_log(|| provider.new_filter(FilterKind::Logs(&filter))).await;
|
||||
let mut progress = start_block_number;
|
||||
|
||||
loop {
|
||||
match Self::watch_loop(
|
||||
provider.as_ref(),
|
||||
filter_id,
|
||||
&watch_tx,
|
||||
&mut log_confirmation_queue,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
error!("log sync watch error: e={:?}", e);
|
||||
filter = filter.from_block(progress);
|
||||
filter_id = repeat_run_and_log(|| {
|
||||
provider.new_filter(FilterKind::Logs(&filter))
|
||||
})
|
||||
.await;
|
||||
}
|
||||
Ok(Some(p)) => {
|
||||
progress = p;
|
||||
info!("log sync to block number {:?}", progress);
|
||||
}
|
||||
Ok(None) => {
|
||||
error!(
|
||||
"log sync gets entries without progress? old_progress={}",
|
||||
progress
|
||||
)
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await;
|
||||
}
|
||||
},
|
||||
"log watch",
|
||||
);
|
||||
watch_rx
|
||||
}
|
||||
|
||||
async fn watch_loop(
|
||||
provider: &Provider<RetryClient<Http>>,
|
||||
filter_id: U256,
|
||||
watch_tx: &UnboundedSender<LogFetchProgress>,
|
||||
log_confirmation_queue: &mut LogConfirmationQueue,
|
||||
) -> Result<Option<u64>> {
|
||||
debug!("get block");
|
||||
let latest_block = provider
|
||||
.get_block(BlockNumber::Latest)
|
||||
.await?
|
||||
.ok_or_else(|| anyhow!("None for latest block"))?;
|
||||
debug!("get filter changes");
|
||||
let logs: Vec<Log> = provider.get_filter_changes(filter_id).await?;
|
||||
if let Some(reverted) = log_confirmation_queue.push(logs)? {
|
||||
watch_tx.send(LogFetchProgress::Reverted(reverted))?;
|
||||
}
|
||||
debug!("get filter end");
|
||||
for log in log_confirmation_queue.confirm_logs(latest_block.number.unwrap().as_u64()) {
|
||||
assert!(!log.removed.unwrap_or(false));
|
||||
// TODO(zz): Log parse error means logs might be lost here.
|
||||
let tx = SubmitFilter::decode_log(&RawLog {
|
||||
topics: log.topics,
|
||||
data: log.data.to_vec(),
|
||||
})?;
|
||||
watch_tx.send(submission_event_to_transaction(tx))?;
|
||||
}
|
||||
let progress = if latest_block.hash.is_some() && latest_block.number.is_some() {
|
||||
Some((
|
||||
latest_block.number.unwrap().as_u64(),
|
||||
latest_block.hash.unwrap(),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(p) = &progress {
|
||||
watch_tx.send(LogFetchProgress::SyncedBlock(*p))?;
|
||||
}
|
||||
Ok(progress.map(|p| p.0))
|
||||
}
|
||||
|
||||
pub fn provider(&self) -> &Provider<RetryClient<Http>> {
|
||||
self.provider.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
struct LogConfirmationQueue {
|
||||
/// Keep the unconfirmed new logs.
|
||||
/// Each entry pairs a block number with the logs observed in that block.
|
||||
queue: VecDeque<(u64, Vec<Log>)>,
|
||||
|
||||
latest_block_number: u64,
|
||||
confirmation_delay: u64,
|
||||
}
|
||||
|
||||
impl LogConfirmationQueue {
|
||||
fn new(confirmation_delay: u64) -> Self {
|
||||
Self {
|
||||
queue: VecDeque::new(),
|
||||
latest_block_number: 0,
|
||||
confirmation_delay,
|
||||
}
|
||||
}
|
||||
/// Push a set of new logs.
|
||||
/// We assume that these logs are in order and that removed logs come first.
|
||||
///
|
||||
/// Return `Ok(Some(tx_seq))` of the first reverted tx_seq if chain reorg happens.
|
||||
/// `Err` is returned if the assumptions are violated (e.g., a log has missing fields).
|
||||
fn push(&mut self, logs: Vec<Log>) -> Result<Option<u64>> {
|
||||
let mut revert_to = None;
|
||||
// First merge logs according to the block number.
|
||||
let mut block_logs: BTreeMap<u64, Vec<Log>> = BTreeMap::new();
|
||||
let mut removed_block_logs = BTreeMap::new();
|
||||
for log in logs {
|
||||
let set = if log.removed.unwrap_or(false) {
|
||||
&mut removed_block_logs
|
||||
} else {
|
||||
&mut block_logs
|
||||
};
|
||||
let block_number = log
|
||||
.block_number
|
||||
.ok_or_else(|| anyhow!("block number missing"))?
|
||||
.as_u64();
|
||||
set.entry(block_number).or_default().push(log);
|
||||
}
|
||||
|
||||
// Handle revert if it happens.
|
||||
for (block_number, removed_logs) in &removed_block_logs {
|
||||
if revert_to.is_none() {
|
||||
let reverted_index = match self.queue.binary_search_by_key(block_number, |e| e.0) {
|
||||
Ok(x) => x,
|
||||
Err(x) => x,
|
||||
};
|
||||
self.queue.truncate(reverted_index);
|
||||
let first = removed_logs.first().expect("not empty");
|
||||
let first_reverted_tx_seq = SubmitFilter::decode_log(&RawLog {
|
||||
topics: first.topics.clone(),
|
||||
data: first.data.to_vec(),
|
||||
})?
|
||||
.submission_index
|
||||
.as_u64();
|
||||
revert_to = Some(first_reverted_tx_seq);
|
||||
} else {
|
||||
// Other removed logs should have larger tx seq, so no need to process them.
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Add new logs to the queue.
|
||||
for (block_number, new_logs) in block_logs {
|
||||
if block_number <= self.queue.back().map(|e| e.0).unwrap_or(0) {
|
||||
bail!("reverted without being notified");
|
||||
}
|
||||
self.queue.push_back((block_number, new_logs));
|
||||
}
|
||||
|
||||
Ok(revert_to)
|
||||
}
|
||||
|
||||
/// Pass in the latest block number and return the confirmed logs.
|
||||
fn confirm_logs(&mut self, latest_block_number: u64) -> Vec<Log> {
|
||||
self.latest_block_number = latest_block_number;
|
||||
let mut confirmed_logs = Vec::new();
|
||||
while let Some((block_number, _)) = self.queue.front() {
|
||||
if *block_number
|
||||
> self
|
||||
.latest_block_number
|
||||
.wrapping_sub(self.confirmation_delay)
|
||||
{
|
||||
break;
|
||||
}
|
||||
let (_, mut logs) = self.queue.pop_front().unwrap();
|
||||
confirmed_logs.append(&mut logs);
|
||||
}
|
||||
confirmed_logs
|
||||
}
|
||||
}
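// Worked example of the confirmation rule above (a reading of the code, not
// additional behavior): with `confirmation_delay = 6`, logs pushed for block
// 100 stay queued until `confirm_logs` is called with a latest block number of
// at least 106, because 100 > 106 - 6 no longer holds from that point on.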
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum LogFetchProgress {
|
||||
SyncedBlock((u64, H256)),
|
||||
Transaction(Transaction),
|
||||
Reverted(u64),
|
||||
}
|
||||
|
||||
fn submission_event_to_transaction(e: SubmitFilter) -> LogFetchProgress {
|
||||
LogFetchProgress::Transaction(Transaction {
|
||||
stream_ids: vec![],
|
||||
data: vec![],
|
||||
data_merkle_root: nodes_to_root(&e.submission.nodes),
|
||||
merkle_nodes: e
|
||||
.submission
|
||||
.nodes
|
||||
.iter()
|
||||
// the submission height is the height of the root node starting from height 0.
|
||||
.map(|SubmissionNode { root, height }| (height.as_usize() + 1, root.into()))
|
||||
.collect(),
|
||||
start_entry_index: e.start_pos.as_u64(),
|
||||
size: e.submission.length.as_u64(),
|
||||
seq: e.submission_index.as_u64(),
|
||||
})
|
||||
}
|
||||
|
||||
fn nodes_to_root(node_list: &Vec<SubmissionNode>) -> DataRoot {
|
||||
let mut root: DataRoot = node_list.last().expect("not empty").root.into();
|
||||
for next_node in node_list[..node_list.len() - 1].iter().rev() {
|
||||
root = Sha3Algorithm::parent(&next_node.root.into(), &root);
|
||||
}
|
||||
root
|
||||
}
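// Worked example of the fold above (derived from the loop, no new behavior):
// for submission nodes [a, b, c], the root is computed right to left as
// parent(a.root, parent(b.root, c.root)).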
|
183
node/log_entry_sync/src/sync_manager/log_query.rs
Normal file
183
node/log_entry_sync/src/sync_manager/log_query.rs
Normal file
@ -0,0 +1,183 @@
|
||||
use ethers::prelude::{Filter, JsonRpcClient, Log, Middleware, Provider, ProviderError, U64};
|
||||
use futures_core::stream::Stream;
|
||||
use jsonrpsee::tracing::trace;
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
use thiserror::Error;
|
||||
|
||||
pub(crate) type PinBoxFut<'a, T> =
|
||||
Pin<Box<dyn Future<Output = Result<T, ProviderError>> + Send + 'a>>;
|
||||
|
||||
/// A log query provides streaming access to historical logs via a paginated
|
||||
/// request. For streaming access to future logs, use [`Middleware::watch`] or
|
||||
/// [`Middleware::subscribe_logs`]
|
||||
pub struct LogQuery<'a, P> {
|
||||
provider: &'a Provider<P>,
|
||||
filter: Filter,
|
||||
from_block: Option<U64>,
|
||||
page_size: u64,
|
||||
current_logs: VecDeque<Log>,
|
||||
last_block: Option<U64>,
|
||||
state: LogQueryState<'a>,
|
||||
delay: Duration,
|
||||
}
|
||||
|
||||
enum LogQueryState<'a> {
|
||||
Initial,
|
||||
LoadLastBlock(PinBoxFut<'a, U64>),
|
||||
LoadLogs(PinBoxFut<'a, Vec<Log>>),
|
||||
Consume,
|
||||
}
|
||||
|
||||
impl<'a, P> LogQuery<'a, P>
|
||||
where
|
||||
P: JsonRpcClient,
|
||||
{
|
||||
/// Instantiate a new `LogQuery`
|
||||
pub fn new(provider: &'a Provider<P>, filter: &Filter, delay: Duration) -> Self {
|
||||
Self {
|
||||
provider,
|
||||
filter: filter.clone(),
|
||||
from_block: filter.get_from_block(),
|
||||
page_size: 10000,
|
||||
current_logs: VecDeque::new(),
|
||||
last_block: None,
|
||||
state: LogQueryState::Initial,
|
||||
delay,
|
||||
}
|
||||
}
|
||||
|
||||
/// set page size for pagination
|
||||
pub fn with_page_size(mut self, page_size: u64) -> Self {
|
||||
self.page_size = page_size;
|
||||
self
|
||||
}
|
||||
}
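// Hedged usage sketch of the stream above; `example_collect_logs`, its
// parameters, and the page size are illustrative, but it follows the same
// pattern as `start_recover` in log_entry_fetcher.rs.
#[allow(dead_code)]
async fn example_collect_logs<P: JsonRpcClient>(
    provider: &Provider<P>,
    filter: &Filter,
) -> Result<Vec<Log>, LogQueryError<ProviderError>> {
    use futures_util::StreamExt;

    let mut stream =
        LogQuery::new(provider, filter, Duration::from_millis(30)).with_page_size(1000);
    let mut logs = Vec::new();
    while let Some(maybe_log) = stream.next().await {
        logs.push(maybe_log?);
    }
    Ok(logs)
}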
|
||||
|
||||
macro_rules! rewake_with_new_state {
|
||||
($ctx:ident, $this:ident, $new_state:expr) => {
|
||||
$this.state = $new_state;
|
||||
$ctx.waker().wake_by_ref();
|
||||
return Poll::Pending
|
||||
};
|
||||
}
|
||||
|
||||
/// Errors while querying for logs
|
||||
#[derive(Error, Debug)]
|
||||
pub enum LogQueryError<E> {
|
||||
/// Error loading latest block
|
||||
#[error(transparent)]
|
||||
LoadLastBlockError(E),
|
||||
/// Error loading logs from block range
|
||||
#[error(transparent)]
|
||||
LoadLogsError(E),
|
||||
}
|
||||
|
||||
impl<'a, P> Stream for LogQuery<'a, P>
|
||||
where
|
||||
P: JsonRpcClient,
|
||||
{
|
||||
type Item = Result<Log, LogQueryError<ProviderError>>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let delay = self.delay;
|
||||
match &mut self.state {
|
||||
LogQueryState::Initial => {
|
||||
if !self.filter.is_paginatable() {
|
||||
// if not paginatable, load logs and consume
|
||||
let filter = self.filter.clone();
|
||||
let provider = self.provider;
|
||||
#[allow(clippy::redundant_async_block)]
|
||||
let fut = Box::pin(async move {
|
||||
tokio::time::sleep(delay).await;
|
||||
provider.get_logs(&filter).await
|
||||
});
|
||||
rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut));
|
||||
} else {
|
||||
// if paginatable, load last block
|
||||
let fut = self.provider.get_block_number();
|
||||
rewake_with_new_state!(ctx, self, LogQueryState::LoadLastBlock(fut));
|
||||
}
|
||||
}
|
||||
LogQueryState::LoadLastBlock(fut) => {
|
||||
match futures_util::ready!(fut.as_mut().poll(ctx)) {
|
||||
Ok(last_block) => {
|
||||
trace!("log_query: last_block={}", last_block);
|
||||
self.last_block = Some(last_block);
|
||||
|
||||
// this is okay because we will only enter this state when the filter is
|
||||
// paginatable i.e. from block is set
|
||||
let from_block = self.filter.get_from_block().unwrap();
|
||||
let to_block = from_block + self.page_size;
|
||||
self.from_block = Some(to_block + 1);
|
||||
|
||||
let filter = self
|
||||
.filter
|
||||
.clone()
|
||||
.from_block(from_block)
|
||||
.to_block(to_block);
|
||||
let provider = self.provider;
|
||||
// load first page of logs
|
||||
#[allow(clippy::redundant_async_block)]
|
||||
let fut = Box::pin(async move {
|
||||
tokio::time::sleep(delay).await;
|
||||
provider.get_logs(&filter).await
|
||||
});
|
||||
rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut));
|
||||
}
|
||||
Err(err) => Poll::Ready(Some(Err(LogQueryError::LoadLastBlockError(err)))),
|
||||
}
|
||||
}
|
||||
LogQueryState::LoadLogs(fut) => match futures_util::ready!(fut.as_mut().poll(ctx)) {
|
||||
Ok(logs) => {
|
||||
self.current_logs = VecDeque::from(logs);
|
||||
rewake_with_new_state!(ctx, self, LogQueryState::Consume);
|
||||
}
|
||||
Err(err) => Poll::Ready(Some(Err(LogQueryError::LoadLogsError(err)))),
|
||||
},
|
||||
LogQueryState::Consume => {
|
||||
let log = self.current_logs.pop_front();
|
||||
if log.is_none() {
|
||||
// consumed all the logs
|
||||
if !self.filter.is_paginatable() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
// load new logs if there are still more pages to go through
|
||||
// can safely assume this will always be set in this state
|
||||
let from_block = self.from_block.unwrap();
|
||||
let to_block = from_block + self.page_size;
|
||||
|
||||
// no more pages to load, and everything is consumed
|
||||
// can safely assume this will always be set in this state
|
||||
if from_block > self.last_block.unwrap() {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
// load next page
|
||||
self.from_block = Some(to_block + 1);
|
||||
|
||||
let filter = self
|
||||
.filter
|
||||
.clone()
|
||||
.from_block(from_block)
|
||||
.to_block(to_block);
|
||||
let provider = self.provider;
|
||||
#[allow(clippy::redundant_async_block)]
|
||||
let fut = Box::pin(async move {
|
||||
tokio::time::sleep(delay).await;
|
||||
provider.get_logs(&filter).await
|
||||
});
|
||||
|
||||
rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut));
|
||||
}
|
||||
} else {
|
||||
Poll::Ready(log.map(Ok))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
319
node/log_entry_sync/src/sync_manager/mod.rs
Normal file
319
node/log_entry_sync/src/sync_manager/mod.rs
Normal file
@ -0,0 +1,319 @@
|
||||
use crate::sync_manager::config::LogSyncConfig;
|
||||
use crate::sync_manager::data_cache::DataCache;
|
||||
use crate::sync_manager::log_entry_fetcher::{LogEntryFetcher, LogFetchProgress};
|
||||
use anyhow::{bail, Result};
|
||||
use ethers::prelude::Middleware;
|
||||
use futures::FutureExt;
|
||||
use jsonrpsee::tracing::{debug, error, trace, warn};
|
||||
use shared_types::{ChunkArray, Transaction};
|
||||
use std::fmt::Debug;
|
||||
use std::future::Future;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use storage::log_store::Store;
|
||||
use task_executor::{ShutdownReason, TaskExecutor};
|
||||
use tokio::sync::broadcast;
|
||||
use tokio::sync::mpsc::UnboundedReceiver;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
const RETRY_WAIT_MS: u64 = 500;
|
||||
const BROADCAST_CHANNEL_CAPACITY: usize = 16;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum LogSyncEvent {
|
||||
/// Chain reorg detected without any operation yet.
|
||||
ReorgDetected { tx_seq: u64 },
|
||||
/// Transaction reverted in storage.
|
||||
Reverted { tx_seq: u64 },
|
||||
/// Synced a transaction from blockchain
|
||||
TxSynced { tx: Transaction },
|
||||
}
|
||||
|
||||
pub struct LogSyncManager {
|
||||
config: LogSyncConfig,
|
||||
log_fetcher: LogEntryFetcher,
|
||||
store: Arc<RwLock<dyn Store>>,
|
||||
data_cache: DataCache,
|
||||
|
||||
next_tx_seq: u64,
|
||||
|
||||
/// To broadcast events to handle in advance.
|
||||
event_send: broadcast::Sender<LogSyncEvent>,
|
||||
}
|
||||
|
||||
impl LogSyncManager {
|
||||
pub async fn spawn(
|
||||
config: LogSyncConfig,
|
||||
executor: TaskExecutor,
|
||||
store: Arc<RwLock<dyn Store>>,
|
||||
) -> Result<broadcast::Sender<LogSyncEvent>> {
|
||||
let next_tx_seq = store.read().await.next_tx_seq();
|
||||
|
||||
let executor_clone = executor.clone();
|
||||
let mut shutdown_sender = executor.shutdown_sender();
|
||||
|
||||
let (event_send, _) = broadcast::channel(BROADCAST_CHANNEL_CAPACITY);
|
||||
let event_send_cloned = event_send.clone();
|
||||
|
||||
// Spawn the task to sync log entries from the blockchain.
|
||||
executor.spawn(
|
||||
run_and_log(
|
||||
move || {
|
||||
shutdown_sender
|
||||
.try_send(ShutdownReason::Failure("log sync failure"))
|
||||
.expect("shutdown send error")
|
||||
},
|
||||
async move {
|
||||
let log_fetcher = LogEntryFetcher::new(
|
||||
&config.rpc_endpoint_url,
|
||||
config.contract_address,
|
||||
config.log_page_size,
|
||||
config.confirmation_block_count,
|
||||
config.rate_limit_retries,
|
||||
config.timeout_retries,
|
||||
config.initial_backoff,
|
||||
)
|
||||
.await?;
|
||||
let data_cache = DataCache::new(config.cache_config.clone());
|
||||
let mut log_sync_manager = Self {
|
||||
config,
|
||||
log_fetcher,
|
||||
next_tx_seq,
|
||||
store,
|
||||
data_cache,
|
||||
event_send,
|
||||
};
|
||||
|
||||
// Load previous progress from db and check if chain reorg happens after restart.
|
||||
// TODO(zz): Handle reorg instead of return.
|
||||
let start_block_number =
|
||||
match log_sync_manager.store.read().await.get_sync_progress()? {
|
||||
// No previous progress, so just use config.
|
||||
None => log_sync_manager.config.start_block_number,
|
||||
Some((block_number, block_hash)) => {
|
||||
match log_sync_manager
|
||||
.log_fetcher
|
||||
.provider()
|
||||
.get_block(block_number)
|
||||
.await
|
||||
{
|
||||
Ok(Some(b)) => {
|
||||
if b.hash == Some(block_hash) {
|
||||
block_number
|
||||
} else {
|
||||
warn!(
|
||||
"log sync progress check hash fails, \
|
||||
block_number={:?} expect={:?} get={:?}",
|
||||
block_number, block_hash, b.hash
|
||||
);
|
||||
// Assume the blocks before this are not reverted.
|
||||
block_number.saturating_sub(
|
||||
log_sync_manager.config.confirmation_block_count,
|
||||
)
|
||||
}
|
||||
}
|
||||
e => {
|
||||
error!("log sync progress check rpc fails, e={:?}", e);
|
||||
bail!("log sync start error");
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
let latest_block_number = log_sync_manager
|
||||
.log_fetcher
|
||||
.provider()
|
||||
.get_block_number()
|
||||
.await?
|
||||
.as_u64();
|
||||
|
||||
// Start watching before recovery to ensure that no log is skipped.
|
||||
// TODO(zz): Rate limit to avoid OOM during recovery.
|
||||
let watch_rx = log_sync_manager
|
||||
.log_fetcher
|
||||
.start_watch(latest_block_number, &executor_clone);
|
||||
let recover_rx = log_sync_manager.log_fetcher.start_recover(
|
||||
start_block_number,
|
||||
// -1 so the recover and watch ranges do not overlap.
|
||||
latest_block_number.wrapping_sub(1),
|
||||
&executor_clone,
|
||||
Duration::from_millis(log_sync_manager.config.recover_query_delay),
|
||||
);
|
||||
log_sync_manager.handle_data(recover_rx).await?;
|
||||
// Syncing `watch_rx` is supposed to block forever.
|
||||
log_sync_manager.handle_data(watch_rx).await?;
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
.map(|_| ()),
|
||||
"log_sync",
|
||||
);
|
||||
Ok(event_send_cloned)
|
||||
}
|
||||
|
||||
async fn put_tx(&mut self, tx: Transaction) -> bool {
|
||||
// We call this after processing a chain reorg, so the sequence number should match.
|
||||
match tx.seq.cmp(&self.next_tx_seq) {
|
||||
std::cmp::Ordering::Less => true,
|
||||
std::cmp::Ordering::Equal => {
|
||||
debug!("log entry sync get entry: {:?}", tx);
|
||||
self.put_tx_inner(tx).await
|
||||
}
|
||||
std::cmp::Ordering::Greater => {
|
||||
error!(
|
||||
"Unexpected transaction seq: next={} get={}",
|
||||
self.next_tx_seq, tx.seq
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `tx_seq` is the first reverted tx seq.
|
||||
async fn process_reverted(&mut self, tx_seq: u64) {
|
||||
warn!("revert for chain reorg: seq={}", tx_seq);
|
||||
{
|
||||
let store = self.store.read().await;
|
||||
for seq in tx_seq..self.next_tx_seq {
|
||||
if matches!(store.check_tx_completed(seq), Ok(true)) {
|
||||
if let Ok(Some(tx)) = store.get_tx_by_seq_number(seq) {
|
||||
// TODO(zz): Skip reading the rear padding data?
|
||||
if let Ok(Some(data)) =
|
||||
store.get_chunks_by_tx_and_index_range(seq, 0, tx.num_entries())
|
||||
{
|
||||
if !self
|
||||
.data_cache
|
||||
.add_data(tx.data_merkle_root, seq, data.data)
|
||||
{
|
||||
// TODO(zz): Data too large. Save to disk?
|
||||
warn!("large reverted data dropped for tx={:?}", tx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let _ = self.event_send.send(LogSyncEvent::ReorgDetected { tx_seq });
|
||||
|
||||
// TODO(zz): `wrapping_sub` here is a hack to handle the case of tx_seq=0.
|
||||
if let Err(e) = self.store.write().await.revert_to(tx_seq.wrapping_sub(1)) {
|
||||
error!("revert_to fails: e={:?}", e);
|
||||
return;
|
||||
}
|
||||
self.next_tx_seq = tx_seq;
|
||||
|
||||
let _ = self.event_send.send(LogSyncEvent::Reverted { tx_seq });
|
||||
}
|
||||
|
||||
async fn handle_data(&mut self, mut rx: UnboundedReceiver<LogFetchProgress>) -> Result<()> {
|
||||
while let Some(data) = rx.recv().await {
|
||||
trace!("handle_data: data={:?}", data);
|
||||
match data {
|
||||
LogFetchProgress::SyncedBlock(progress) => {
|
||||
match self
|
||||
.log_fetcher
|
||||
.provider()
|
||||
.get_block(
|
||||
progress
|
||||
.0
|
||||
.saturating_sub(self.config.confirmation_block_count),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Some(b)) => {
|
||||
if let (Some(block_number), Some(block_hash)) = (b.number, b.hash) {
|
||||
self.store
|
||||
.write()
|
||||
.await
|
||||
.put_sync_progress((block_number.as_u64(), block_hash))?;
|
||||
}
|
||||
}
|
||||
e => {
|
||||
error!("log put progress check rpc fails, e={:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
LogFetchProgress::Transaction(tx) => {
|
||||
if !self.put_tx(tx.clone()).await {
|
||||
// Unexpected error.
|
||||
error!("log sync write error");
|
||||
break;
|
||||
}
|
||||
if let Err(e) = self.event_send.send(LogSyncEvent::TxSynced { tx }) {
|
||||
error!("log sync broadcast error, error={:?}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
LogFetchProgress::Reverted(reverted) => {
|
||||
self.process_reverted(reverted).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn put_tx_inner(&mut self, tx: Transaction) -> bool {
|
||||
if let Err(e) = self.store.write().await.put_tx(tx.clone()) {
|
||||
error!("put_tx error: e={:?}", e);
|
||||
false
|
||||
} else {
|
||||
if let Some(data) = self.data_cache.pop_data(&tx.data_merkle_root) {
|
||||
let mut store = self.store.write().await;
|
||||
// We are holding a mutable reference of LogSyncManager, so no chain reorg is
|
||||
// possible after put_tx.
|
||||
if let Err(e) = store
|
||||
.put_chunks_with_tx_hash(
|
||||
tx.seq,
|
||||
tx.hash(),
|
||||
ChunkArray {
|
||||
data,
|
||||
start_index: 0,
|
||||
},
|
||||
)
|
||||
.and_then(|_| store.finalize_tx_with_hash(tx.seq, tx.hash()))
|
||||
{
|
||||
error!("put_tx data error: e={:?}", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
self.data_cache.garbage_collect(self.next_tx_seq);
|
||||
self.next_tx_seq += 1;
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_and_log<R, E>(
|
||||
mut on_error: impl FnMut(),
|
||||
f: impl Future<Output = std::result::Result<R, E>> + Send,
|
||||
) -> Option<R>
|
||||
where
|
||||
E: Debug,
|
||||
{
|
||||
match f.await {
|
||||
Err(e) => {
|
||||
error!("log sync failure: e={:?}", e);
|
||||
on_error();
|
||||
None
|
||||
}
|
||||
Ok(r) => Some(r),
|
||||
}
|
||||
}
|
||||
|
||||
async fn repeat_run_and_log<R, E, F>(f: impl Fn() -> F) -> R
|
||||
where
|
||||
E: Debug,
|
||||
F: Future<Output = std::result::Result<R, E>> + Send,
|
||||
{
|
||||
loop {
|
||||
if let Some(r) = run_and_log(|| {}, f()).await {
|
||||
break r;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) mod config;
|
||||
mod data_cache;
|
||||
mod log_entry_fetcher;
|
||||
mod log_query;
|
22
node/miner/Cargo.toml
Normal file
22
node/miner/Cargo.toml
Normal file
@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "miner"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
network = { path = "../network" }
|
||||
storage = { path = "../storage" }
|
||||
zgs_spec = { path = "../../common/spec" }
|
||||
zgs_seal = { path = "../../common/zgs_seal" }
|
||||
task_executor = { path = "../../common/task_executor" }
|
||||
contract-interface = { path = "../../common/contract-interface" }
|
||||
ethereum-types = "0.14"
|
||||
tokio = { version = "1.19.2", features = ["full"] }
|
||||
tracing = "0.1.35"
|
||||
blake2 = "0.10"
|
||||
tiny-keccak = {version="2.0",features = ["keccak"]}
|
||||
rand = "^0.8"
|
||||
ethers = "^2"
|
||||
lazy_static = "1.4"
|
||||
async-trait = "0.1.56"
|
||||
shared_types = { path = "../shared_types" }
|
53
node/miner/src/config.rs
Normal file
53
node/miner/src/config.rs
Normal file
@ -0,0 +1,53 @@
|
||||
use ethereum_types::{Address, H256};
|
||||
use ethers::core::k256::SecretKey;
|
||||
use ethers::middleware::SignerMiddleware;
|
||||
use ethers::providers::Http;
|
||||
use ethers::providers::Middleware;
|
||||
use ethers::providers::Provider;
|
||||
use ethers::signers::LocalWallet;
|
||||
use ethers::signers::Signer;
|
||||
|
||||
pub struct MinerConfig {
|
||||
pub(crate) miner_id: H256,
|
||||
pub(crate) miner_key: H256,
|
||||
pub(crate) rpc_endpoint_url: String,
|
||||
pub(crate) mine_address: Address,
|
||||
pub(crate) flow_address: Address,
|
||||
}
|
||||
|
||||
pub type MineServiceMiddleware = SignerMiddleware<Provider<Http>, LocalWallet>;
|
||||
|
||||
impl MinerConfig {
|
||||
pub fn new(
|
||||
miner_id: Option<H256>,
|
||||
miner_key: Option<H256>,
|
||||
rpc_endpoint_url: String,
|
||||
mine_address: Address,
|
||||
flow_address: Address,
|
||||
) -> Option<MinerConfig> {
|
||||
match (miner_id, miner_key) {
|
||||
(Some(miner_id), Some(miner_key)) => Some(MinerConfig {
|
||||
miner_id,
|
||||
miner_key,
|
||||
rpc_endpoint_url,
|
||||
mine_address,
|
||||
flow_address,
|
||||
}),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn make_provider(&self) -> Result<MineServiceMiddleware, String> {
|
||||
let provider = Provider::<Http>::try_from(&self.rpc_endpoint_url)
|
||||
.map_err(|e| format!("Can not parse blockchain endpoint: {:?}", e))?;
|
||||
let chain_id = provider
|
||||
.get_chainid()
|
||||
.await
|
||||
.map_err(|e| format!("Unable to get chain_id: {:?}", e))?;
|
||||
let secret_key = SecretKey::from_bytes(self.miner_key.as_ref().into())
|
||||
.map_err(|e| format!("Cannot parse private key: {:?}", e))?;
|
||||
let signer = LocalWallet::from(secret_key).with_chain_id(chain_id.as_u64());
|
||||
let middleware = SignerMiddleware::new(provider, signer);
|
||||
Ok(middleware)
|
||||
}
|
||||
}
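// Hedged usage sketch (variable names are placeholders): `new` only yields a
// config when both the miner id and key are provided, and `make_provider`
// then wraps the HTTP provider with a signer for that key.
// let config = MinerConfig::new(Some(miner_id), Some(miner_key), url, mine_addr, flow_addr)
//     .ok_or("miner_id and miner_key are required")?;
// let middleware: MineServiceMiddleware = config.make_provider().await?;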
|
19
node/miner/src/lib.rs
Normal file
19
node/miner/src/lib.rs
Normal file
@ -0,0 +1,19 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
extern crate contract_interface;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
mod config;
|
||||
mod loader;
|
||||
mod mine;
|
||||
pub mod pora;
|
||||
mod sealer;
|
||||
mod service;
|
||||
mod submitter;
|
||||
mod watcher;
|
||||
|
||||
pub use config::MinerConfig;
|
||||
pub use loader::PoraLoader;
|
||||
pub use mine::CustomMineRange;
|
||||
pub use service::{MineService, MinerMessage};
|
20
node/miner/src/loader.rs
Normal file
20
node/miner/src/loader.rs
Normal file
@ -0,0 +1,20 @@
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
use storage::log_store::{MineLoadChunk, Store};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[async_trait]
|
||||
pub trait PoraLoader: Send + Sync {
|
||||
async fn load_sealed_data(&self, index: u64) -> Option<MineLoadChunk>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PoraLoader for Arc<RwLock<dyn Store>> {
|
||||
async fn load_sealed_data(&self, chunk_index: u64) -> Option<MineLoadChunk> {
|
||||
let store = &*self.read().await;
|
||||
match store.flow().load_sealed_data(chunk_index) {
|
||||
Ok(Some(chunk)) => Some(chunk),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
173
node/miner/src/mine.rs
Normal file
173
node/miner/src/mine.rs
Normal file
@ -0,0 +1,173 @@
|
||||
use contract_interface::zgs_flow::MineContext;
|
||||
use ethereum_types::{H256, U256};
|
||||
use rand::{self, Rng};
|
||||
use task_executor::TaskExecutor;
|
||||
use tokio::sync::{broadcast, mpsc};
|
||||
|
||||
use zgs_spec::{SECTORS_PER_LOAD, SECTORS_PER_MAX_MINING_RANGE, SECTORS_PER_PRICING};
|
||||
|
||||
use crate::{
|
||||
pora::{AnswerWithoutProof, Miner},
|
||||
watcher::MineContextMessage,
|
||||
MinerConfig, MinerMessage, PoraLoader,
|
||||
};
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct PoraService {
|
||||
mine_context_receiver: mpsc::UnboundedReceiver<MineContextMessage>,
|
||||
mine_answer_sender: mpsc::UnboundedSender<AnswerWithoutProof>,
|
||||
msg_recv: broadcast::Receiver<MinerMessage>,
|
||||
loader: Arc<dyn PoraLoader>,
|
||||
|
||||
puzzle: Option<PoraPuzzle>,
|
||||
mine_range: CustomMineRange,
|
||||
miner_id: H256,
|
||||
}
|
||||
|
||||
struct PoraPuzzle {
|
||||
context: MineContext,
|
||||
target_quality: U256,
|
||||
}
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
pub struct CustomMineRange {
|
||||
start_position: Option<u64>,
|
||||
end_position: Option<u64>,
|
||||
}
|
||||
|
||||
impl CustomMineRange {
|
||||
#[inline]
|
||||
fn to_valid_range(self, context: &MineContext) -> Option<(u64, u64)> {
|
||||
let self_start_position = self.start_position?;
|
||||
let self_end_position = self.end_position?;
|
||||
|
||||
if self_start_position >= self_end_position {
|
||||
return None;
|
||||
}
|
||||
let minable_length =
|
||||
(context.flow_length.as_u64() / SECTORS_PER_LOAD as u64) * SECTORS_PER_LOAD as u64;
|
||||
|
||||
let mining_length = std::cmp::min(minable_length, SECTORS_PER_MAX_MINING_RANGE as u64);
|
||||
|
||||
let start_position = std::cmp::min(self_start_position, minable_length - mining_length);
|
||||
let start_position =
|
||||
(start_position / SECTORS_PER_PRICING as u64) * SECTORS_PER_PRICING as u64;
|
||||
Some((start_position, mining_length))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn is_covered(&self, recall_position: u64) -> Option<bool> {
|
||||
let self_start_position = self.start_position?;
|
||||
let self_end_position = self.end_position?;
|
||||
|
||||
if self.start_position >= self.end_position {
|
||||
return Some(false);
|
||||
}
|
||||
Some(
|
||||
self_start_position <= recall_position + SECTORS_PER_LOAD as u64
|
||||
|| self_end_position > recall_position,
|
||||
)
|
||||
}
|
||||
}
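// Hedged reading of `to_valid_range` above (a restatement of the code, not new
// behavior): the flow length is truncated to whole loads, the mining span is
// capped at SECTORS_PER_MAX_MINING_RANGE, and the start position is clamped so
// the span fits and then aligned down to a SECTORS_PER_PRICING boundary.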
|
||||
|
||||
impl PoraService {
|
||||
pub fn spawn(
|
||||
executor: TaskExecutor,
|
||||
msg_recv: broadcast::Receiver<MinerMessage>,
|
||||
mine_context_receiver: mpsc::UnboundedReceiver<MineContextMessage>,
|
||||
loader: Arc<dyn PoraLoader>,
|
||||
config: &MinerConfig,
|
||||
) -> mpsc::UnboundedReceiver<AnswerWithoutProof> {
|
||||
let (mine_answer_sender, mine_answer_receiver) =
|
||||
mpsc::unbounded_channel::<AnswerWithoutProof>();
|
||||
let mine_range = CustomMineRange {
|
||||
start_position: Some(0),
|
||||
end_position: Some(u64::MAX),
|
||||
};
|
||||
let pora = PoraService {
|
||||
mine_context_receiver,
|
||||
mine_answer_sender,
|
||||
msg_recv,
|
||||
puzzle: None,
|
||||
mine_range,
|
||||
miner_id: config.miner_id,
|
||||
loader,
|
||||
};
|
||||
executor.spawn(async move { Box::pin(pora.start()).await }, "pora_master");
|
||||
mine_answer_receiver
|
||||
}
|
||||
|
||||
async fn start(mut self) {
|
||||
let mut mining_enabled = true;
|
||||
let mut channel_opened = true;
|
||||
loop {
|
||||
tokio::select! {
|
||||
biased;
|
||||
|
||||
v = self.msg_recv.recv(), if channel_opened => {
|
||||
trace!("PoraService receives msg={:?}", v);
|
||||
match v {
|
||||
Ok(MinerMessage::ToggleMining(enable)) => {
|
||||
info!("Toggle mining: {}", if enable { "on" } else { "off" });
|
||||
mining_enabled = enable;
|
||||
}
|
||||
Ok(MinerMessage::SetStartPosition(pos)) => {
|
||||
info!("Change start position to: {:?}", pos);
|
||||
self.mine_range.start_position = pos;
|
||||
}
|
||||
Ok(MinerMessage::SetEndPosition(pos)) => {
|
||||
info!("Change end position to: {:?}", pos);
|
||||
self.mine_range.end_position = pos;
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed)=>{
|
||||
warn!("Unexpected: Mine service config channel closed.");
|
||||
channel_opened = false;
|
||||
}
|
||||
Err(_)=>{
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
maybe_msg = self.mine_context_receiver.recv() => {
|
||||
trace!("PoraService receives context={:?}", maybe_msg);
|
||||
if let Some(msg) = maybe_msg {
|
||||
debug!("Update mine service: {:?}", msg);
|
||||
self.puzzle = msg.map(|(context, target_quality)| PoraPuzzle {
|
||||
context, target_quality
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
_ = async {}, if mining_enabled && self.as_miner().is_some() => {
|
||||
let nonce = H256(rand::thread_rng().gen());
|
||||
let miner = self.as_miner().unwrap();
|
||||
if let Some(answer) = miner.iteration(nonce).await{
|
||||
debug!("Hit Pora answer {:?}", answer);
|
||||
if self.mine_answer_sender.send(answer).is_err() {
|
||||
warn!("Mine submitter channel closed");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn as_miner(&self) -> Option<Miner> {
|
||||
match self.puzzle.as_ref() {
|
||||
Some(puzzle) => self.mine_range.to_valid_range(&puzzle.context).map(
|
||||
|(start_position, mining_length)| Miner {
|
||||
start_position,
|
||||
mining_length,
|
||||
miner_id: &self.miner_id,
|
||||
custom_mine_range: &self.mine_range,
|
||||
context: &puzzle.context,
|
||||
target_quality: &puzzle.target_quality,
|
||||
loader: &*self.loader,
|
||||
},
|
||||
),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
165
node/miner/src/pora.rs
Normal file
@ -0,0 +1,165 @@
use crate::{CustomMineRange, PoraLoader};
use blake2::{Blake2b512, Digest};
use contract_interface::zgs_flow::MineContext;
use ethereum_types::{H256, U256};
use zgs_spec::{BYTES_PER_SCRATCHPAD, BYTES_PER_SEAL, SECTORS_PER_LOAD, SECTORS_PER_SEAL};
use storage::log_store::MineLoadChunk;
use tiny_keccak::{Hasher, Keccak};

pub const BLAKE2B_OUTPUT_BYTES: usize = 64;
pub const KECCAK256_OUTPUT_BYTES: usize = 32;

fn keccak(input: impl AsRef<[u8]>) -> [u8; KECCAK256_OUTPUT_BYTES] {
    let mut hasher = Keccak::v256();
    let mut output = [0u8; 32];
    hasher.update(input.as_ref());
    hasher.finalize(&mut output);
    output
}

pub(crate) struct Miner<'a> {
    pub start_position: u64,
    pub mining_length: u64,
    pub miner_id: &'a H256,
    pub context: &'a MineContext,
    pub target_quality: &'a U256,
    pub loader: &'a dyn PoraLoader,
    pub custom_mine_range: &'a CustomMineRange,
}

#[derive(Debug)]
pub struct AnswerWithoutProof {
    pub context_digest: H256,
    pub context_flow_root: H256,
    pub nonce: H256,
    pub miner_id: H256,
    pub start_position: u64,
    pub mining_length: u64,
    pub recall_position: u64,
    pub seal_offset: usize,
    pub sealed_data: [u8; BYTES_PER_SEAL],
}

impl<'a> Miner<'a> {
    pub async fn iteration(&self, nonce: H256) -> Option<AnswerWithoutProof> {
        let (scratch_pad, recall_seed) = self.make_scratch_pad(&nonce);

        let (_, recall_offset) = U256::from_big_endian(&recall_seed)
            .div_mod(U256::from((self.mining_length as usize) / SECTORS_PER_LOAD));
        let recall_offset = recall_offset.as_u64();
        if !self
            .custom_mine_range
            .is_covered(self.start_position + recall_offset * SECTORS_PER_LOAD as u64)
            .unwrap()
        {
            trace!(
                "recall offset not in range: recall_offset={}, range={:?}",
                recall_offset,
                self.custom_mine_range
            );
            return None;
        }

        let MineLoadChunk {
            loaded_chunk,
            avalibilities,
        } = self
            .loader
            .load_sealed_data(self.start_position / SECTORS_PER_LOAD as u64 + recall_offset)
            .await?;

        let scratch_pad: [[u8; BYTES_PER_SEAL]; BYTES_PER_SCRATCHPAD / BYTES_PER_SEAL] =
            unsafe { std::mem::transmute(scratch_pad) };

        for ((idx, mut sealed_data), scratch_pad) in loaded_chunk
            .into_iter()
            .enumerate()
            .zip(scratch_pad.iter().cycle())
            .zip(avalibilities.into_iter())
            .filter_map(|(data, avaliable)| avaliable.then_some(data))
        {
            // Rust can optimize this loop well.
            for (x, y) in sealed_data.iter_mut().zip(scratch_pad.iter()) {
                *x ^= y;
            }

            let quality = self.pora(idx, &nonce, &sealed_data);
            if &quality <= self.target_quality {
                debug!("Find a PoRA valid answer, quality: {}", quality);
                // Undo the data mixing once a valid solution is found.
                for (x, y) in sealed_data.iter_mut().zip(scratch_pad.iter()) {
                    *x ^= y;
                }
                return Some(AnswerWithoutProof {
                    context_digest: H256::from(self.context.digest),
                    context_flow_root: self.context.flow_root.into(),
                    nonce,
                    miner_id: *self.miner_id,
                    start_position: self.start_position,
                    mining_length: self.mining_length,
                    recall_position: self.start_position
                        + recall_offset * SECTORS_PER_LOAD as u64
                        + idx as u64 * SECTORS_PER_SEAL as u64,
                    seal_offset: idx,
                    sealed_data,
                });
            }
        }
        None
    }

    fn make_scratch_pad(
        &self,
        nonce: &H256,
    ) -> ([u8; BYTES_PER_SCRATCHPAD], [u8; KECCAK256_OUTPUT_BYTES]) {
        let mut digest: [u8; BLAKE2B_OUTPUT_BYTES] = {
            let mut hasher = Blake2b512::new();
            hasher.update(self.miner_id);
            hasher.update(nonce);
            hasher.update(self.context.digest);

            hasher.update([0u8; 24]);
            hasher.update(self.start_position.to_be_bytes());

            hasher.update([0u8; 24]);
            hasher.update(self.mining_length.to_be_bytes());

            hasher.finalize().into()
        };

        let mut scratch_pad =
            [[0u8; BLAKE2B_OUTPUT_BYTES]; BYTES_PER_SCRATCHPAD / BLAKE2B_OUTPUT_BYTES];
        for scratch_pad_cell in scratch_pad.iter_mut() {
            digest = Blake2b512::new().chain_update(digest).finalize().into();
            *scratch_pad_cell = digest;
        }

        let scratch_pad: [u8; BYTES_PER_SCRATCHPAD] = unsafe { std::mem::transmute(scratch_pad) };
        let recall_seed: [u8; KECCAK256_OUTPUT_BYTES] = keccak(digest);

        (scratch_pad, recall_seed)
    }

    #[inline]
    fn pora(&self, seal_index: usize, nonce: &H256, mixed_data: &[u8; BYTES_PER_SEAL]) -> U256 {
        let mut hasher = Blake2b512::new();
        hasher.update([0u8; 24]);
        hasher.update(seal_index.to_be_bytes());

        hasher.update(self.miner_id);
        hasher.update(nonce);
        hasher.update(self.context.digest);

        hasher.update([0u8; 24]);
        hasher.update(self.start_position.to_be_bytes());

        hasher.update([0u8; 24]);
        hasher.update(self.mining_length.to_be_bytes());

        hasher.update([0u8; 64]);
        hasher.update(mixed_data);

        let digest = hasher.finalize();

        U256::from_big_endian(&digest[0..32])
    }
}
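The scratch pad in `make_scratch_pad` above is a plain Blake2b-512 hash chain: the seed digest commits to the miner id, nonce, mine context and range, each 64-byte cell is the hash of the previous digest, and the keccak of the final digest becomes the recall seed. A minimal standalone sketch of that chaining (a toy seed and pad size stand in for the real miner/context fields and `BYTES_PER_SCRATCHPAD`), using only the `blake2` crate:

```rust
use blake2::{Blake2b512, Digest};

/// Hypothetical, reduced version of `make_scratch_pad`: chain Blake2b-512
/// digests to fill a pad and keep the last digest for deriving the recall seed.
fn toy_scratch_pad(seed: [u8; 64], cells: usize) -> (Vec<[u8; 64]>, [u8; 64]) {
    let mut digest = seed;
    let mut pad = Vec::with_capacity(cells);
    for _ in 0..cells {
        // Same shape as the `chain_update(digest)` loop above.
        digest = Blake2b512::new().chain_update(digest).finalize().into();
        pad.push(digest);
    }
    (pad, digest)
}

fn main() {
    let (pad, last) = toy_scratch_pad([0u8; 64], 4);
    assert_eq!(pad.len(), 4);
    assert_eq!(pad[3], last);
    println!("last cell feeds the recall seed: {:02x?}", &last[..8]);
}
```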
213
node/miner/src/sealer.rs
Normal file
@ -0,0 +1,213 @@
use std::{collections::BTreeMap, sync::Arc};

use ethereum_types::H256;
use tokio::{
    sync::RwLock,
    time::{sleep, Duration, Instant},
};

use contract_interface::{EpochRangeWithContextDigest, ZgsFlow};
use zgs_spec::SECTORS_PER_SEAL;
use storage::{
    error::Result,
    log_store::{SealAnswer, SealTask, Store},
};
use task_executor::TaskExecutor;

use crate::config::{MineServiceMiddleware, MinerConfig};

const DB_QUERY_PERIOD_ON_NO_TASK: u64 = 1;
const DB_QUERY_PERIOD_ON_ERROR: u64 = 5;
const CHAIN_STATUS_QUERY_PERIOD: u64 = 5;

pub struct Sealer {
    flow_contract: ZgsFlow<MineServiceMiddleware>,
    store: Arc<RwLock<dyn Store>>,
    context_cache: BTreeMap<u128, EpochRangeWithContextDigest>,
    last_context_flow_length: u64,
    miner_id: H256,
}

impl Sealer {
    pub fn spawn(
        executor: TaskExecutor,
        provider: Arc<MineServiceMiddleware>,
        store: Arc<RwLock<dyn Store>>,
        config: &MinerConfig,
    ) {
        let flow_contract = ZgsFlow::new(config.flow_address, provider);
        let sealer = Sealer {
            flow_contract,
            store,
            context_cache: Default::default(),
            last_context_flow_length: 0,
            miner_id: config.miner_id,
        };

        executor.spawn(async move { Box::pin(sealer.start()).await }, "data_sealer");
    }

    async fn start(mut self) {
        let db_checker_throttle = sleep(Duration::from_secs(0));
        tokio::pin!(db_checker_throttle);

        let contract_checker_throttle = sleep(Duration::from_secs(0));
        tokio::pin!(contract_checker_throttle);

        loop {
            tokio::select! {
                biased;

                () = &mut contract_checker_throttle, if !contract_checker_throttle.is_elapsed() => {
                }

                () = &mut db_checker_throttle, if !db_checker_throttle.is_elapsed() => {
                }

                _ = async {}, if contract_checker_throttle.is_elapsed() => {
                    if let Err(err) = self.update_flow_length().await {
                        warn!("Fetch onchain context failed {:?}", err);
                    }
                    contract_checker_throttle.as_mut().reset(Instant::now() + Duration::from_secs(CHAIN_STATUS_QUERY_PERIOD));
                }

                _ = async {}, if db_checker_throttle.is_elapsed() => {
                    match self.seal_iteration().await {
                        Ok(true) => {},
                        Ok(false) => {db_checker_throttle.as_mut().reset(Instant::now() + Duration::from_secs(DB_QUERY_PERIOD_ON_NO_TASK));}
                        Err(err) => {
                            warn!("Seal iteration failed {:?}", err);
                            db_checker_throttle.as_mut().reset(Instant::now() + Duration::from_secs(DB_QUERY_PERIOD_ON_ERROR));
                        }
                    }
                }
            }
        }
    }

    async fn update_flow_length(&mut self) -> Result<()> {
        let recent_context = self.flow_contract.make_context_with_result().call().await?;
        debug!(target: "seal", "Recent context is {:?}", recent_context);

        let recent_flow_length = recent_context.flow_length.as_u64();
        if self.last_context_flow_length < recent_flow_length {
            let epoch_range = self
                .flow_contract
                .get_epoch_range(recent_context.digest)
                .call()
                .await?;
            self.context_cache.insert(
                epoch_range.start,
                EpochRangeWithContextDigest {
                    start: epoch_range.start,
                    end: epoch_range.end,
                    digest: recent_context.digest,
                },
            );
            self.last_context_flow_length = recent_flow_length;
            info!(target: "seal", "Update sealable flow length: {}", recent_flow_length)
        }
        Ok(())
    }

    async fn fetch_context(&mut self, seal_index: u64) -> Result<Option<(H256, u64)>> {
        let last_entry = ((seal_index as usize + 1) * SECTORS_PER_SEAL - 1) as u128;
        if self.last_context_flow_length <= last_entry as u64 {
            return Ok(None);
        }

        if let Some((_, context)) = self.context_cache.range(..=last_entry).next_back() {
            if context.start <= last_entry && context.end > last_entry {
                return Ok(Some((
                    H256(context.digest),
                    context.end as u64 / SECTORS_PER_SEAL as u64,
                )));
            }
        }

        let context = match self
            .flow_contract
            .query_context_at_position(last_entry)
            .call()
            .await
        {
            Ok(context) => context,
            Err(err) => {
                info!("Error when fetching entries {:?}", err);
                return Ok(None);
            }
        };
        info!(
            target: "seal", "Fetch new context: range {} -> {}",
            context.start, context.end
        );
        self.context_cache.insert(context.start, context.clone());

        Ok(Some((
            H256(context.digest),
            context.end as u64 / SECTORS_PER_SEAL as u64,
        )))
    }

    async fn fetch_task(&self) -> Result<Option<Vec<SealTask>>> {
        let seal_index_max = self.last_context_flow_length as usize / SECTORS_PER_SEAL;
        self.store
            .read()
            .await
            .flow()
            .pull_seal_chunk(seal_index_max)
    }

    async fn submit_answer(&self, answers: Vec<SealAnswer>) -> Result<()> {
        self.store
            .write()
            .await
            .flow_mut()
            .submit_seal_result(answers)
    }

    async fn seal_iteration(&mut self) -> Result<bool> {
        let tasks = match self.fetch_task().await? {
            Some(tasks) if !tasks.is_empty() => tasks,
            _ => {
                return Ok(false);
            }
        };

        debug!(
            "Get seal tasks at seal index {:?}",
            tasks.iter().map(|x| x.seal_index).collect::<Vec<u64>>()
        );

        let mut answers = Vec::with_capacity(tasks.len());

        for task in tasks {
            let (context_digest, end_seal) =
                if let Some(context) = self.fetch_context(task.seal_index).await? {
                    context
                } else {
                    trace!(target: "seal", "Index {} is not ready for seal", task.seal_index);
                    continue;
                };
            let mut data = task.non_sealed_data;
            zgs_seal::seal(
                &mut data,
                &self.miner_id,
                &context_digest,
                task.seal_index * SECTORS_PER_SEAL as u64,
            );
            answers.push(SealAnswer {
                seal_index: task.seal_index,
                version: task.version,
                sealed_data: data,
                miner_id: self.miner_id,
                seal_context: context_digest,
                context_end_seal: end_seal,
            });
        }

        self.submit_answer(answers).await?;

        Ok(true)
    }
}
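The `start` loop above drives the contract poll and the DB poll with two pinned `sleep` futures that are re-armed after each piece of work, so a slow branch cannot starve the other one. A stripped-down, runnable sketch of that throttle pattern (shorter periods, and the real contract/DB calls replaced by prints) under those assumptions:

```rust
use tokio::time::{sleep, Duration, Instant};

#[tokio::main]
async fn main() {
    // Same shape as `contract_checker_throttle` / `db_checker_throttle` above.
    let fast = sleep(Duration::from_secs(0));
    tokio::pin!(fast);
    let slow = sleep(Duration::from_secs(0));
    tokio::pin!(slow);

    for _ in 0..10 {
        tokio::select! {
            biased;
            // While a throttle is still running, just wait on it.
            () = &mut fast, if !fast.is_elapsed() => {}
            () = &mut slow, if !slow.is_elapsed() => {}
            // Once a throttle has elapsed, do the work and re-arm it.
            _ = async {}, if fast.is_elapsed() => {
                println!("fast tick (e.g. pull seal tasks)");
                fast.as_mut().reset(Instant::now() + Duration::from_millis(100));
            }
            _ = async {}, if slow.is_elapsed() => {
                println!("slow tick (e.g. refresh onchain context)");
                slow.as_mut().reset(Instant::now() + Duration::from_millis(300));
            }
        }
    }
}
```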
62
node/miner/src/service.rs
Normal file
@ -0,0 +1,62 @@
use crate::sealer::Sealer;
use crate::submitter::Submitter;
use crate::{config::MinerConfig, mine::PoraService, watcher::MineContextWatcher};
use network::NetworkMessage;
use std::sync::Arc;
use storage::log_store::Store;
use tokio::sync::mpsc;
use tokio::sync::{broadcast, RwLock};

#[derive(Clone, Debug)]
pub enum MinerMessage {
    /// Enable / disable mining.
    ToggleMining(bool),

    /// Change the mining range.
    SetStartPosition(Option<u64>),
    SetEndPosition(Option<u64>),
}

pub struct MineService;

impl MineService {
    pub async fn spawn(
        executor: task_executor::TaskExecutor,
        _network_send: mpsc::UnboundedSender<NetworkMessage>,
        config: MinerConfig,
        store: Arc<RwLock<dyn Store>>,
    ) -> Result<broadcast::Sender<MinerMessage>, String> {
        let provider = Arc::new(config.make_provider().await?);

        let (msg_send, msg_recv) = broadcast::channel(1024);

        let mine_context_receiver = MineContextWatcher::spawn(
            executor.clone(),
            msg_recv.resubscribe(),
            provider.clone(),
            &config,
        );

        let mine_answer_receiver = PoraService::spawn(
            executor.clone(),
            msg_recv.resubscribe(),
            mine_context_receiver,
            Arc::new(store.clone()),
            &config,
        );

        Submitter::spawn(
            executor.clone(),
            mine_answer_receiver,
            provider.clone(),
            store.clone(),
            &config,
        );

        Sealer::spawn(executor, provider, store, &config);

        debug!("Starting miner service");

        Ok(msg_send)
    }
}
128
node/miner/src/submitter.rs
Normal file
@ -0,0 +1,128 @@
use contract_interface::PoraAnswer;
use contract_interface::{ZgsFlow, PoraMine};
use ethers::providers::PendingTransaction;
use shared_types::FlowRangeProof;
use std::sync::Arc;
use storage::log_store::Store;
use task_executor::TaskExecutor;
use tokio::sync::{mpsc, RwLock};

use crate::config::{MineServiceMiddleware, MinerConfig};
use crate::pora::AnswerWithoutProof;

use zgs_spec::SECTORS_PER_SEAL;

const SUBMISSION_RETIES: usize = 3;

pub struct Submitter {
    mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
    mine_contract: PoraMine<MineServiceMiddleware>,
    flow_contract: ZgsFlow<MineServiceMiddleware>,
    store: Arc<RwLock<dyn Store>>,
}

impl Submitter {
    pub fn spawn(
        executor: TaskExecutor,
        mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
        provider: Arc<MineServiceMiddleware>,
        store: Arc<RwLock<dyn Store>>,
        config: &MinerConfig,
    ) {
        let mine_contract = PoraMine::new(config.mine_address, provider.clone());
        let flow_contract = ZgsFlow::new(config.flow_address, provider);

        let submitter = Submitter {
            mine_answer_receiver,
            mine_contract,
            flow_contract,
            store,
        };
        executor.spawn(
            async move { Box::pin(submitter.start()).await },
            "mine_answer_submitter",
        );
    }

    async fn start(mut self) {
        loop {
            match self.mine_answer_receiver.recv().await {
                Some(answer) => {
                    if let Err(e) = self.submit_answer(answer).await {
                        warn!(e)
                    }
                }
                None => {
                    warn!("Mine submitter stopped because the mine answer channel is closed.");
                    break;
                }
            };
        }
    }

    async fn submit_answer(&mut self, mine_answer: AnswerWithoutProof) -> Result<(), String> {
        debug!("submit answer: {:?}", mine_answer);
        let sealed_context_digest = self
            .flow_contract
            .query_context_at_position(
                (mine_answer.recall_position + SECTORS_PER_SEAL as u64 - 1) as u128,
            )
            .call()
            .await
            .map_err(|e| format!("Failed to fetch sealed context digest: {:?}", e))?;
        debug!("Fetch sealed context: {:?}", sealed_context_digest);

        let flow_proof = self
            .store
            .read()
            .await
            .get_proof_at_root(
                &mine_answer.context_flow_root,
                mine_answer.recall_position,
                SECTORS_PER_SEAL as u64,
            )
            .map_err(|e| e.to_string())?;

        let answer = PoraAnswer {
            context_digest: mine_answer.context_digest.0,
            nonce: mine_answer.nonce.0,
            miner_id: mine_answer.miner_id.0,
            start_position: mine_answer.start_position.into(),
            mine_length: mine_answer.mining_length.into(),
            recall_position: mine_answer.recall_position.into(),
            seal_offset: mine_answer.seal_offset.into(),
            sealed_context_digest: sealed_context_digest.digest, // TODO(kevin): wait for implementation of data sealing.
            sealed_data: unsafe { std::mem::transmute(mine_answer.sealed_data) },
            merkle_proof: flow_proof_to_pora_merkle_proof(flow_proof),
        };
        trace!("submit_answer: answer={:?}", answer);

        let submission_call = self.mine_contract.submit(answer).legacy();
        let pending_transaction: PendingTransaction<'_, _> = submission_call
            .send()
            .await
            .map_err(|e| format!("Failed to send mine answer transaction: {:?}", e))?;

        let receipt = pending_transaction
            .retries(SUBMISSION_RETIES)
            .await
            .map_err(|e| format!("Failed to execute mine answer transaction: {:?}", e))?
            .ok_or(format!(
                "Mine answer transaction dropped after {} retries",
                SUBMISSION_RETIES
            ))?;

        info!("Submit PoRA success");
        debug!("Receipt: {:?}", receipt);

        Ok(())
    }
}

// TODO: The conversion will be simpler if we optimize the range proof structure.
fn flow_proof_to_pora_merkle_proof(flow_proof: FlowRangeProof) -> Vec<[u8; 32]> {
    let depth_in_sealed_data = SECTORS_PER_SEAL.trailing_zeros() as usize;
    let full_proof: Vec<[u8; 32]> = flow_proof.left_proof.lemma().iter().map(|h| h.0).collect();
    // Exclude `item`, the nodes in the sealed data subtree, and `root`.
    full_proof[depth_in_sealed_data + 1..full_proof.len() - 1].to_vec()
}
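`flow_proof_to_pora_merkle_proof` at the bottom of this file only trims a Merkle lemma: the first entry is the leaf itself, the next `log2(SECTORS_PER_SEAL)` entries sit inside the sealed-data subtree, and the last entry is the root, so only the middle slice is submitted on chain. A self-contained illustration of that index arithmetic, with a fake 8-entry lemma and `SECTORS_PER_SEAL` assumed to be 16:

```rust
fn main() {
    // Hypothetical numbers: one seal covers 16 sectors, the lemma has 8 hashes.
    let sectors_per_seal: usize = 16;
    let depth_in_sealed_data = sectors_per_seal.trailing_zeros() as usize; // 4

    // Stand-in lemma: [item, 4 in-seal subtree nodes, 2 upper siblings, root].
    let full_proof: Vec<[u8; 32]> = (0u8..8).map(|i| [i; 32]).collect();

    // Same slice as `flow_proof_to_pora_merkle_proof`: drop the item, the
    // in-seal nodes and the root; keep only the upper siblings.
    let merkle_proof = full_proof[depth_in_sealed_data + 1..full_proof.len() - 1].to_vec();
    assert_eq!(merkle_proof.len(), 2);
    assert_eq!(merkle_proof[0], [5u8; 32]);
}
```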
142
node/miner/src/watcher.rs
Normal file
@ -0,0 +1,142 @@
#![allow(unused)]

use contract_interface::{zgs_flow::MineContext, ZgsFlow, PoraMine};
use ethereum_types::{Address, H256, U256};
use ethers::{
    contract::Contract,
    providers::{JsonRpcClient, Middleware, Provider, StreamExt},
    types::BlockId,
};
use task_executor::TaskExecutor;
use tokio::{
    sync::{broadcast, mpsc},
    time::{sleep, Instant, Sleep},
    try_join,
};

use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use std::{ops::DerefMut, str::FromStr};

use crate::{config::MineServiceMiddleware, MinerConfig, MinerMessage};

pub type MineContextMessage = Option<(MineContext, U256)>;

lazy_static! {
    pub static ref EMPTY_HASH: H256 =
        H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
}

pub struct MineContextWatcher {
    provider: Arc<MineServiceMiddleware>,
    flow_contract: ZgsFlow<MineServiceMiddleware>,
    mine_contract: PoraMine<MineServiceMiddleware>,

    mine_context_sender: mpsc::UnboundedSender<MineContextMessage>,
    last_report: MineContextMessage,

    msg_recv: broadcast::Receiver<MinerMessage>,
}

impl MineContextWatcher {
    pub fn spawn(
        executor: TaskExecutor,
        msg_recv: broadcast::Receiver<MinerMessage>,
        provider: Arc<MineServiceMiddleware>,
        config: &MinerConfig,
    ) -> mpsc::UnboundedReceiver<MineContextMessage> {
        let provider = provider;

        let mine_contract = PoraMine::new(config.mine_address, provider.clone());
        let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());

        let (mine_context_sender, mine_context_receiver) =
            mpsc::unbounded_channel::<MineContextMessage>();
        let watcher = MineContextWatcher {
            provider,
            flow_contract,
            mine_contract,
            mine_context_sender,
            msg_recv,
            last_report: None,
        };
        executor.spawn(
            async move { Box::pin(watcher.start()).await },
            "mine_context_watcher",
        );
        mine_context_receiver
    }

    async fn start(mut self) {
        let mut mining_enabled = true;
        let mut channel_opened = true;

        let mut mining_throttle = sleep(Duration::from_secs(0));
        tokio::pin!(mining_throttle);

        loop {
            tokio::select! {
                biased;

                v = self.msg_recv.recv(), if channel_opened => {
                    match v {
                        Ok(MinerMessage::ToggleMining(enable)) => {
                            mining_enabled = enable;
                        }
                        Err(broadcast::error::RecvError::Closed) => {
                            channel_opened = false;
                        }
                        _ => {}
                    }
                }

                () = &mut mining_throttle, if !mining_throttle.is_elapsed() => {
                }

                _ = async {}, if mining_enabled && mining_throttle.is_elapsed() => {
                    mining_throttle.as_mut().reset(Instant::now() + Duration::from_secs(1));
                    if let Err(err) = self.query_recent_context().await {
                        warn!(err);
                    }
                }
            }
        }
    }

    async fn query_recent_context(&mut self) -> Result<(), String> {
        // let mut watcher = self
        //     .provider
        //     .watch_blocks()
        //     .await
        //     .expect("should success")
        //     .stream();
        // watcher.next().await
        let context_call = self.flow_contract.make_context_with_result();
        let epoch_call = self.mine_contract.last_mined_epoch();
        let quality_call = self.mine_contract.target_quality();

        let (context, epoch, quality) =
            try_join!(context_call.call(), epoch_call.call(), quality_call.call())
                .map_err(|e| format!("Failed to query mining context: {:?}", e))?;
        let report = if context.epoch > epoch && context.digest != EMPTY_HASH.0 {
            if context.block_digest == [0; 32] {
                warn!("Mine context is not updated on time.");
                None
            } else {
                Some((context, quality))
            }
        } else {
            None
        };

        if report != self.last_report {
            self.mine_context_sender
                .send(report.clone())
                .map_err(|e| format!("Failed to send out the most recent mine context: {:?}", e))?;
        }
        self.last_report = report;

        Ok(())
    }
}
55
node/network/Cargo.toml
Normal file
@ -0,0 +1,55 @@
[package]
name = "network"
version = "0.2.0"
edition = "2021"

[dependencies]
directory = { path = "../../common/directory" }
dirs = "4.0.0"
discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] }
error-chain = "0.12.4"
eth2_ssz = "0.4.0"
eth2_ssz_derive = "0.3.0"
eth2_ssz_types = "0.2.1"
ethereum-types = "0.14"
fnv = "1.0.7"
futures = "0.3.21"
hashset_delay = { path = "../../common/hashset_delay" }
hex = "0.4.3"
zgs_version = { path = "../../common/zgs_version" }
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lru = "0.7.7"
parking_lot = "0.12.1"
prometheus-client = "0.16.0"
rand = "0.8.5"
regex = "1.5.6"
serde = { version = "1.0.137", features = ["derive"] }
serde_derive = "1.0.137"
sha2 = "0.10.2"
shared_types = { path = "../shared_types" }
smallvec = "1.8.0"
snap = "1.0.5"
strum = { version = "0.24.1", features = ["derive"] }
task_executor = { path = "../../common/task_executor" }
tiny-keccak = "2.0.2"
tokio = { version = "1.19.2", features = ["time", "macros"] }
tokio-io-timeout = "1.2.0"
tokio-util = { version = "0.6.10", features = ["codec", "compat", "time"] }
tracing = "0.1.35"
unsigned-varint = { version = "0.7.1", features = ["codec"] }
if-addrs = "0.10.1"
slog = "2.7.0"
igd = "0.12.1"

[dependencies.libp2p]
version = "0.45.1"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]

[dev-dependencies]
exit-future = "0.2.0"
tempfile = "3.3.0"
tracing-test = "0.2.2"
unused_port = { path = "../../common/unused_port" }
void = "1.0.2"
183
node/network/src/behaviour/gossip_cache.rs
Normal file
@ -0,0 +1,183 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use crate::types::GossipKind;
use crate::GossipTopic;

use tokio_util::time::delay_queue::{DelayQueue, Key};

/// Store of gossip messages that we failed to publish and will try again later. By default, all
/// messages are ignored. This behaviour can be changed using `GossipCacheBuilder::default_timeout`
/// to apply the same delay to every kind. Individual timeouts for specific kinds can be set and
/// will overwrite the default_timeout if present.
pub struct GossipCache {
    /// Expire timeouts for each topic-msg pair.
    expirations: DelayQueue<(GossipTopic, Vec<u8>)>,
    /// Messages cached for each topic.
    topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
    /// Timeout for Example messages.
    example: Option<Duration>,
    /// Timeout for FindFile messages.
    find_file: Option<Duration>,
    /// Timeout for AnnounceFile messages.
    announce_file: Option<Duration>,
}

#[derive(Default)]
pub struct GossipCacheBuilder {
    default_timeout: Option<Duration>,
    /// Timeout for Example messages.
    example: Option<Duration>,
    /// Timeout for FindFile messages.
    find_file: Option<Duration>,
    /// Timeout for AnnounceFile messages.
    announce_file: Option<Duration>,
}

#[allow(dead_code)]
impl GossipCacheBuilder {
    /// By default, all timeouts are disabled. Setting a default timeout will enable all timeouts
    /// that are not already set.
    pub fn default_timeout(mut self, timeout: Duration) -> Self {
        self.default_timeout = Some(timeout);
        self
    }

    /// Timeout for Example messages.
    pub fn example_timeout(mut self, timeout: Duration) -> Self {
        self.example = Some(timeout);
        self
    }

    /// Timeout for FindFile messages.
    pub fn find_file_timeout(mut self, timeout: Duration) -> Self {
        self.find_file = Some(timeout);
        self
    }

    /// Timeout for AnnounceFile messages.
    pub fn announce_file_timeout(mut self, timeout: Duration) -> Self {
        self.announce_file = Some(timeout);
        self
    }

    pub fn build(self) -> GossipCache {
        let GossipCacheBuilder {
            default_timeout,
            example,
            find_file,
            announce_file,
        } = self;

        GossipCache {
            expirations: DelayQueue::default(),
            topic_msgs: HashMap::default(),
            example: example.or(default_timeout),
            find_file: find_file.or(default_timeout),
            announce_file: announce_file.or(default_timeout),
        }
    }
}

impl GossipCache {
    /// Get a builder of a `GossipCache`. Topic kinds for which no timeout is defined will be
    /// ignored if added in `insert`.
    pub fn builder() -> GossipCacheBuilder {
        GossipCacheBuilder::default()
    }

    // Insert a message to be sent later.
    pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
        let expire_timeout = match topic.kind() {
            GossipKind::Example => self.example,
            GossipKind::FindFile => self.find_file,
            GossipKind::AnnounceFile => self.announce_file,
        };

        let expire_timeout = match expire_timeout {
            Some(expire_timeout) => expire_timeout,
            None => return,
        };

        match self
            .topic_msgs
            .entry(topic.clone())
            .or_default()
            .entry(data.clone())
        {
            Entry::Occupied(key) => self.expirations.reset(key.get(), expire_timeout),
            Entry::Vacant(entry) => {
                let key = self.expirations.insert((topic, data), expire_timeout);
                entry.insert(key);
            }
        }
    }

    // Get the registered messages for this topic.
    pub fn retrieve(&mut self, topic: &GossipTopic) -> Option<impl Iterator<Item = Vec<u8>> + '_> {
        if let Some(msgs) = self.topic_msgs.remove(topic) {
            for (_, key) in msgs.iter() {
                self.expirations.remove(key);
            }
            Some(msgs.into_keys())
        } else {
            None
        }
    }
}

impl futures::stream::Stream for GossipCache {
    type Item = Result<GossipTopic, String>; // We don't care to retrieve the expired data.

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.expirations.poll_expired(cx) {
            Poll::Ready(Some(Ok(expired))) => {
                let expected_key = expired.key();
                let (topic, data) = expired.into_inner();
                match self.topic_msgs.get_mut(&topic) {
                    Some(msgs) => {
                        let key = msgs.remove(&data);
                        debug_assert_eq!(key, Some(expected_key));
                        if msgs.is_empty() {
                            // No more messages for this topic.
                            self.topic_msgs.remove(&topic);
                        }
                    }
                    None => {
                        #[cfg(debug_assertions)]
                        panic!("Topic for registered message is not present.")
                    }
                }
                Poll::Ready(Some(Ok(topic)))
            }
            Poll::Ready(Some(Err(x))) => Poll::Ready(Some(Err(x.to_string()))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::types::GossipKind;

    use super::*;
    use futures::stream::StreamExt;

    #[tokio::test]
    async fn test_stream() {
        let mut cache = GossipCache::builder()
            .default_timeout(Duration::from_millis(300))
            .build();
        let test_topic =
            GossipTopic::new(GossipKind::Example, crate::types::GossipEncoding::SSZSnappy);
        cache.insert(test_topic, vec![]);
        tokio::time::sleep(Duration::from_millis(300)).await;
        while cache.next().await.is_some() {}
        assert!(cache.expirations.is_empty());
        assert!(cache.topic_msgs.is_empty());
    }
}
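The builder semantics described in the doc comment above (a per-kind timeout wins, the default only fills kinds that were left unset, and kinds with no timeout stay disabled) reduce to `Option::or` in `build`. A small sketch of that precedence rule with plain `Option<Duration>`, so it runs without the crate's types:

```rust
use std::time::Duration;

fn main() {
    let default_timeout = Some(Duration::from_secs(10));
    let announce_file = Some(Duration::from_secs(60)); // set explicitly
    let find_file: Option<Duration> = None;            // falls back to the default

    // Mirrors `GossipCacheBuilder::build`: the explicit value wins, the default fills the gaps.
    assert_eq!(announce_file.or(default_timeout), Some(Duration::from_secs(60)));
    assert_eq!(find_file.or(default_timeout), Some(Duration::from_secs(10)));
}
```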
354
node/network/src/behaviour/gossipsub_scoring_parameters.rs
Normal file
@ -0,0 +1,354 @@
// use crate::error;
use libp2p::gossipsub::PeerScoreThresholds;
// use std::cmp::max;
// use std::collections::HashMap;
// use std::time::Duration;

// const MAX_IN_MESH_SCORE: f64 = 10.0;
// const MAX_FIRST_MESSAGE_DELIVERIES_SCORE: f64 = 40.0;
// const BEACON_BLOCK_WEIGHT: f64 = 0.5;
// const BEACON_AGGREGATE_PROOF_WEIGHT: f64 = 0.5;
// const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05;
// const PROPOSER_SLASHING_WEIGHT: f64 = 0.05;
// const ATTESTER_SLASHING_WEIGHT: f64 = 0.05;

/// The time window (seconds) that we expect messages to be forwarded to us in the mesh.
// const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2;

// Const as this is used in the peer manager to prevent gossip from disconnecting peers.
pub const GREYLIST_THRESHOLD: f64 = -16000.0;

/// Builds the peer score thresholds.
pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds {
    PeerScoreThresholds {
        gossip_threshold: -4000.0,
        publish_threshold: -8000.0,
        graylist_threshold: GREYLIST_THRESHOLD,
        accept_px_threshold: 100.0,
        opportunistic_graft_threshold: 5.0,
    }
}

// pub struct PeerScoreSettings {
//     // slot: Duration,
//     epoch: Duration,

//     // beacon_attestation_subnet_weight: f64,
//     max_positive_score: f64,

//     decay_interval: Duration,
//     decay_to_zero: f64,

//     // mesh_n: usize,
//     // max_committees_per_slot: usize,
//     // target_committee_size: usize,
//     // target_aggregators_per_committee: usize,
//     attestation_subnet_count: u64,
// }

// impl PeerScoreSettings {
//     pub fn new(chain_spec: &ChainSpec) -> PeerScoreSettings {
//         let slot = Duration::from_secs(chain_spec.seconds_per_slot);
//         let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64;
//         let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE)
//             * (BEACON_BLOCK_WEIGHT
//                 + BEACON_AGGREGATE_PROOF_WEIGHT
//                 + beacon_attestation_subnet_weight * chain_spec.attestation_subnet_count as f64
//                 + VOLUNTARY_EXIT_WEIGHT
//                 + PROPOSER_SLASHING_WEIGHT
//                 + ATTESTER_SLASHING_WEIGHT);

//         PeerScoreSettings {
//             // slot,
//             // epoch: slot * TSpec::slots_per_epoch() as u32,
//             epoch: slot * 32 as u32,
//             // beacon_attestation_subnet_weight,
//             max_positive_score,
//             decay_interval: max(Duration::from_secs(1), slot),
//             decay_to_zero: 0.01,
//             // mesh_n: gs_config.mesh_n(),
//             // max_committees_per_slot: chain_spec.max_committees_per_slot,
//             // target_committee_size: chain_spec.target_committee_size,
//             // target_aggregators_per_committee: chain_spec.target_aggregators_per_committee as usize,
//             attestation_subnet_count: chain_spec.attestation_subnet_count,
//         }
//     }

//     pub fn get_peer_score_params(
//         &self,
//         thresholds: &PeerScoreThresholds,
//     ) -> error::Result<PeerScoreParams> {
//         let mut params = PeerScoreParams {
//             decay_interval: self.decay_interval,
//             decay_to_zero: self.decay_to_zero,
//             retain_score: self.epoch * 100,
//             app_specific_weight: 1.0,
//             ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP
//             behaviour_penalty_threshold: 6.0,
//             behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
//             ..Default::default()
//         };

//         let target_value = Self::decay_convergence(
//             params.behaviour_penalty_decay,
//             // 10.0 / TSpec::slots_per_epoch() as f64,
//             10.0 / 32 as f64,
//         ) - params.behaviour_penalty_threshold;
//         params.behaviour_penalty_weight = thresholds.gossip_threshold / target_value.powi(2);

//         params.topic_score_cap = self.max_positive_score * 0.5;
//         params.ip_colocation_factor_weight = -params.topic_score_cap;

//         params.topics = HashMap::new();

//         // let get_hash = |kind: GossipKind| -> TopicHash {
//         //     let topic: Topic =
//         //         GossipTopic::new(kind, GossipEncoding::default()).into();
//         //     topic.hash()
//         // };

//         // //first all fixed topics
//         // params.topics.insert(
//         //     get_hash(GossipKind::VoluntaryExit),
//         //     Self::get_topic_params(
//         //         self,
//         //         VOLUNTARY_EXIT_WEIGHT,
//         //         // 4.0 / TSpec::slots_per_epoch() as f64,
//         //         4.0 / 32 as f64,
//         //         self.epoch * 100,
//         //         None,
//         //     ),
//         // );
//         // params.topics.insert(
//         //     get_hash(GossipKind::AttesterSlashing),
//         //     Self::get_topic_params(
//         //         self,
//         //         ATTESTER_SLASHING_WEIGHT,
//         //         // 1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
//         //         1.0 / 5.0 / 32 as f64,
//         //         self.epoch * 100,
//         //         None,
//         //     ),
//         // );
//         // params.topics.insert(
//         //     get_hash(GossipKind::ProposerSlashing),
//         //     Self::get_topic_params(
//         //         self,
//         //         PROPOSER_SLASHING_WEIGHT,
//         //         // 1.0 / 5.0 / TSpec::slots_per_epoch() as f64,
//         //         1.0 / 5.0 / 32 as f64,
//         //         self.epoch * 100,
//         //         None,
//         //     ),
//         // );

//         //dynamic topics
//         // let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) =
//         //     self.get_dynamic_topic_params(active_validators, current_slot)?;

//         // params
//         //     .topics
//         //     .insert(get_hash(GossipKind::BeaconBlock), beacon_block_params);

//         // params.topics.insert(
//         //     get_hash(GossipKind::BeaconAggregateAndProof),
//         //     beacon_aggregate_proof_params,
//         // );

//         // for i in 0..self.attestation_subnet_count {
//         //     params.topics.insert(
//         //         get_hash(GossipKind::Attestation(SubnetId::new(i))),
//         //         beacon_attestation_subnet_params.clone(),
//         //     );
//         // }

//         Ok(params)
//     }

//     // pub fn get_dynamic_topic_params(
//     //     &self,
//     //     active_validators: usize,
//     //     current_slot: Slot,
//     // ) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> {
//     //     let (aggregators_per_slot, committees_per_slot) =
//     //         self.expected_aggregator_count_per_slot(active_validators)?;
//     //     let multiple_bursts_per_subnet_per_epoch = committees_per_slot as u64
//     //         >= 2 * self.attestation_subnet_count / TSpec::slots_per_epoch();

//     //     let beacon_block_params = Self::get_topic_params(
//     //         self,
//     //         BEACON_BLOCK_WEIGHT,
//     //         1.0,
//     //         self.epoch * 20,
//     //         Some((TSpec::slots_per_epoch() * 5, 3.0, self.epoch, current_slot)),
//     //     );

//     //     let beacon_aggregate_proof_params = Self::get_topic_params(
//     //         self,
//     //         BEACON_AGGREGATE_PROOF_WEIGHT,
//     //         aggregators_per_slot,
//     //         self.epoch,
//     //         Some((TSpec::slots_per_epoch() * 2, 4.0, self.epoch, current_slot)),
//     //     );
//     //     let beacon_attestation_subnet_params = Self::get_topic_params(
//     //         self,
//     //         self.beacon_attestation_subnet_weight,
//     //         active_validators as f64
//     //             / self.attestation_subnet_count as f64
//     //             / TSpec::slots_per_epoch() as f64,
//     //         self.epoch
//     //             * (if multiple_bursts_per_subnet_per_epoch {
//     //                 1
//     //             } else {
//     //                 4
//     //             }),
//     //         Some((
//     //             TSpec::slots_per_epoch()
//     //                 * (if multiple_bursts_per_subnet_per_epoch {
//     //                     4
//     //                 } else {
//     //                     16
//     //                 }),
//     //             16.0,
//     //             if multiple_bursts_per_subnet_per_epoch {
//     //                 self.slot * (TSpec::slots_per_epoch() as u32 / 2 + 1)
//     //             } else {
//     //                 self.epoch * 3
//     //             },
//     //             current_slot,
//     //         )),
//     //     );

//     //     Ok((
//     //         beacon_block_params,
//     //         beacon_aggregate_proof_params,
//     //         beacon_attestation_subnet_params,
//     //     ))
//     // }

//     pub fn attestation_subnet_count(&self) -> u64 {
//         self.attestation_subnet_count
//     }

//     fn score_parameter_decay_with_base(
//         decay_time: Duration,
//         decay_interval: Duration,
//         decay_to_zero: f64,
//     ) -> f64 {
//         let ticks = decay_time.as_secs_f64() / decay_interval.as_secs_f64();
//         decay_to_zero.powf(1.0 / ticks)
//     }

//     fn decay_convergence(decay: f64, rate: f64) -> f64 {
//         rate / (1.0 - decay)
//     }

//     // fn threshold(decay: f64, rate: f64) -> f64 {
//     //     Self::decay_convergence(decay, rate) * decay
//     // }

//     // fn expected_aggregator_count_per_slot(
//     //     &self,
//     //     active_validators: usize,
//     // ) -> error::Result<(f64, usize)> {
//     //     let committees_per_slot = TSpec::get_committee_count_per_slot_with(
//     //         active_validators,
//     //         self.max_committees_per_slot,
//     //         self.target_committee_size,
//     //     )
//     //     .map_err(|e| format!("Could not get committee count from spec: {:?}", e))?;

//     //     let committees = committees_per_slot * TSpec::slots_per_epoch() as usize;

//     //     let smaller_committee_size = active_validators / committees;
//     //     let num_larger_committees = active_validators - smaller_committee_size * committees;

//     //     let modulo_smaller = max(
//     //         1,
//     //         smaller_committee_size / self.target_aggregators_per_committee as usize,
//     //     );
//     //     let modulo_larger = max(
//     //         1,
//     //         (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
//     //     );

//     //     Ok((
//     //         (((committees - num_larger_committees) * smaller_committee_size) as f64
//     //             / modulo_smaller as f64
//     //             + (num_larger_committees * (smaller_committee_size + 1)) as f64
//     //                 / modulo_larger as f64)
//     //             / TSpec::slots_per_epoch() as f64,
//     //         committees_per_slot,
//     //     ))
//     // }

//     fn score_parameter_decay(&self, decay_time: Duration) -> f64 {
//         Self::score_parameter_decay_with_base(decay_time, self.decay_interval, self.decay_to_zero)
//     }

//     // fn get_topic_params(
//     //     &self,
//     //     topic_weight: f64,
//     //     expected_message_rate: f64,
//     //     first_message_decay_time: Duration,
//     //     // decay slots (decay time in slots), cap factor, activation window, current slot
//     //     mesh_message_info: Option<(u64, f64, Duration, Slot)>,
//     // ) -> TopicScoreParams {
//     //     let mut t_params = TopicScoreParams::default();

//     //     t_params.topic_weight = topic_weight;

//     //     t_params.time_in_mesh_quantum = self.slot;
//     //     t_params.time_in_mesh_cap = 3600.0 / t_params.time_in_mesh_quantum.as_secs_f64();
//     //     t_params.time_in_mesh_weight = 10.0 / t_params.time_in_mesh_cap;

//     //     t_params.first_message_deliveries_decay =
//     //         self.score_parameter_decay(first_message_decay_time);
//     //     t_params.first_message_deliveries_cap = Self::decay_convergence(
//     //         t_params.first_message_deliveries_decay,
//     //         2.0 * expected_message_rate / self.mesh_n as f64,
//     //     );
//     //     t_params.first_message_deliveries_weight = 40.0 / t_params.first_message_deliveries_cap;

//     //     if let Some((decay_slots, cap_factor, activation_window, current_slot)) = mesh_message_info
//     //     {
//     //         let decay_time = self.slot * decay_slots as u32;
//     //         t_params.mesh_message_deliveries_decay = self.score_parameter_decay(decay_time);
//     //         t_params.mesh_message_deliveries_threshold = Self::threshold(
//     //             t_params.mesh_message_deliveries_decay,
//     //             expected_message_rate / 50.0,
//     //         );
//     //         t_params.mesh_message_deliveries_cap =
//     //             if cap_factor * t_params.mesh_message_deliveries_threshold < 2.0 {
//     //                 2.0
//     //             } else {
//     //                 cap_factor * t_params.mesh_message_deliveries_threshold
//     //             };
//     //         t_params.mesh_message_deliveries_activation = activation_window;
//     //         t_params.mesh_message_deliveries_window =
//     //             Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW);
//     //         t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay;
//     //         t_params.mesh_message_deliveries_weight = -t_params.topic_weight;
//     //         t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight;
//     //         if decay_slots >= current_slot.as_u64() {
//     //             t_params.mesh_message_deliveries_threshold = 0.0;
//     //             t_params.mesh_message_deliveries_weight = 0.0;
//     //         }
//     //     } else {
//     //         t_params.mesh_message_deliveries_weight = 0.0;
//     //         t_params.mesh_message_deliveries_threshold = 0.0;
//     //         t_params.mesh_message_deliveries_decay = 0.0;
//     //         t_params.mesh_message_deliveries_cap = 0.0;
//     //         t_params.mesh_message_deliveries_window = Duration::from_secs(0);
//     //         t_params.mesh_message_deliveries_activation = Duration::from_secs(0);
//     //         t_params.mesh_failure_penalty_decay = 0.0;
//     //         t_params.mesh_failure_penalty_weight = 0.0;
//     //     }

//     //     t_params.invalid_message_deliveries_weight =
//     //         -self.max_positive_score / t_params.topic_weight;
//     //     t_params.invalid_message_deliveries_decay = self.score_parameter_decay(self.epoch * 50);

//     //     t_params
//     //     }
// }
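The commented-out decay helpers above rest on two small identities: a per-tick decay of `decay_to_zero.powf(1.0 / ticks)` shrinks a score to exactly `decay_to_zero` of its starting value after `ticks` ticks, and `decay_convergence(decay, rate) = rate / (1.0 - decay)` is the fixed point a counter approaches when `rate` is added and the decay applied once per tick. A tiny runnable check of both, with made-up numbers:

```rust
fn main() {
    // d = decay_to_zero^(1/ticks): after `ticks` steps the score is down to `decay_to_zero`.
    let (decay_to_zero, ticks) = (0.01_f64, 100.0_f64);
    let d = decay_to_zero.powf(1.0 / ticks);
    assert!((d.powf(ticks) - decay_to_zero).abs() < 1e-12);

    // The fixed point of `x -> x * d + rate` is rate / (1 - d), i.e. `decay_convergence`.
    let rate = 10.0 / 32.0;
    let limit = rate / (1.0 - d);
    let mut x = 0.0;
    for _ in 0..10_000 {
        x = x * d + rate;
    }
    assert!((x - limit).abs() < 1e-6);
    println!("per-tick decay {:.4}, convergence {:.3}", d, limit);
}
```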
Some files were not shown because too many files have changed in this diff.