Compare commits


1 commit

Author    SHA1          Message           Date
Roy Lu    fd9c033176    Updated README    2024-10-23 08:52:56 -07:00
125 changed files with 1112 additions and 2945 deletions

Cargo.lock (generated)

@ -223,10 +223,7 @@ dependencies = [
"eth2_ssz", "eth2_ssz",
"eth2_ssz_derive", "eth2_ssz_derive",
"ethereum-types 0.14.1", "ethereum-types 0.14.1",
"itertools 0.13.0",
"lazy_static", "lazy_static",
"lru 0.12.5",
"metrics",
"once_cell", "once_cell",
"serde", "serde",
"tiny-keccak", "tiny-keccak",
@ -913,9 +910,7 @@ dependencies = [
"anyhow", "anyhow",
"async-lock 2.8.0", "async-lock 2.8.0",
"hashlink 0.8.4", "hashlink 0.8.4",
"lazy_static",
"log_entry_sync", "log_entry_sync",
"metrics",
"network", "network",
"shared_types", "shared_types",
"storage-async", "storage-async",
@ -1678,7 +1673,7 @@ dependencies = [
"hkdf", "hkdf",
"lazy_static", "lazy_static",
"libp2p-core 0.30.2", "libp2p-core 0.30.2",
"lru 0.7.8", "lru",
"parking_lot 0.11.2", "parking_lot 0.11.2",
"rand 0.8.5", "rand 0.8.5",
"rlp", "rlp",
@ -2519,12 +2514,6 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "foldhash"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
[[package]] [[package]]
name = "foreign-types" name = "foreign-types"
version = "0.3.2" version = "0.3.2"
@ -2957,17 +2946,6 @@ dependencies = [
"allocator-api2", "allocator-api2",
] ]
[[package]]
name = "hashbrown"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash",
]
[[package]] [[package]]
name = "hashers" name = "hashers"
version = "1.0.1" version = "1.0.1"
@ -4139,7 +4117,7 @@ dependencies = [
"libp2p-core 0.33.0", "libp2p-core 0.33.0",
"libp2p-swarm", "libp2p-swarm",
"log", "log",
"lru 0.7.8", "lru",
"prost 0.10.4", "prost 0.10.4",
"prost-build 0.10.4", "prost-build 0.10.4",
"prost-codec", "prost-codec",
@ -4655,14 +4633,12 @@ dependencies = [
"jsonrpsee", "jsonrpsee",
"lazy_static", "lazy_static",
"metrics", "metrics",
"reqwest",
"serde_json", "serde_json",
"shared_types", "shared_types",
"storage", "storage",
"task_executor", "task_executor",
"thiserror", "thiserror",
"tokio", "tokio",
"url",
] ]
[[package]] [[package]]
@ -4674,15 +4650,6 @@ dependencies = [
"hashbrown 0.12.3", "hashbrown 0.12.3",
] ]
[[package]]
name = "lru"
version = "0.12.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
dependencies = [
"hashbrown 0.15.0",
]
[[package]] [[package]]
name = "lru-cache" name = "lru-cache"
version = "0.1.2" version = "0.1.2"
@ -4748,10 +4715,9 @@ dependencies = [
[[package]] [[package]]
name = "metrics" name = "metrics"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/Conflux-Chain/conflux-rust.git?rev=c4734e337c66d38e6396742cd5117b596e8d2603#c4734e337c66d38e6396742cd5117b596e8d2603" source = "git+https://github.com/Conflux-Chain/conflux-rust.git?rev=992ebc5483d937c8f6b883e266f8ed2a67a7fa9a#992ebc5483d937c8f6b883e266f8ed2a67a7fa9a"
dependencies = [ dependencies = [
"chrono", "chrono",
"duration-str",
"futures", "futures",
"influx_db_client", "influx_db_client",
"lazy_static", "lazy_static",
@ -5033,7 +4999,6 @@ dependencies = [
name = "network" name = "network"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"channel",
"directory", "directory",
"dirs 4.0.0", "dirs 4.0.0",
"discv5", "discv5",
@ -5053,7 +5018,7 @@ dependencies = [
"lazy_static", "lazy_static",
"libp2p", "libp2p",
"lighthouse_metrics", "lighthouse_metrics",
"lru 0.7.8", "lru",
"parking_lot 0.12.3", "parking_lot 0.12.3",
"rand 0.8.5", "rand 0.8.5",
"regex", "regex",
@ -7306,11 +7271,8 @@ dependencies = [
"kvdb", "kvdb",
"kvdb-memorydb", "kvdb-memorydb",
"kvdb-rocksdb", "kvdb-rocksdb",
"lazy_static",
"merkle_light", "merkle_light",
"merkle_tree", "merkle_tree",
"metrics",
"once_cell",
"parking_lot 0.12.3", "parking_lot 0.12.3",
"rand 0.8.5", "rand 0.8.5",
"rayon", "rayon",
@ -7332,7 +7294,6 @@ name = "storage-async"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"backtrace",
"eth2_ssz", "eth2_ssz",
"shared_types", "shared_types",
"storage", "storage",


@@ -28,7 +28,7 @@ members = [
 resolver = "2"

 [workspace.dependencies]
-metrics = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "c4734e337c66d38e6396742cd5117b596e8d2603" }
+metrics = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "992ebc5483d937c8f6b883e266f8ed2a67a7fa9a" }

 [patch.crates-io]
 discv5 = { path = "version-meld/discv5" }
@@ -37,7 +37,3 @@ enr = { path = "version-meld/enr" }

 [profile.bench.package.'storage']
 debug = true
-
-[profile.dev]
-# enabling debug_assertions will make node fail to start because of checks in `clap`.
-debug-assertions = false


@@ -2,34 +2,32 @@
 ## Overview

-0G Storage is the storage layer for the ZeroGravity data availability (DA) system. The 0G Storage layer holds three important features:
-
-* Built-in - It is natively built into the ZeroGravity DA system for data storage and retrieval.
-* General purpose - It is designed to support atomic transactions, mutable kv stores as well as archive log systems to enable wide range of applications with various data types.
-* Incentive - Instead of being just a decentralized database, 0G Storage introduces PoRA mining algorithm to incentivize storage network participants.
-
-To dive deep into the technical details, continue reading [0G Storage Spec.](docs/)
-
-## Integration
-
-We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to easily integrate 0G Storage in their applications with the following features:
-
-* File Merkle Tree Class
-* Flow Contract Types
-* RPC methods support
-* File upload
-* Support browser environment
-* Tests for different environments (In Progress)
-* File download (In Progress)
-
-## Deployment
-
-Please refer to [Deployment](docs/run.md) page for detailed steps to compile and start a 0G Storage node.
-
-## Test
-
-Please refer to the [One Box Test](docs/onebox-test.md) page for local testing purpose.
-
-## Contributing
-
-We want to do everything we can to help you be successful while working on your contribution and projects.
-
-To make contributions to the project, please follow the guidelines [here](contributing.md).
+0G Storage is a decentralized data storage system designed to address the challenges of high-throughput and low-latency data storage and retrieval, in areas such as AI and gaming.
+
+In addition, it forms the storage layer for the 0G data availability (DA) system, with the cross-layer integration abstracted away from Rollup and AppChain builders.
+
+## System Architecture
+
+0G Storage consists of two main components:
+
+1. **Data Publishing Lane**: Ensures quick data availability and verification through the 0G Consensus network.
+2. **Data Storage Lane**: Manages large data transfers and storage using an erasure-coding mechanism for redundancy and reliability.
+
+Across the two lanes, 0G Storage supports the following features:
+
+* **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types.
+* **Incentivized Participation**: Utilizes the PoRA (Proof of Random Access) mining algorithm to incentivize storage network participants.
+
+For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/og-storage).
+
+## Documentation
+
+- If you want to run a node, please refer to the [Running a Node](https://docs.0g.ai/run-a-node/storage-node) guide.
+- If you want build a project using 0G storage, please refer to the [0G Storage SDK](https://docs.0g.ai/build-with-0g/storage-sdk) guide.
+
+## Support and Additional Resources
+
+Here you'll find various resources and communities that may help you complete a project or contribute to 0G.
+
+### Communities
+
+- [0G Telegram](https://t.me/web3_0glabs)
+- [0G Discord](https://discord.com/invite/0glabs)
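The updated overview above describes data organized into fixed-size sectors with Merkle commitments and erasure-coded redundancy. As a point of reference, here is a deliberately simplified sketch of that commitment style: it splits a blob into sectors and folds the sector hashes into a single Merkle root. The 256-byte sector size, the Keccak pairing rule, and the duplicate-last-node handling for odd layers are assumptions of this sketch, not the node's actual parameters; the real tree is the `AppendMerkleTree` changed later in this compare.

```rust
// Illustrative only: commit to a blob as a Merkle root over fixed-size sectors.
use sha3::{Digest, Keccak256};

const SECTOR_SIZE: usize = 256; // assumed sector size, for illustration only

fn hash_pair(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Keccak256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

fn sector_root(data: &[u8]) -> [u8; 32] {
    // Leaf layer: one Keccak hash per zero-padded sector.
    let mut layer: Vec<[u8; 32]> = data
        .chunks(SECTOR_SIZE)
        .map(|sector| {
            let mut padded = [0u8; SECTOR_SIZE];
            padded[..sector.len()].copy_from_slice(sector);
            Keccak256::digest(padded).into()
        })
        .collect();
    // Reduce pairwise until one root remains; duplicate the last node on odd layers.
    while layer.len() > 1 {
        layer = layer
            .chunks(2)
            .map(|pair| match pair {
                [l, r] => hash_pair(l, r),
                [l] => hash_pair(l, l),
                _ => unreachable!(),
            })
            .collect();
    }
    layer.first().copied().unwrap_or([0u8; 32])
}
```

A client and a storage node that agree on the sector size and pairing rule can compare such roots to check that uploaded data was stored intact.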


@@ -13,8 +13,3 @@ serde = { version = "1.0.137", features = ["derive"] }
 lazy_static = "1.4.0"
 tracing = "0.1.36"
 once_cell = "1.19.0"
-metrics = { workspace = true }
-itertools = "0.13.0"
-lru = "0.12.5"


@ -1,30 +1,23 @@
mod merkle_tree; mod merkle_tree;
mod metrics;
mod node_manager;
mod proof; mod proof;
mod sha3; mod sha3;
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use itertools::Itertools;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap}; use std::collections::{BTreeMap, HashMap};
use std::fmt::Debug; use std::fmt::Debug;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Instant;
use tracing::{trace, warn}; use tracing::{trace, warn};
use crate::merkle_tree::MerkleTreeWrite;
pub use crate::merkle_tree::{ pub use crate::merkle_tree::{
Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, ZERO_HASHES, Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, ZERO_HASHES,
}; };
pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction};
pub use proof::{Proof, RangeProof}; pub use proof::{Proof, RangeProof};
pub use sha3::Sha3Algorithm; pub use sha3::Sha3Algorithm;
pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> { pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
/// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves. /// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves.
node_manager: NodeManager<E>, layers: Vec<Vec<E>>,
/// Keep the delta nodes that can be used to construct a history tree. /// Keep the delta nodes that can be used to construct a history tree.
/// The key is the root node of that version. /// The key is the root node of that version.
delta_nodes_map: BTreeMap<u64, DeltaNodes<E>>, delta_nodes_map: BTreeMap<u64, DeltaNodes<E>>,
@ -42,16 +35,13 @@ pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> { impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn new(leaves: Vec<E>, leaf_height: usize, start_tx_seq: Option<u64>) -> Self { pub fn new(leaves: Vec<E>, leaf_height: usize, start_tx_seq: Option<u64>) -> Self {
let mut merkle = Self { let mut merkle = Self {
node_manager: NodeManager::new_dummy(), layers: vec![leaves],
delta_nodes_map: BTreeMap::new(), delta_nodes_map: BTreeMap::new(),
root_to_tx_seq_map: HashMap::new(), root_to_tx_seq_map: HashMap::new(),
min_depth: None, min_depth: None,
leaf_height, leaf_height,
_a: Default::default(), _a: Default::default(),
}; };
merkle.node_manager.start_transaction();
merkle.node_manager.add_layer();
merkle.node_manager.append_nodes(0, &leaves);
if merkle.leaves() == 0 { if merkle.leaves() == 0 {
if let Some(seq) = start_tx_seq { if let Some(seq) = start_tx_seq {
merkle.delta_nodes_map.insert( merkle.delta_nodes_map.insert(
@ -61,12 +51,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
}, },
); );
} }
merkle.node_manager.commit();
return merkle; return merkle;
} }
// Reconstruct the whole tree. // Reconstruct the whole tree.
merkle.recompute(0, 0, None); merkle.recompute(0, 0, None);
merkle.node_manager.commit();
// Commit the first version in memory. // Commit the first version in memory.
// TODO(zz): Check when the roots become available. // TODO(zz): Check when the roots become available.
merkle.commit(start_tx_seq); merkle.commit(start_tx_seq);
@ -74,44 +62,19 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
pub fn new_with_subtrees( pub fn new_with_subtrees(
node_db: Arc<dyn NodeDatabase<E>>, initial_data: MerkleTreeInitialData<E>,
node_cache_capacity: usize,
leaf_height: usize, leaf_height: usize,
start_tx_seq: Option<u64>,
) -> Result<Self> { ) -> Result<Self> {
let mut merkle = Self { let mut merkle = Self {
node_manager: NodeManager::new(node_db, node_cache_capacity)?, layers: vec![vec![]],
delta_nodes_map: BTreeMap::new(), delta_nodes_map: BTreeMap::new(),
root_to_tx_seq_map: HashMap::new(), root_to_tx_seq_map: HashMap::new(),
min_depth: None, min_depth: None,
leaf_height, leaf_height,
_a: Default::default(), _a: Default::default(),
}; };
if merkle.height() == 0 { if initial_data.subtree_list.is_empty() {
merkle.node_manager.start_transaction();
merkle.node_manager.add_layer();
merkle.node_manager.commit();
}
Ok(merkle)
}
/// This is only used for the last chunk, so `leaf_height` is always 0 so far.
pub fn new_with_depth(leaves: Vec<E>, depth: usize, start_tx_seq: Option<u64>) -> Self {
let mut node_manager = NodeManager::new_dummy();
node_manager.start_transaction();
if leaves.is_empty() {
// Create an empty merkle tree with `depth`.
let mut merkle = Self {
// dummy node manager for the last chunk.
node_manager,
delta_nodes_map: BTreeMap::new(),
root_to_tx_seq_map: HashMap::new(),
min_depth: Some(depth),
leaf_height: 0,
_a: Default::default(),
};
for _ in 0..depth {
merkle.node_manager.add_layer();
}
if let Some(seq) = start_tx_seq { if let Some(seq) = start_tx_seq {
merkle.delta_nodes_map.insert( merkle.delta_nodes_map.insert(
seq, seq,
@ -120,26 +83,54 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
}, },
); );
} }
merkle.node_manager.commit(); return Ok(merkle);
merkle }
} else { merkle.append_subtree_list(initial_data.subtree_list)?;
merkle.commit(start_tx_seq);
for (index, h) in initial_data.known_leaves {
merkle.fill_leaf(index, h);
}
for (layer_index, position, h) in initial_data.extra_mpt_nodes {
// TODO: Delete duplicate nodes from DB.
merkle.layers[layer_index][position] = h;
}
Ok(merkle)
}
/// This is only used for the last chunk, so `leaf_height` is always 0 so far.
pub fn new_with_depth(leaves: Vec<E>, depth: usize, start_tx_seq: Option<u64>) -> Self {
if leaves.is_empty() {
// Create an empty merkle tree with `depth`.
let mut merkle = Self { let mut merkle = Self {
// dummy node manager for the last chunk. layers: vec![vec![]; depth],
node_manager,
delta_nodes_map: BTreeMap::new(), delta_nodes_map: BTreeMap::new(),
root_to_tx_seq_map: HashMap::new(), root_to_tx_seq_map: HashMap::new(),
min_depth: Some(depth), min_depth: Some(depth),
leaf_height: 0, leaf_height: 0,
_a: Default::default(), _a: Default::default(),
}; };
merkle.node_manager.add_layer(); if let Some(seq) = start_tx_seq {
merkle.append_nodes(0, &leaves); merkle.delta_nodes_map.insert(
for _ in 1..depth { seq,
merkle.node_manager.add_layer(); DeltaNodes {
right_most_nodes: vec![],
},
);
} }
merkle
} else {
let mut layers = vec![vec![]; depth];
layers[0] = leaves;
let mut merkle = Self {
layers,
delta_nodes_map: BTreeMap::new(),
root_to_tx_seq_map: HashMap::new(),
min_depth: Some(depth),
leaf_height: 0,
_a: Default::default(),
};
// Reconstruct the whole tree. // Reconstruct the whole tree.
merkle.recompute(0, 0, None); merkle.recompute(0, 0, None);
merkle.node_manager.commit();
// Commit the first version in memory. // Commit the first version in memory.
merkle.commit(start_tx_seq); merkle.commit(start_tx_seq);
merkle merkle
@ -147,31 +138,22 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
pub fn append(&mut self, new_leaf: E) { pub fn append(&mut self, new_leaf: E) {
let start_time = Instant::now();
if new_leaf == E::null() { if new_leaf == E::null() {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
self.node_manager.start_transaction(); self.layers[0].push(new_leaf);
self.node_manager.push_node(0, new_leaf);
self.recompute_after_append_leaves(self.leaves() - 1); self.recompute_after_append_leaves(self.leaves() - 1);
self.node_manager.commit();
metrics::APPEND.update_since(start_time);
} }
pub fn append_list(&mut self, leaf_list: Vec<E>) { pub fn append_list(&mut self, mut leaf_list: Vec<E>) {
let start_time = Instant::now();
if leaf_list.contains(&E::null()) { if leaf_list.contains(&E::null()) {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
self.node_manager.start_transaction();
let start_index = self.leaves(); let start_index = self.leaves();
self.node_manager.append_nodes(0, &leaf_list); self.layers[0].append(&mut leaf_list);
self.recompute_after_append_leaves(start_index); self.recompute_after_append_leaves(start_index);
self.node_manager.commit();
metrics::APPEND_LIST.update_since(start_time);
} }
/// Append a leaf list by providing their intermediate node hash. /// Append a leaf list by providing their intermediate node hash.
@ -180,57 +162,43 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// Other nodes in the subtree will be set to `null` nodes. /// Other nodes in the subtree will be set to `null` nodes.
/// TODO: Optimize to avoid storing the `null` nodes? /// TODO: Optimize to avoid storing the `null` nodes?
pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> { pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
let start_time = Instant::now();
if subtree_root == E::null() { if subtree_root == E::null() {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_root is null"); bail!("subtree_root is null");
} }
self.node_manager.start_transaction();
let start_index = self.leaves(); let start_index = self.leaves();
self.append_subtree_inner(subtree_depth, subtree_root)?; self.append_subtree_inner(subtree_depth, subtree_root)?;
self.recompute_after_append_subtree(start_index, subtree_depth - 1); self.recompute_after_append_subtree(start_index, subtree_depth - 1);
self.node_manager.commit();
metrics::APPEND_SUBTREE.update_since(start_time);
Ok(()) Ok(())
} }
pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> { pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
let start_time = Instant::now();
if subtree_list.iter().any(|(_, root)| root == &E::null()) { if subtree_list.iter().any(|(_, root)| root == &E::null()) {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_list contains null"); bail!("subtree_list contains null");
} }
self.node_manager.start_transaction();
for (subtree_depth, subtree_root) in subtree_list { for (subtree_depth, subtree_root) in subtree_list {
let start_index = self.leaves(); let start_index = self.leaves();
self.append_subtree_inner(subtree_depth, subtree_root)?; self.append_subtree_inner(subtree_depth, subtree_root)?;
self.recompute_after_append_subtree(start_index, subtree_depth - 1); self.recompute_after_append_subtree(start_index, subtree_depth - 1);
} }
self.node_manager.commit();
metrics::APPEND_SUBTREE_LIST.update_since(start_time);
Ok(()) Ok(())
} }
/// Change the value of the last leaf and return the new merkle root. /// Change the value of the last leaf and return the new merkle root.
/// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves. /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
pub fn update_last(&mut self, updated_leaf: E) { pub fn update_last(&mut self, updated_leaf: E) {
let start_time = Instant::now();
if updated_leaf == E::null() { if updated_leaf == E::null() {
// updating to null is not allowed. // updating to null is not allowed.
return; return;
} }
self.node_manager.start_transaction(); if self.layers[0].is_empty() {
if self.layer_len(0) == 0 {
// Special case for the first data. // Special case for the first data.
self.push_node(0, updated_leaf); self.layers[0].push(updated_leaf);
} else { } else {
self.update_node(0, self.layer_len(0) - 1, updated_leaf); *self.layers[0].last_mut().unwrap() = updated_leaf;
} }
self.recompute_after_append_leaves(self.leaves() - 1); self.recompute_after_append_leaves(self.leaves() - 1);
self.node_manager.commit();
metrics::UPDATE_LAST.update_since(start_time);
} }
/// Fill an unknown `null` leaf with its real value. /// Fill an unknown `null` leaf with its real value.
@ -239,17 +207,13 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn fill_leaf(&mut self, index: usize, leaf: E) { pub fn fill_leaf(&mut self, index: usize, leaf: E) {
if leaf == E::null() { if leaf == E::null() {
// fill leaf with null is not allowed. // fill leaf with null is not allowed.
} else if self.node(0, index) == E::null() { } else if self.layers[0][index] == E::null() {
self.node_manager.start_transaction(); self.layers[0][index] = leaf;
self.update_node(0, index, leaf);
self.recompute_after_fill_leaves(index, index + 1); self.recompute_after_fill_leaves(index, index + 1);
self.node_manager.commit(); } else if self.layers[0][index] != leaf {
} else if self.node(0, index) != leaf {
panic!( panic!(
"Fill with invalid leaf, index={} was={:?} get={:?}", "Fill with invalid leaf, index={} was={:?} get={:?}",
index, index, self.layers[0][index], leaf
self.node(0, index),
leaf
); );
} }
} }
@ -262,20 +226,18 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
&mut self, &mut self,
proof: RangeProof<E>, proof: RangeProof<E>,
) -> Result<Vec<(usize, usize, E)>> { ) -> Result<Vec<(usize, usize, E)>> {
self.node_manager.start_transaction(); self.fill_with_proof(
let mut updated_nodes = Vec::new(); proof
let mut left_nodes = proof.left_proof.proof_nodes_in_tree(); .left_proof
if left_nodes.len() >= self.leaf_height { .proof_nodes_in_tree()
updated_nodes .split_off(self.leaf_height),
.append(&mut self.fill_with_proof(left_nodes.split_off(self.leaf_height))?); )?;
} self.fill_with_proof(
let mut right_nodes = proof.right_proof.proof_nodes_in_tree(); proof
if right_nodes.len() >= self.leaf_height { .right_proof
updated_nodes .proof_nodes_in_tree()
.append(&mut self.fill_with_proof(right_nodes.split_off(self.leaf_height))?); .split_off(self.leaf_height),
} )
self.node_manager.commit();
Ok(updated_nodes)
} }
pub fn fill_with_file_proof( pub fn fill_with_file_proof(
@ -300,16 +262,13 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
if tx_merkle_nodes.is_empty() { if tx_merkle_nodes.is_empty() {
return Ok(Vec::new()); return Ok(Vec::new());
} }
self.node_manager.start_transaction();
let mut position_and_data = let mut position_and_data =
proof.file_proof_nodes_in_tree(tx_merkle_nodes, tx_merkle_nodes_size); proof.file_proof_nodes_in_tree(tx_merkle_nodes, tx_merkle_nodes_size);
let start_index = (start_index >> self.leaf_height) as usize; let start_index = (start_index >> self.leaf_height) as usize;
for (i, (position, _)) in position_and_data.iter_mut().enumerate() { for (i, (position, _)) in position_and_data.iter_mut().enumerate() {
*position += start_index >> i; *position += start_index >> i;
} }
let updated_nodes = self.fill_with_proof(position_and_data)?; self.fill_with_proof(position_and_data)
self.node_manager.commit();
Ok(updated_nodes)
} }
/// This assumes that the proof leaf is no lower than the tree leaf. It holds for both SegmentProof and ChunkProof. /// This assumes that the proof leaf is no lower than the tree leaf. It holds for both SegmentProof and ChunkProof.
@ -321,27 +280,28 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
let mut updated_nodes = Vec::new(); let mut updated_nodes = Vec::new();
// A valid proof should not fail the following checks. // A valid proof should not fail the following checks.
for (i, (position, data)) in position_and_data.into_iter().enumerate() { for (i, (position, data)) in position_and_data.into_iter().enumerate() {
if position > self.layer_len(i) { let layer = &mut self.layers[i];
if position > layer.len() {
bail!( bail!(
"proof position out of range, position={} layer.len()={}", "proof position out of range, position={} layer.len()={}",
position, position,
self.layer_len(i) layer.len()
); );
} }
if position == self.layer_len(i) { if position == layer.len() {
// skip padding node. // skip padding node.
continue; continue;
} }
if self.node(i, position) == E::null() { if layer[position] == E::null() {
self.update_node(i, position, data.clone()); layer[position] = data.clone();
updated_nodes.push((i, position, data)) updated_nodes.push((i, position, data))
} else if self.node(i, position) != data { } else if layer[position] != data {
// The last node in each layer may have changed in the tree. // The last node in each layer may have changed in the tree.
trace!( trace!(
"conflict data layer={} position={} tree_data={:?} proof_data={:?}", "conflict data layer={} position={} tree_data={:?} proof_data={:?}",
i, i,
position, position,
self.node(i, position), layer[position],
data data
); );
} }
@ -357,8 +317,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
if position >= self.leaves() { if position >= self.leaves() {
bail!("Out of bound: position={} end={}", position, self.leaves()); bail!("Out of bound: position={} end={}", position, self.leaves());
} }
if self.node(0, position) != E::null() { if self.layers[0][position] != E::null() {
Ok(Some(self.node(0, position))) Ok(Some(self.layers[0][position].clone()))
} else { } else {
// The leaf hash is unknown. // The leaf hash is unknown.
Ok(None) Ok(None)
@ -406,11 +366,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
return; return;
} }
let mut right_most_nodes = Vec::new(); let mut right_most_nodes = Vec::new();
for height in 0..self.height() { for layer in &self.layers {
let pos = self.layer_len(height) - 1; right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone()));
right_most_nodes.push((pos, self.node(height, pos)));
} }
let root = self.root(); let root = self.root().clone();
self.delta_nodes_map self.delta_nodes_map
.insert(tx_seq, DeltaNodes::new(right_most_nodes)); .insert(tx_seq, DeltaNodes::new(right_most_nodes));
self.root_to_tx_seq_map.insert(root, tx_seq); self.root_to_tx_seq_map.insert(root, tx_seq);
@ -418,8 +377,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
fn before_extend_layer(&mut self, height: usize) { fn before_extend_layer(&mut self, height: usize) {
if height == self.height() { if height == self.layers.len() {
self.node_manager.add_layer() self.layers.push(Vec::new());
} }
} }
@ -436,6 +395,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
/// Given a range of changed leaf nodes and recompute the tree. /// Given a range of changed leaf nodes and recompute the tree.
/// Since this tree is append-only, we always compute to the end.
fn recompute( fn recompute(
&mut self, &mut self,
mut start_index: usize, mut start_index: usize,
@ -445,29 +405,22 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
start_index >>= height; start_index >>= height;
maybe_end_index = maybe_end_index.map(|end| end >> height); maybe_end_index = maybe_end_index.map(|end| end >> height);
// Loop until we compute the new root and reach `tree_depth`. // Loop until we compute the new root and reach `tree_depth`.
while self.layer_len(height) > 1 || height < self.height() - 1 { while self.layers[height].len() > 1 || height < self.layers.len() - 1 {
let next_layer_start_index = start_index >> 1; let next_layer_start_index = start_index >> 1;
if start_index % 2 == 1 { if start_index % 2 == 1 {
start_index -= 1; start_index -= 1;
} }
let mut end_index = maybe_end_index.unwrap_or(self.layer_len(height)); let mut end_index = maybe_end_index.unwrap_or(self.layers[height].len());
if end_index % 2 == 1 && end_index != self.layer_len(height) { if end_index % 2 == 1 && end_index != self.layers[height].len() {
end_index += 1; end_index += 1;
} }
let mut i = 0; let mut i = 0;
let iter = self let mut iter = self.layers[height][start_index..end_index].chunks_exact(2);
.node_manager
.get_nodes(height, start_index, end_index)
.chunks(2);
// We cannot modify the parent layer while iterating the child layer, // We cannot modify the parent layer while iterating the child layer,
// so just keep the changes and update them later. // so just keep the changes and update them later.
let mut parent_update = Vec::new(); let mut parent_update = Vec::new();
for chunk_iter in &iter { while let Some([left, right]) = iter.next() {
let chunk: Vec<_> = chunk_iter.collect();
if chunk.len() == 2 {
let left = &chunk[0];
let right = &chunk[1];
// If either left or right is null (unknown), we cannot compute the parent hash. // If either left or right is null (unknown), we cannot compute the parent hash.
// Note that if we are recompute a range of an existing tree, // Note that if we are recompute a range of an existing tree,
// we do not need to keep these possibly null parent. This is only saved // we do not need to keep these possibly null parent. This is only saved
@ -479,9 +432,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
}; };
parent_update.push((next_layer_start_index + i, parent)); parent_update.push((next_layer_start_index + i, parent));
i += 1; i += 1;
} else { }
assert_eq!(chunk.len(), 1); if let [r] = iter.remainder() {
let r = &chunk[0];
// Same as above. // Same as above.
let parent = if *r == E::null() { let parent = if *r == E::null() {
E::null() E::null()
@ -490,7 +442,6 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
}; };
parent_update.push((next_layer_start_index + i, parent)); parent_update.push((next_layer_start_index + i, parent));
} }
}
if !parent_update.is_empty() { if !parent_update.is_empty() {
self.before_extend_layer(height + 1); self.before_extend_layer(height + 1);
} }
@ -498,27 +449,27 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
// we can just overwrite `last_changed_parent_index` with new values. // we can just overwrite `last_changed_parent_index` with new values.
let mut last_changed_parent_index = None; let mut last_changed_parent_index = None;
for (parent_index, parent) in parent_update { for (parent_index, parent) in parent_update {
match parent_index.cmp(&self.layer_len(height + 1)) { match parent_index.cmp(&self.layers[height + 1].len()) {
Ordering::Less => { Ordering::Less => {
// We do not overwrite with null. // We do not overwrite with null.
if parent != E::null() { if parent != E::null() {
if self.node(height + 1, parent_index) == E::null() if self.layers[height + 1][parent_index] == E::null()
// The last node in a layer can be updated. // The last node in a layer can be updated.
|| (self.node(height + 1, parent_index) != parent || (self.layers[height + 1][parent_index] != parent
&& parent_index == self.layer_len(height + 1) - 1) && parent_index == self.layers[height + 1].len() - 1)
{ {
self.update_node(height + 1, parent_index, parent); self.layers[height + 1][parent_index] = parent;
last_changed_parent_index = Some(parent_index); last_changed_parent_index = Some(parent_index);
} else if self.node(height + 1, parent_index) != parent { } else if self.layers[height + 1][parent_index] != parent {
// Recompute changes a node in the middle. This should be impossible // Recompute changes a node in the middle. This should be impossible
// if the inputs are valid. // if the inputs are valid.
panic!("Invalid append merkle tree! height={} index={} expected={:?} get={:?}", panic!("Invalid append merkle tree! height={} index={} expected={:?} get={:?}",
height + 1, parent_index, self.node(height + 1, parent_index), parent); height + 1, parent_index, self.layers[height + 1][parent_index], parent);
} }
} }
} }
Ordering::Equal => { Ordering::Equal => {
self.push_node(height + 1, parent); self.layers[height + 1].push(parent);
last_changed_parent_index = Some(parent_index); last_changed_parent_index = Some(parent_index);
} }
Ordering::Greater => { Ordering::Greater => {
@ -549,10 +500,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
for height in 0..(subtree_depth - 1) { for height in 0..(subtree_depth - 1) {
self.before_extend_layer(height); self.before_extend_layer(height);
let subtree_layer_size = 1 << (subtree_depth - 1 - height); let subtree_layer_size = 1 << (subtree_depth - 1 - height);
self.append_nodes(height, &vec![E::null(); subtree_layer_size]); self.layers[height].append(&mut vec![E::null(); subtree_layer_size]);
} }
self.before_extend_layer(subtree_depth - 1); self.before_extend_layer(subtree_depth - 1);
self.push_node(subtree_depth - 1, subtree_root); self.layers[subtree_depth - 1].push(subtree_root);
Ok(()) Ok(())
} }
@ -563,45 +514,23 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
pub fn revert_to(&mut self, tx_seq: u64) -> Result<()> { pub fn revert_to(&mut self, tx_seq: u64) -> Result<()> {
if self.layer_len(0) == 0 { if self.layers[0].is_empty() {
// Any previous state of an empty tree is always empty. // Any previous state of an empty tree is always empty.
return Ok(()); return Ok(());
} }
self.node_manager.start_transaction();
let delta_nodes = self let delta_nodes = self
.delta_nodes_map .delta_nodes_map
.get(&tx_seq) .get(&tx_seq)
.ok_or_else(|| anyhow!("tx_seq unavailable, root={:?}", tx_seq))? .ok_or_else(|| anyhow!("tx_seq unavailable, root={:?}", tx_seq))?;
.clone();
// Dropping the upper layers that are not in the old merkle tree. // Dropping the upper layers that are not in the old merkle tree.
for height in (delta_nodes.right_most_nodes.len()..self.height()).rev() { self.layers.truncate(delta_nodes.right_most_nodes.len());
self.node_manager.truncate_layer(height);
}
for (height, (last_index, right_most_node)) in for (height, (last_index, right_most_node)) in
delta_nodes.right_most_nodes.iter().enumerate() delta_nodes.right_most_nodes.iter().enumerate()
{ {
self.node_manager.truncate_nodes(height, *last_index + 1); self.layers[height].truncate(*last_index + 1);
self.update_node(height, *last_index, right_most_node.clone()) self.layers[height][*last_index] = right_most_node.clone();
} }
self.clear_after(tx_seq); self.clear_after(tx_seq);
self.node_manager.commit();
Ok(())
}
// Revert to a tx_seq not in `delta_nodes_map`.
// This is needed to revert the last unfinished tx after restart.
pub fn revert_to_leaves(&mut self, leaves: usize) -> Result<()> {
self.node_manager.start_transaction();
for height in (0..self.height()).rev() {
let kept_nodes = leaves >> height;
if kept_nodes == 0 {
self.node_manager.truncate_layer(height);
} else {
self.node_manager.truncate_nodes(height, kept_nodes + 1);
}
}
self.recompute_after_append_leaves(leaves);
self.node_manager.commit();
Ok(()) Ok(())
} }
@ -621,25 +550,17 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
bail!("empty tree"); bail!("empty tree");
} }
Ok(HistoryTree { Ok(HistoryTree {
node_manager: &self.node_manager, layers: &self.layers,
delta_nodes, delta_nodes,
leaf_height: self.leaf_height, leaf_height: self.leaf_height,
}) })
} }
pub fn reset(&mut self) { pub fn reset(&mut self) {
self.node_manager.start_transaction(); self.layers = match self.min_depth {
for height in (0..self.height()).rev() { None => vec![vec![]],
self.node_manager.truncate_layer(height); Some(depth) => vec![vec![]; depth],
} };
if let Some(depth) = self.min_depth {
for _ in 0..depth {
self.node_manager.add_layer();
}
} else {
self.node_manager.add_layer();
}
self.node_manager.commit();
} }
fn clear_after(&mut self, tx_seq: u64) { fn clear_after(&mut self, tx_seq: u64) {
@ -659,10 +580,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
fn first_known_root_at(&self, index: usize) -> (usize, E) { fn first_known_root_at(&self, index: usize) -> (usize, E) {
let mut height = 0; let mut height = 0;
let mut index_in_layer = index; let mut index_in_layer = index;
while height < self.height() { while height < self.layers.len() {
let node = self.node(height, index_in_layer); let node = self.node(height, index_in_layer);
if !node.is_null() { if !node.is_null() {
return (height + 1, node); return (height + 1, node.clone());
} }
height += 1; height += 1;
index_in_layer /= 2; index_in_layer /= 2;
@ -707,7 +628,7 @@ impl<E: HashElement> DeltaNodes<E> {
pub struct HistoryTree<'m, E: HashElement> { pub struct HistoryTree<'m, E: HashElement> {
/// A reference to the global tree nodes. /// A reference to the global tree nodes.
node_manager: &'m NodeManager<E>, layers: &'m Vec<Vec<E>>,
/// The delta nodes that are difference from `layers`. /// The delta nodes that are difference from `layers`.
/// This could be a reference, we just take ownership for convenience. /// This could be a reference, we just take ownership for convenience.
delta_nodes: &'m DeltaNodes<E>, delta_nodes: &'m DeltaNodes<E>,
@ -718,18 +639,16 @@ pub struct HistoryTree<'m, E: HashElement> {
impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A> { impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> Self::E { fn node(&self, layer: usize, index: usize) -> &Self::E {
self.node_manager &self.layers[layer][index]
.get_node(layer, index)
.expect("index checked")
} }
fn height(&self) -> usize { fn height(&self) -> usize {
self.node_manager.num_layers() self.layers.len()
} }
fn layer_len(&self, layer_height: usize) -> usize { fn layer_len(&self, layer_height: usize) -> usize {
self.node_manager.layer_size(layer_height) self.layers[layer_height].len()
} }
fn padding_node(&self, height: usize) -> Self::E { fn padding_node(&self, height: usize) -> Self::E {
@ -739,13 +658,10 @@ impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A>
impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> { impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> Self::E { fn node(&self, layer: usize, index: usize) -> &Self::E {
match self.delta_nodes.get(layer, index).expect("range checked") { match self.delta_nodes.get(layer, index).expect("range checked") {
Some(node) if *node != E::null() => node.clone(), Some(node) if *node != E::null() => node,
_ => self _ => &self.layers[layer][index],
.node_manager
.get_node(layer, index)
.expect("index checked"),
} }
} }
@ -762,22 +678,6 @@ impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
} }
} }
impl<E: HashElement, A: Algorithm<E>> MerkleTreeWrite for AppendMerkleTree<E, A> {
type E = E;
fn push_node(&mut self, layer: usize, node: Self::E) {
self.node_manager.push_node(layer, node);
}
fn append_nodes(&mut self, layer: usize, nodes: &[Self::E]) {
self.node_manager.append_nodes(layer, nodes);
}
fn update_node(&mut self, layer: usize, pos: usize, node: Self::E) {
self.node_manager.add_node(layer, pos, node);
}
}
#[macro_export] #[macro_export]
macro_rules! ensure_eq { macro_rules! ensure_eq {
($given:expr, $expected:expr) => { ($given:expr, $expected:expr) => {
@ -799,7 +699,6 @@ macro_rules! ensure_eq {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::merkle_tree::MerkleTreeRead; use crate::merkle_tree::MerkleTreeRead;
use crate::sha3::Sha3Algorithm; use crate::sha3::Sha3Algorithm;
use crate::AppendMerkleTree; use crate::AppendMerkleTree;
use ethereum_types::H256; use ethereum_types::H256;


@@ -49,7 +49,7 @@ pub trait Algorithm<E: HashElement> {
 pub trait MerkleTreeRead {
     type E: HashElement;
-    fn node(&self, layer: usize, index: usize) -> Self::E;
+    fn node(&self, layer: usize, index: usize) -> &Self::E;
     fn height(&self) -> usize;
     fn layer_len(&self, layer_height: usize) -> usize;
     fn padding_node(&self, height: usize) -> Self::E;
@@ -58,7 +58,7 @@ pub trait MerkleTreeRead {
         self.layer_len(0)
     }

-    fn root(&self) -> Self::E {
+    fn root(&self) -> &Self::E {
         self.node(self.height() - 1, 0)
     }
@@ -70,16 +70,16 @@ pub trait MerkleTreeRead {
                 self.leaves()
             );
         }
-        if self.node(0, leaf_index) == Self::E::null() {
+        if self.node(0, leaf_index) == &Self::E::null() {
             bail!("Not ready to generate proof for leaf_index={}", leaf_index);
         }
         if self.height() == 1 {
-            return Proof::new(vec![self.root(), self.root().clone()], vec![]);
+            return Proof::new(vec![self.root().clone(), self.root().clone()], vec![]);
         }
         let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root
         let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1
         let mut index_in_layer = leaf_index;
-        lemma.push(self.node(0, leaf_index));
+        lemma.push(self.node(0, leaf_index).clone());
         for height in 0..(self.height() - 1) {
             trace!(
                 "gen_proof: height={} index={} hash={:?}",
@@ -93,15 +93,15 @@
                     // TODO: This can be skipped if the tree size is available in validation.
                     lemma.push(self.padding_node(height));
                 } else {
-                    lemma.push(self.node(height, index_in_layer + 1));
+                    lemma.push(self.node(height, index_in_layer + 1).clone());
                 }
             } else {
                 path.push(false);
-                lemma.push(self.node(height, index_in_layer - 1));
+                lemma.push(self.node(height, index_in_layer - 1).clone());
             }
             index_in_layer >>= 1;
         }
-        lemma.push(self.root());
+        lemma.push(self.root().clone());
         if lemma.contains(&Self::E::null()) {
             bail!(
                 "Not enough data to generate proof, lemma={:?} path={:?}",
@@ -130,13 +130,6 @@
     }
 }

-pub trait MerkleTreeWrite {
-    type E: HashElement;
-    fn push_node(&mut self, layer: usize, node: Self::E);
-    fn append_nodes(&mut self, layer: usize, nodes: &[Self::E]);
-    fn update_node(&mut self, layer: usize, pos: usize, node: Self::E);
-}
-
 /// This includes the data to reconstruct an `AppendMerkleTree` root where some nodes
 /// are `null`. Other intermediate nodes will be computed based on these known nodes.
 pub struct MerkleTreeInitialData<E: HashElement> {
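For orientation, the `gen_proof` hunks above build a `lemma` of the form `[leaf, one sibling per level, root]` plus a `path` of per-level flags (`true` when the sibling is taken from the right, i.e. the `index_in_layer + 1` branch). A hedged sketch of checking a proof of that shape follows; the Keccak pairing is an assumption for illustration, since the crate's real hashing goes through its `Algorithm`/`HashElement` traits.

```rust
// Sketch only: recompute the root from a lemma/path pair of the shape gen_proof emits.
use sha3::{Digest, Keccak256};

fn verify_path(lemma: &[[u8; 32]], path: &[bool]) -> bool {
    // Expect one leaf, one sibling per path entry, and the root.
    if lemma.len() != path.len() + 2 {
        return false;
    }
    let mut current = lemma[0];
    for (sibling, sibling_on_right) in lemma[1..lemma.len() - 1].iter().zip(path) {
        let mut hasher = Keccak256::new();
        if *sibling_on_right {
            // Current node is the left child.
            hasher.update(current);
            hasher.update(sibling);
        } else {
            // Current node is the right child.
            hasher.update(sibling);
            hasher.update(current);
        }
        current = hasher.finalize().into();
    }
    current == *lemma.last().unwrap()
}
```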


@ -1,11 +0,0 @@
use std::sync::Arc;
use metrics::{register_timer, Timer};
lazy_static::lazy_static! {
pub static ref APPEND: Arc<dyn Timer> = register_timer("append_merkle_append");
pub static ref APPEND_LIST: Arc<dyn Timer> = register_timer("append_merkle_append_list");
pub static ref APPEND_SUBTREE: Arc<dyn Timer> = register_timer("append_merkle_append_subtree");
pub static ref APPEND_SUBTREE_LIST: Arc<dyn Timer> = register_timer("append_merkle_append_subtree_list");
pub static ref UPDATE_LAST: Arc<dyn Timer> = register_timer("append_merkle_update_last");
}


@ -1,219 +0,0 @@
use crate::HashElement;
use anyhow::Result;
use lru::LruCache;
use std::any::Any;
use std::num::NonZeroUsize;
use std::sync::Arc;
use tracing::error;
pub struct NodeManager<E: HashElement> {
cache: LruCache<(usize, usize), E>,
layer_size: Vec<usize>,
db: Arc<dyn NodeDatabase<E>>,
db_tx: Option<Box<dyn NodeTransaction<E>>>,
}
impl<E: HashElement> NodeManager<E> {
pub fn new(db: Arc<dyn NodeDatabase<E>>, capacity: usize) -> Result<Self> {
let mut layer = 0;
let mut layer_size = Vec::new();
while let Some(size) = db.get_layer_size(layer)? {
layer_size.push(size);
layer += 1;
}
Ok(Self {
cache: LruCache::new(NonZeroUsize::new(capacity).expect("capacity should be non-zero")),
layer_size,
db,
db_tx: None,
})
}
pub fn new_dummy() -> Self {
Self {
cache: LruCache::unbounded(),
layer_size: vec![],
db: Arc::new(EmptyNodeDatabase {}),
db_tx: None,
}
}
pub fn push_node(&mut self, layer: usize, node: E) {
self.add_node(layer, self.layer_size[layer], node);
self.set_layer_size(layer, self.layer_size[layer] + 1);
}
pub fn append_nodes(&mut self, layer: usize, nodes: &[E]) {
let mut pos = self.layer_size[layer];
let mut saved_nodes = Vec::with_capacity(nodes.len());
for node in nodes {
self.cache.put((layer, pos), node.clone());
saved_nodes.push((layer, pos, node));
pos += 1;
}
self.set_layer_size(layer, pos);
self.db_tx().save_node_list(&saved_nodes);
}
pub fn get_node(&self, layer: usize, pos: usize) -> Option<E> {
match self.cache.peek(&(layer, pos)) {
Some(node) => Some(node.clone()),
None => self.db.get_node(layer, pos).unwrap_or_else(|e| {
error!("Failed to get node: {}", e);
None
}),
}
}
pub fn get_nodes(&self, layer: usize, start_pos: usize, end_pos: usize) -> NodeIterator<E> {
NodeIterator {
node_manager: self,
layer,
start_pos,
end_pos,
}
}
pub fn add_node(&mut self, layer: usize, pos: usize, node: E) {
// No need to insert if the value is unchanged.
if self.cache.get(&(layer, pos)) != Some(&node) {
self.db_tx().save_node(layer, pos, &node);
self.cache.put((layer, pos), node);
}
}
pub fn add_layer(&mut self) {
self.layer_size.push(0);
let layer = self.layer_size.len() - 1;
self.db_tx().save_layer_size(layer, 0);
}
pub fn layer_size(&self, layer: usize) -> usize {
self.layer_size[layer]
}
pub fn num_layers(&self) -> usize {
self.layer_size.len()
}
pub fn truncate_nodes(&mut self, layer: usize, pos_end: usize) {
let mut removed_nodes = Vec::new();
for pos in pos_end..self.layer_size[layer] {
self.cache.pop(&(layer, pos));
removed_nodes.push((layer, pos));
}
self.db_tx().remove_node_list(&removed_nodes);
self.set_layer_size(layer, pos_end);
}
pub fn truncate_layer(&mut self, layer: usize) {
self.truncate_nodes(layer, 0);
if layer == self.num_layers() - 1 {
self.layer_size.pop();
self.db_tx().remove_layer_size(layer);
}
}
pub fn start_transaction(&mut self) {
if self.db_tx.is_some() {
error!("start new tx before commit");
panic!("start new tx before commit");
}
self.db_tx = Some(self.db.start_transaction());
}
pub fn commit(&mut self) {
let tx = match self.db_tx.take() {
Some(tx) => tx,
None => {
error!("db_tx is None");
return;
}
};
if let Err(e) = self.db.commit(tx) {
error!("Failed to commit db transaction: {}", e);
}
}
fn db_tx(&mut self) -> &mut dyn NodeTransaction<E> {
(*self.db_tx.as_mut().expect("tx checked")).as_mut()
}
fn set_layer_size(&mut self, layer: usize, size: usize) {
self.layer_size[layer] = size;
self.db_tx().save_layer_size(layer, size);
}
}
pub struct NodeIterator<'a, E: HashElement> {
node_manager: &'a NodeManager<E>,
layer: usize,
start_pos: usize,
end_pos: usize,
}
impl<'a, E: HashElement> Iterator for NodeIterator<'a, E> {
type Item = E;
fn next(&mut self) -> Option<Self::Item> {
if self.start_pos < self.end_pos {
let r = self.node_manager.get_node(self.layer, self.start_pos);
self.start_pos += 1;
r
} else {
None
}
}
}
pub trait NodeDatabase<E: HashElement>: Send + Sync {
fn get_node(&self, layer: usize, pos: usize) -> Result<Option<E>>;
fn get_layer_size(&self, layer: usize) -> Result<Option<usize>>;
fn start_transaction(&self) -> Box<dyn NodeTransaction<E>>;
fn commit(&self, tx: Box<dyn NodeTransaction<E>>) -> Result<()>;
}
pub trait NodeTransaction<E: HashElement>: Send + Sync {
fn save_node(&mut self, layer: usize, pos: usize, node: &E);
/// `nodes` are a list of tuples `(layer, pos, node)`.
fn save_node_list(&mut self, nodes: &[(usize, usize, &E)]);
fn remove_node_list(&mut self, nodes: &[(usize, usize)]);
fn save_layer_size(&mut self, layer: usize, size: usize);
fn remove_layer_size(&mut self, layer: usize);
fn into_any(self: Box<Self>) -> Box<dyn Any>;
}
/// A dummy database structure for in-memory merkle tree that will not read/write db.
pub struct EmptyNodeDatabase {}
pub struct EmptyNodeTransaction {}
impl<E: HashElement> NodeDatabase<E> for EmptyNodeDatabase {
fn get_node(&self, _layer: usize, _pos: usize) -> Result<Option<E>> {
Ok(None)
}
fn get_layer_size(&self, _layer: usize) -> Result<Option<usize>> {
Ok(None)
}
fn start_transaction(&self) -> Box<dyn NodeTransaction<E>> {
Box::new(EmptyNodeTransaction {})
}
fn commit(&self, _tx: Box<dyn NodeTransaction<E>>) -> Result<()> {
Ok(())
}
}
impl<E: HashElement> NodeTransaction<E> for EmptyNodeTransaction {
fn save_node(&mut self, _layer: usize, _pos: usize, _node: &E) {}
fn save_node_list(&mut self, _nodes: &[(usize, usize, &E)]) {}
fn remove_node_list(&mut self, _nodes: &[(usize, usize)]) {}
fn save_layer_size(&mut self, _layer: usize, _size: usize) {}
fn remove_layer_size(&mut self, _layer: usize) {}
fn into_any(self: Box<Self>) -> Box<dyn Any> {
self
}
}


@@ -15,13 +15,19 @@
 );

 #[cfg(feature = "dev")]
-abigen!(ZgsFlow, "../../storage-contracts-abis/Flow.json");
+abigen!(
+    ZgsFlow,
+    "../../0g-storage-contracts-dev/artifacts/contracts/dataFlow/Flow.sol/Flow.json"
+);

 #[cfg(feature = "dev")]
-abigen!(PoraMine, "../../storage-contracts-abis/PoraMine.json");
+abigen!(
+    PoraMine,
+    "../../0g-storage-contracts-dev/artifacts/contracts/miner/Mine.sol/PoraMine.json"
+);

 #[cfg(feature = "dev")]
 abigen!(
     ChunkLinearReward,
-    "../../storage-contracts-abis/ChunkLinearReward.json"
+    "../../0g-storage-contracts-dev/artifacts/contracts/reward/ChunkLinearReward.sol/ChunkLinearReward.json"
 );
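Both the default and the `dev` branches above feed ethers-rs `abigen!`, which turns an ABI JSON file into typed Rust bindings at compile time. A minimal sketch of how such a binding is typically used follows; the RPC URL and contract address are placeholders, and the commented method call is only an example of the generated call-builder pattern, not a claim about the Flow ABI.

```rust
// Sketch only: instantiate an abigen!-generated binding against a JSON-RPC endpoint.
use ethers::prelude::*;
use std::sync::Arc;

abigen!(ZgsFlow, "../../storage-contracts-abis/Flow.json"); // non-dev ABI path from the hunk above

async fn connect_flow() -> anyhow::Result<()> {
    let provider = Provider::<Http>::try_from("http://localhost:8545")?; // placeholder endpoint
    let flow_address: Address = "0x0000000000000000000000000000000000000000".parse()?; // placeholder
    // The generated struct wraps the contract address plus a middleware handle.
    let flow = ZgsFlow::new(flow_address, Arc::new(provider));
    // Generated methods return call builders, e.g. (hypothetical getter):
    // let value = flow.some_view_method().call().await?;
    let _ = flow;
    Ok(())
}
```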


@@ -9,5 +9,5 @@ exit-future = "0.2.0"
 futures = "0.3.21"
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../lighthouse_metrics" }
-tokio = { version = "1.38.0", features = ["full"] }
+tokio = { version = "1.19.2", features = ["rt"] }
 tracing = "0.1.35"


@@ -1,6 +1,6 @@
 # Proof of Random Access

-The ZeroGravity network adopts a Proof of Random Access (PoRA) mechanism to incentivize miners to store data. By requiring miners to answer randomly produced queries to archived data chunks, the PoRA mechanism establishes the relation between mining proof generation power and data storage. Miners answer the queries repeatedly and computes an output digest for each loaded chunk until find a digest that satisfies the mining difficulty (i.e., has enough leading zeros). PoRA will stress the miners' disk I/O and reduce their capability to respond user queries. So 0G Storage adopts intermittent mining, in which a mining epoch starts with a block generation at a specific block height on the host chain and stops when a valid PoRA is submitted to the 0G Storage contract.
+The ZeroGravity network adopts a Proof of Random Access (PoRA) mechanism to incentivize miners to store data. By requiring miners to answer randomly produced queries to archived data chunks, the PoRA mechanism establishes the relation between mining proof generation power and data storage. Miners answer the queries repeatedly and computes an output digest for each loaded chunk util find a digest that satisfies the mining difficulty (i.e., has enough leading zeros). PoRA will stress the miners' disk I/O and reduce their capability to respond user queries. So 0G Storage adopts intermittent mining, in which a mining epoch starts with a block generation at a specific block height on the host chain and stops when a valid PoRA is submitted to the 0G Storage contract.

 In a strawman design, a PoRA iteration consists of a computing stage and a loading stage. In the computing stage, a miner computes a random recall position (the universal offset in the flow) based on an arbitrary picked random nonce and a mining status read from the host chain. In the loading stage, a miner loads the archived data chunks at the given recall position, and computes output digest by hashing the tuple of mining status and the data chunks. If the output digest satisfies the target difficulty, the miner can construct a legitimate PoRA consists of the chosen random nonce, the loaded data chunk and the proof for the correctness of data chunk to the mining contract.
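The strawman iteration described above maps onto a small loop: pick a nonce, derive a recall position from the nonce and the on-chain mining status, load the chunk at that position, and keep the digest only if it clears the difficulty target. The sketch below is illustrative; `MiningStatus`, `load_chunk`, the flow size, and the leading-zero-bytes check are stand-ins rather than the miner's real types or parameters.

```rust
// Sketch of a single strawman PoRA iteration (computing stage + loading stage).
use sha3::{Digest, Keccak256};

const NUM_CHUNKS: u64 = 1 << 20; // assumed flow size in chunks, for illustration

struct MiningStatus {
    block_hash: [u8; 32], // stand-in for the mining context read from the host chain
}

fn recall_position(status: &MiningStatus, nonce: u64) -> u64 {
    // Computing stage: map (mining status, nonce) to a pseudo-random offset in the flow.
    let mut hasher = Keccak256::new();
    hasher.update(status.block_hash);
    hasher.update(nonce.to_le_bytes());
    let digest = hasher.finalize();
    u64::from_le_bytes(digest[..8].try_into().unwrap()) % NUM_CHUNKS
}

fn pora_iteration(
    status: &MiningStatus,
    nonce: u64,
    load_chunk: impl Fn(u64) -> Vec<u8>,
) -> Option<([u8; 32], u64)> {
    // Loading stage: fetch the archived chunk at the recall position ...
    let position = recall_position(status, nonce);
    let chunk = load_chunk(position);
    // ... and hash the (mining status, chunk) tuple into an output digest.
    let mut hasher = Keccak256::new();
    hasher.update(status.block_hash);
    hasher.update(&chunk);
    let digest: [u8; 32] = hasher.finalize().into();
    // Stand-in difficulty check: require a few leading zero bytes.
    const TARGET_LEADING_ZERO_BYTES: usize = 2;
    if digest.iter().take(TARGET_LEADING_ZERO_BYTES).all(|b| *b == 0) {
        Some((digest, position))
    } else {
        None
    }
}
```

A miner repeats this with fresh nonces until an iteration returns `Some`, then submits the nonce, the loaded chunk, and a storage proof for that chunk to the mining contract.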


@@ -27,4 +27,4 @@ The mining process of 0G Storage requires to prove data accessibility to random
 ## Data Flow

-In 0G Storage, committed data are organized sequentially. Such a sequence of data is called a data flow, which can be interpreted as a list of data entries or equivalently a sequence of fixed-size data sectors. Thus, every piece of data in ZeroGravity can be indexed conveniently with a universal offset. This offset will be used to sample challenges in the mining process of PoRA. The default data flow is called the "main flow" of ZeroGravity. It incorporates all new log entries (unless otherwise specified) in an append-only manner. There are also specialized flows that only accept some category of log entries, e.g. data related to a specific application. The most significant advantage of specialized flows is a consecutive addressing space, which may be crucial in some use cases. Furthermore, a specialized flow can apply customized storage price, which is typically significantly higher than the floor price of the default flow, and hence achieves better data availability and reliability.
+In 0G Storage, committed data are organized sequentially. Such a sequence of data is called a data flow, which can be interpreted as a list of data entries or equivalently a sequence of fixed-size data sectors. Thus, every piece of data in ZeroGravity can be indexed conveniently with a universal offset. This offset will be used to sample challenges in the mining process of PoRA. The default data flow is called the "main flow" of ZeroGravity. It incorporates all new log entries (unless otherwise specified) in an append-only manner. There are also specialized flows that only accept some category of log entries, e.g. data related to a specifc application. The most significant advantage of specialized flows is a consecutive addressing space, which may be crucial in some use cases. Furthermore, a specialized flow can apply customized storage price, which is typically significantly higher than the floor price of the default flow, and hence achieves better data availability and reliability.
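Because a flow is a sequence of fixed-size sectors, a universal offset resolves directly to a sector index plus a byte offset inside that sector. A tiny sketch, assuming a 256-byte sector purely for illustration:

```rust
// Sketch only: map a universal offset to (sector index, byte offset within the sector).
const SECTOR_SIZE: u64 = 256; // assumed sector size

fn locate(universal_offset: u64) -> (u64, u64) {
    (universal_offset / SECTOR_SIZE, universal_offset % SECTOR_SIZE)
}

fn main() {
    let (sector, within) = locate(10_000);
    println!("sector {sector}, byte {within} within that sector"); // sector 39, byte 16
}
```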


@ -1,38 +0,0 @@
# One Box Test
0G storage node provides one box test framework for developers to verify system functionalities via RPC.
## Prerequisites
- Requires python version: 3.8, 3.9 or 3.10, higher version is not guaranteed (e.g. failed to install `pysha3`).
- Installs dependencies under root folder: `pip3 install -r requirements.txt`
## Install Blockchain Nodes
Python test framework will launch blockchain nodes at local machine for storage nodes to interact with. There are 3 kinds of blockchains available:
- 0G blockchain (by default).
- Conflux eSpace (for chain reorg test purpose).
- BSC node (geth).
The blockchain node binaries will be compiled or downloaded from github to `tests/tmp` folder automatically. Alternatively, developers could also manually copy binaries of specific version to the `tests/tmp` folder.
## Run Tests
Changes to the `tests` folder and run following command to run all tests:
```
python test_all.py
```
or, run any single test, e.g.
```
python example_test.py
```
*Note, please ensure blockchain nodes installed before running any single test, e.g. run all tests at first.*
## Add New Test
Please follow the `example_test.py` to add a new `xxx_test.py` file under `tests` folder.


@@ -13,5 +13,3 @@ tokio = { version = "1.19.2", features = ["sync"] }
 async-lock = "2.5.0"
 hashlink = "0.8.0"
 tracing = "0.1.35"
-lazy_static = "1.4.0"
-metrics = { workspace = true }


@ -1,16 +1,11 @@
use super::mem_pool::MemoryChunkPool; use super::mem_pool::MemoryChunkPool;
use crate::mem_pool::FileID; use crate::mem_pool::FileID;
use anyhow::Result; use anyhow::Result;
use metrics::{Histogram, Sample}; use network::NetworkMessage;
use network::{NetworkMessage, NetworkSender};
use shared_types::{ChunkArray, FileProof}; use shared_types::{ChunkArray, FileProof};
use std::{sync::Arc, time::Instant}; use std::{sync::Arc, time::SystemTime};
use storage_async::{ShardConfig, Store}; use storage_async::{ShardConfig, Store};
use tokio::sync::mpsc::UnboundedReceiver; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
lazy_static::lazy_static! {
pub static ref FINALIZE_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("chunk_pool_finalize_file_latency", 1024);
}
/// Handle the cached file when uploaded completely and verified from blockchain. /// Handle the cached file when uploaded completely and verified from blockchain.
/// Generally, the file will be persisted into log store. /// Generally, the file will be persisted into log store.
@ -18,7 +13,7 @@ pub struct ChunkPoolHandler {
receiver: UnboundedReceiver<ChunkPoolMessage>, receiver: UnboundedReceiver<ChunkPoolMessage>,
mem_pool: Arc<MemoryChunkPool>, mem_pool: Arc<MemoryChunkPool>,
log_store: Arc<Store>, log_store: Arc<Store>,
sender: NetworkSender, sender: UnboundedSender<NetworkMessage>,
} }
impl ChunkPoolHandler { impl ChunkPoolHandler {
@ -26,7 +21,7 @@ impl ChunkPoolHandler {
receiver: UnboundedReceiver<ChunkPoolMessage>, receiver: UnboundedReceiver<ChunkPoolMessage>,
mem_pool: Arc<MemoryChunkPool>, mem_pool: Arc<MemoryChunkPool>,
log_store: Arc<Store>, log_store: Arc<Store>,
sender: NetworkSender, sender: UnboundedSender<NetworkMessage>,
) -> Self { ) -> Self {
ChunkPoolHandler { ChunkPoolHandler {
receiver, receiver,
@ -73,7 +68,7 @@ impl ChunkPoolHandler {
} }
} }
let start = Instant::now(); let start = SystemTime::now();
if !self if !self
.log_store .log_store
.finalize_tx_with_hash(id.tx_id.seq, id.tx_id.hash) .finalize_tx_with_hash(id.tx_id.seq, id.tx_id.hash)
@ -82,9 +77,8 @@ impl ChunkPoolHandler {
return Ok(false); return Ok(false);
} }
let elapsed = start.elapsed(); let elapsed = start.elapsed()?;
debug!(?id, ?elapsed, "Transaction finalized"); debug!(?id, ?elapsed, "Transaction finalized");
FINALIZE_FILE_LATENCY.update_since(start);
// always remove file from pool after transaction finalized // always remove file from pool after transaction finalized
self.mem_pool.remove_file(&id.root).await; self.mem_pool.remove_file(&id.root).await;


@ -29,7 +29,7 @@ impl Config {
pub fn unbounded( pub fn unbounded(
config: Config, config: Config,
log_store: Arc<storage_async::Store>, log_store: Arc<storage_async::Store>,
network_send: network::NetworkSender, network_send: tokio::sync::mpsc::UnboundedSender<network::NetworkMessage>,
) -> (Arc<MemoryChunkPool>, ChunkPoolHandler) { ) -> (Arc<MemoryChunkPool>, ChunkPoolHandler) {
let (sender, receiver) = tokio::sync::mpsc::unbounded_channel(); let (sender, receiver) = tokio::sync::mpsc::unbounded_channel();


@ -24,5 +24,3 @@ futures-util = "0.3.28"
thiserror = "1.0.44" thiserror = "1.0.44"
lazy_static = "1.4.0" lazy_static = "1.4.0"
metrics = { workspace = true } metrics = { workspace = true }
reqwest = {version = "0.11", features = ["json"]}
url = { version = "2.4", default-features = false }


@ -1,5 +1,3 @@
use std::time::Duration;
use crate::ContractAddress; use crate::ContractAddress;
pub struct LogSyncConfig { pub struct LogSyncConfig {
@ -36,9 +34,6 @@ pub struct LogSyncConfig {
pub watch_loop_wait_time_ms: u64, pub watch_loop_wait_time_ms: u64,
// force to sync log from start block number // force to sync log from start block number
pub force_log_sync_from_start_block_number: bool, pub force_log_sync_from_start_block_number: bool,
// the timeout for blockchain rpc connection
pub blockchain_rpc_timeout: Duration,
} }
#[derive(Clone)] #[derive(Clone)]
@ -66,7 +61,6 @@ impl LogSyncConfig {
remove_finalized_block_interval_minutes: u64, remove_finalized_block_interval_minutes: u64,
watch_loop_wait_time_ms: u64, watch_loop_wait_time_ms: u64,
force_log_sync_from_start_block_number: bool, force_log_sync_from_start_block_number: bool,
blockchain_rpc_timeout: Duration,
) -> Self { ) -> Self {
Self { Self {
rpc_endpoint_url, rpc_endpoint_url,
@ -83,7 +77,6 @@ impl LogSyncConfig {
remove_finalized_block_interval_minutes, remove_finalized_block_interval_minutes,
watch_loop_wait_time_ms, watch_loop_wait_time_ms,
force_log_sync_from_start_block_number, force_log_sync_from_start_block_number,
blockchain_rpc_timeout,
} }
} }
} }


@ -1,6 +1,6 @@
use crate::sync_manager::log_query::LogQuery; use crate::sync_manager::log_query::LogQuery;
use crate::sync_manager::{metrics, RETRY_WAIT_MS}; use crate::sync_manager::RETRY_WAIT_MS;
use crate::{ContractAddress, LogSyncConfig}; use crate::ContractAddress;
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use append_merkle::{Algorithm, Sha3Algorithm}; use append_merkle::{Algorithm, Sha3Algorithm};
use contract_interface::{SubmissionNode, SubmitFilter, ZgsFlow}; use contract_interface::{SubmissionNode, SubmitFilter, ZgsFlow};
@ -12,13 +12,17 @@ use futures::StreamExt;
use jsonrpsee::tracing::{debug, error, info, warn}; use jsonrpsee::tracing::{debug, error, info, warn};
use shared_types::{DataRoot, Transaction}; use shared_types::{DataRoot, Transaction};
use std::collections::{BTreeMap, HashMap}; use std::collections::{BTreeMap, HashMap};
use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::Duration;
use storage::log_store::{tx_store::BlockHashAndSubmissionIndex, Store}; use storage::log_store::{tx_store::BlockHashAndSubmissionIndex, Store};
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio::sync::{ use tokio::{
sync::{
mpsc::{UnboundedReceiver, UnboundedSender}, mpsc::{UnboundedReceiver, UnboundedSender},
RwLock, RwLock,
},
time::Instant,
}; };
pub struct LogEntryFetcher { pub struct LogEntryFetcher {
@ -30,29 +34,28 @@ pub struct LogEntryFetcher {
} }
impl LogEntryFetcher { impl LogEntryFetcher {
pub async fn new(config: &LogSyncConfig) -> Result<Self> { pub async fn new(
url: &str,
contract_address: ContractAddress,
log_page_size: u64,
confirmation_delay: u64,
rate_limit_retries: u32,
timeout_retries: u32,
initial_backoff: u64,
) -> Result<Self> {
let provider = Arc::new(Provider::new( let provider = Arc::new(Provider::new(
RetryClientBuilder::default() RetryClientBuilder::default()
.rate_limit_retries(config.rate_limit_retries) .rate_limit_retries(rate_limit_retries)
.timeout_retries(config.timeout_retries) .timeout_retries(timeout_retries)
.initial_backoff(Duration::from_millis(config.initial_backoff)) .initial_backoff(Duration::from_millis(initial_backoff))
.build( .build(Http::from_str(url)?, Box::new(HttpRateLimitRetryPolicy)),
Http::new_with_client(
url::Url::parse(&config.rpc_endpoint_url)?,
reqwest::Client::builder()
.timeout(config.blockchain_rpc_timeout)
.connect_timeout(config.blockchain_rpc_timeout)
.build()?,
),
Box::new(HttpRateLimitRetryPolicy),
),
)); ));
// TODO: `error` types are removed from the ABI json file. // TODO: `error` types are removed from the ABI json file.
Ok(Self { Ok(Self {
contract_address: config.contract_address, contract_address,
provider, provider,
log_page_size: config.log_page_size, log_page_size,
confirmation_delay: config.confirmation_block_count, confirmation_delay,
}) })
} }
@ -219,7 +222,7 @@ impl LogEntryFetcher {
) -> UnboundedReceiver<LogFetchProgress> { ) -> UnboundedReceiver<LogFetchProgress> {
let provider = self.provider.clone(); let provider = self.provider.clone();
let (recover_tx, recover_rx) = tokio::sync::mpsc::unbounded_channel(); let (recover_tx, recover_rx) = tokio::sync::mpsc::unbounded_channel();
let contract = self.flow_contract(); let contract = ZgsFlow::new(self.contract_address, provider.clone());
let log_page_size = self.log_page_size; let log_page_size = self.log_page_size;
executor.spawn( executor.spawn(
@ -233,20 +236,15 @@ impl LogEntryFetcher {
.filter; .filter;
let mut stream = LogQuery::new(&provider, &filter, log_query_delay) let mut stream = LogQuery::new(&provider, &filter, log_query_delay)
.with_page_size(log_page_size); .with_page_size(log_page_size);
info!( debug!(
"start_recover starts, start={} end={}", "start_recover starts, start={} end={}",
start_block_number, end_block_number start_block_number, end_block_number
); );
let (mut block_hash_sent, mut block_number_sent) = (None, None);
while let Some(maybe_log) = stream.next().await { while let Some(maybe_log) = stream.next().await {
let start_time = Instant::now();
match maybe_log { match maybe_log {
Ok(log) => { Ok(log) => {
let sync_progress = let sync_progress =
if log.block_hash.is_some() && log.block_number.is_some() { if log.block_hash.is_some() && log.block_number.is_some() {
if block_hash_sent != log.block_hash
|| block_number_sent != log.block_number
{
let synced_block = LogFetchProgress::SyncedBlock(( let synced_block = LogFetchProgress::SyncedBlock((
log.block_number.unwrap().as_u64(), log.block_number.unwrap().as_u64(),
log.block_hash.unwrap(), log.block_hash.unwrap(),
@ -256,9 +254,6 @@ impl LogEntryFetcher {
Some(synced_block) Some(synced_block)
} else { } else {
None None
}
} else {
None
}; };
debug!("recover: progress={:?}", sync_progress); debug!("recover: progress={:?}", sync_progress);
@ -273,17 +268,11 @@ impl LogEntryFetcher {
log.block_number.expect("block number exist").as_u64(), log.block_number.expect("block number exist").as_u64(),
)) ))
.and_then(|_| match sync_progress { .and_then(|_| match sync_progress {
Some(b) => { Some(b) => recover_tx.send(b),
recover_tx.send(b)?;
block_hash_sent = log.block_hash;
block_number_sent = log.block_number;
Ok(())
}
None => Ok(()), None => Ok(()),
}) })
{ {
error!("send error: e={:?}", e); error!("send error: e={:?}", e);
break;
} }
} }
Err(e) => { Err(e) => {
@ -299,10 +288,7 @@ impl LogEntryFetcher {
tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await;
} }
} }
metrics::RECOVER_LOG.update_since(start_time);
} }
info!("log recover end");
}, },
"log recover", "log recover",
); );
@ -319,7 +305,7 @@ impl LogEntryFetcher {
mut watch_progress_rx: UnboundedReceiver<u64>, mut watch_progress_rx: UnboundedReceiver<u64>,
) -> UnboundedReceiver<LogFetchProgress> { ) -> UnboundedReceiver<LogFetchProgress> {
let (watch_tx, watch_rx) = tokio::sync::mpsc::unbounded_channel(); let (watch_tx, watch_rx) = tokio::sync::mpsc::unbounded_channel();
let contract = self.flow_contract(); let contract = ZgsFlow::new(self.contract_address, self.provider.clone());
let provider = self.provider.clone(); let provider = self.provider.clone();
let confirmation_delay = self.confirmation_delay; let confirmation_delay = self.confirmation_delay;
let log_page_size = self.log_page_size; let log_page_size = self.log_page_size;
@ -597,10 +583,6 @@ impl LogEntryFetcher {
pub fn provider(&self) -> &Provider<RetryClient<Http>> { pub fn provider(&self) -> &Provider<RetryClient<Http>> {
self.provider.as_ref() self.provider.as_ref()
} }
pub fn flow_contract(&self) -> ZgsFlow<Provider<RetryClient<Http>>> {
ZgsFlow::new(self.contract_address, self.provider.clone())
}
} }
async fn check_watch_process( async fn check_watch_process(
@ -676,7 +658,6 @@ async fn check_watch_process(
"get block hash for block {} from RPC, assume there is no org", "get block hash for block {} from RPC, assume there is no org",
*progress - 1 *progress - 1
); );
let hash = loop {
match provider.get_block(*progress - 1).await { match provider.get_block(*progress - 1).await {
Ok(Some(v)) => { Ok(Some(v)) => {
break v.hash.expect("parent block hash expect exist"); break v.hash.expect("parent block hash expect exist");
@ -685,17 +666,11 @@ async fn check_watch_process(
panic!("parent block {} expect exist", *progress - 1); panic!("parent block {} expect exist", *progress - 1);
} }
Err(e) => { Err(e) => {
if e.to_string().contains("server is too busy") {
warn!("server busy, wait for parent block {}", *progress - 1);
} else {
panic!("parent block {} expect exist, error {}", *progress - 1, e); panic!("parent block {} expect exist, error {}", *progress - 1, e);
} }
} }
} }
}; };
break hash;
}
};
} }
progress_reset_history.retain(|k, _| k + 1000 >= *progress); progress_reset_history.retain(|k, _| k + 1000 >= *progress);


@ -14,7 +14,7 @@ use thiserror::Error;
pub(crate) type PinBoxFut<'a, T> = pub(crate) type PinBoxFut<'a, T> =
Pin<Box<dyn Future<Output = Result<T, ProviderError>> + Send + 'a>>; Pin<Box<dyn Future<Output = Result<T, ProviderError>> + Send + 'a>>;
const TOO_MANY_LOGS_ERROR_MSG: [&str; 2] = ["exceeds the max limit of", "too large with more than"]; const TOO_MANY_LOGS_ERROR_MSG: [&str; 2] = ["query returned more than", "too large with more than"];
/// A log query provides streaming access to historical logs via a paginated /// A log query provides streaming access to historical logs via a paginated
/// request. For streaming access to future logs, use [`Middleware::watch`] or /// request. For streaming access to future logs, use [`Middleware::watch`] or


@ -1,13 +1,7 @@
use std::sync::Arc; use std::sync::Arc;
use metrics::{register_timer, Gauge, GaugeUsize, Timer}; use metrics::{register_timer, Timer};
lazy_static::lazy_static! { lazy_static::lazy_static! {
pub static ref LOG_MANAGER_HANDLE_DATA_TRANSACTION: Arc<dyn Timer> = register_timer("log_manager_handle_data_transaction"); pub static ref STORE_PUT_TX: Arc<dyn Timer> = register_timer("log_entry_sync_store_put_tx");
pub static ref STORE_PUT_TX: Arc<dyn Timer> = register_timer("log_entry_sync_manager_put_tx_inner");
pub static ref STORE_PUT_TX_SPEED_IN_BYTES: Arc<dyn Gauge<usize>> = GaugeUsize::register("log_entry_sync_manager_put_tx_speed_in_bytes");
pub static ref RECOVER_LOG: Arc<dyn Timer> = register_timer("log_entry_sync_manager_recover_log");
} }


@ -26,7 +26,6 @@ const RETRY_WAIT_MS: u64 = 500;
// Each tx has less than 10KB, so the cache size should be acceptable. // Each tx has less than 10KB, so the cache size should be acceptable.
const BROADCAST_CHANNEL_CAPACITY: usize = 25000; const BROADCAST_CHANNEL_CAPACITY: usize = 25000;
const CATCH_UP_END_GAP: u64 = 10; const CATCH_UP_END_GAP: u64 = 10;
const CHECK_ROOT_INTERVAL: u64 = 500;
/// Errors while handle data /// Errors while handle data
#[derive(Error, Debug)] #[derive(Error, Debug)]
@ -87,7 +86,16 @@ impl LogSyncManager {
.expect("shutdown send error") .expect("shutdown send error")
}, },
async move { async move {
let log_fetcher = LogEntryFetcher::new(&config).await?; let log_fetcher = LogEntryFetcher::new(
&config.rpc_endpoint_url,
config.contract_address,
config.log_page_size,
config.confirmation_block_count,
config.rate_limit_retries,
config.timeout_retries,
config.initial_backoff,
)
.await?;
let data_cache = DataCache::new(config.cache_config.clone()); let data_cache = DataCache::new(config.cache_config.clone());
let block_hash_cache = Arc::new(RwLock::new( let block_hash_cache = Arc::new(RwLock::new(
@ -269,9 +277,6 @@ impl LogSyncManager {
.remove_finalized_block_interval_minutes, .remove_finalized_block_interval_minutes,
); );
// start the pad data store
log_sync_manager.store.start_padding(&executor_clone);
let (watch_progress_tx, watch_progress_rx) = let (watch_progress_tx, watch_progress_rx) =
tokio::sync::mpsc::unbounded_channel(); tokio::sync::mpsc::unbounded_channel();
let watch_rx = log_sync_manager.log_fetcher.start_watch( let watch_rx = log_sync_manager.log_fetcher.start_watch(
@ -403,7 +408,6 @@ impl LogSyncManager {
} }
LogFetchProgress::Transaction((tx, block_number)) => { LogFetchProgress::Transaction((tx, block_number)) => {
let mut stop = false; let mut stop = false;
let start_time = Instant::now();
match self.put_tx(tx.clone()).await { match self.put_tx(tx.clone()).await {
Some(false) => stop = true, Some(false) => stop = true,
Some(true) => { Some(true) => {
@ -437,8 +441,6 @@ impl LogSyncManager {
// no receivers will be created. // no receivers will be created.
warn!("log sync broadcast error, error={:?}", e); warn!("log sync broadcast error, error={:?}", e);
} }
metrics::LOG_MANAGER_HANDLE_DATA_TRANSACTION.update_since(start_time);
} }
LogFetchProgress::Reverted(reverted) => { LogFetchProgress::Reverted(reverted) => {
self.process_reverted(reverted).await; self.process_reverted(reverted).await;
@ -451,6 +453,7 @@ impl LogSyncManager {
async fn put_tx_inner(&mut self, tx: Transaction) -> bool { async fn put_tx_inner(&mut self, tx: Transaction) -> bool {
let start_time = Instant::now(); let start_time = Instant::now();
let result = self.store.put_tx(tx.clone()); let result = self.store.put_tx(tx.clone());
metrics::STORE_PUT_TX.update_since(start_time);
if let Err(e) = result { if let Err(e) = result {
error!("put_tx error: e={:?}", e); error!("put_tx error: e={:?}", e);
@ -506,50 +509,7 @@ impl LogSyncManager {
} }
} }
self.data_cache.garbage_collect(self.next_tx_seq); self.data_cache.garbage_collect(self.next_tx_seq);
self.next_tx_seq += 1; self.next_tx_seq += 1;
// Check if the computed data root matches on-chain state.
// If the call fails, we won't check the root here and return `true` directly.
if self.next_tx_seq % CHECK_ROOT_INTERVAL == 0 {
let flow_contract = self.log_fetcher.flow_contract();
match flow_contract
.get_flow_root_by_tx_seq(tx.seq.into())
.call()
.await
{
Ok(contract_root_bytes) => {
let contract_root = H256::from_slice(&contract_root_bytes);
// contract_root is zero for tx submitted before upgrading.
if !contract_root.is_zero() {
match self.store.get_context() {
Ok((local_root, _)) => {
if contract_root != local_root {
error!(
?contract_root,
?local_root,
"local flow root and on-chain flow root mismatch"
);
return false;
}
}
Err(e) => {
warn!(?e, "fail to read the local flow root");
}
}
}
}
Err(e) => {
warn!(?e, "fail to read the on-chain flow root");
}
}
}
metrics::STORE_PUT_TX_SPEED_IN_BYTES
.update((tx.size * 1000 / start_time.elapsed().as_micros() as u64) as usize);
metrics::STORE_PUT_TX.update_since(start_time);
true true
} }
} }


@ -67,8 +67,8 @@ impl MinerConfig {
}) })
} }
pub(crate) fn make_provider(&self) -> Result<Arc<Provider<RetryClient<Http>>>, String> { pub(crate) async fn make_provider(&self) -> Result<MineServiceMiddleware, String> {
Ok(Arc::new(Provider::new( let provider = Arc::new(Provider::new(
RetryClientBuilder::default() RetryClientBuilder::default()
.rate_limit_retries(self.rate_limit_retries) .rate_limit_retries(self.rate_limit_retries)
.timeout_retries(self.timeout_retries) .timeout_retries(self.timeout_retries)
@ -78,11 +78,7 @@ impl MinerConfig {
.map_err(|e| format!("Cannot parse blockchain endpoint: {:?}", e))?, .map_err(|e| format!("Cannot parse blockchain endpoint: {:?}", e))?,
Box::new(HttpRateLimitRetryPolicy), Box::new(HttpRateLimitRetryPolicy),
), ),
))) ));
}
pub(crate) async fn make_signing_provider(&self) -> Result<MineServiceMiddleware, String> {
let provider = self.make_provider()?;
let chain_id = provider let chain_id = provider
.get_chainid() .get_chainid()
.await .await


@ -5,20 +5,17 @@ use ethereum_types::Address;
use ethers::contract::ContractCall; use ethers::contract::ContractCall;
use ethers::contract::EthEvent; use ethers::contract::EthEvent;
use std::sync::Arc; use std::sync::Arc;
use storage::log_store::log_manager::DATA_DB_KEY;
use storage::H256; use storage::H256;
use storage_async::Store; use storage_async::Store;
const MINER_ID: &str = "mine.miner_id"; const MINER_ID: &str = "mine.miner_id";
pub async fn load_miner_id(store: &Store) -> storage::error::Result<Option<H256>> { pub async fn load_miner_id(store: &Store) -> storage::error::Result<Option<H256>> {
store.get_config_decoded(&MINER_ID, DATA_DB_KEY).await store.get_config_decoded(&MINER_ID).await
} }
async fn set_miner_id(store: &Store, miner_id: &H256) -> storage::error::Result<()> { async fn set_miner_id(store: &Store, miner_id: &H256) -> storage::error::Result<()> {
store store.set_config_encoded(&MINER_ID, miner_id).await
.set_config_encoded(&MINER_ID, miner_id, DATA_DB_KEY)
.await
} }
pub(crate) async fn check_and_request_miner_id( pub(crate) async fn check_and_request_miner_id(


@ -1,7 +1,6 @@
use std::{collections::BTreeMap, sync::Arc}; use std::{collections::BTreeMap, sync::Arc};
use ethereum_types::H256; use ethereum_types::H256;
use ethers::prelude::{Http, Provider, RetryClient};
use tokio::time::{sleep, Duration, Instant}; use tokio::time::{sleep, Duration, Instant};
use contract_interface::{EpochRangeWithContextDigest, ZgsFlow}; use contract_interface::{EpochRangeWithContextDigest, ZgsFlow};
@ -13,14 +12,14 @@ use storage_async::Store;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use zgs_spec::SECTORS_PER_SEAL; use zgs_spec::SECTORS_PER_SEAL;
use crate::config::MinerConfig; use crate::config::{MineServiceMiddleware, MinerConfig};
const DB_QUERY_PERIOD_ON_NO_TASK: u64 = 1; const DB_QUERY_PERIOD_ON_NO_TASK: u64 = 1;
const DB_QUERY_PERIOD_ON_ERROR: u64 = 5; const DB_QUERY_PERIOD_ON_ERROR: u64 = 5;
const CHAIN_STATUS_QUERY_PERIOD: u64 = 5; const CHAIN_STATUS_QUERY_PERIOD: u64 = 5;
pub struct Sealer { pub struct Sealer {
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>, flow_contract: ZgsFlow<MineServiceMiddleware>,
store: Arc<Store>, store: Arc<Store>,
context_cache: BTreeMap<u128, EpochRangeWithContextDigest>, context_cache: BTreeMap<u128, EpochRangeWithContextDigest>,
last_context_flow_length: u64, last_context_flow_length: u64,
@ -30,7 +29,7 @@ pub struct Sealer {
impl Sealer { impl Sealer {
pub fn spawn( pub fn spawn(
executor: TaskExecutor, executor: TaskExecutor,
provider: Arc<Provider<RetryClient<Http>>>, provider: Arc<MineServiceMiddleware>,
store: Arc<Store>, store: Arc<Store>,
config: &MinerConfig, config: &MinerConfig,
miner_id: H256, miner_id: H256,


@ -3,12 +3,13 @@ use crate::monitor::Monitor;
use crate::sealer::Sealer; use crate::sealer::Sealer;
use crate::submitter::Submitter; use crate::submitter::Submitter;
use crate::{config::MinerConfig, mine::PoraService, watcher::MineContextWatcher}; use crate::{config::MinerConfig, mine::PoraService, watcher::MineContextWatcher};
use network::NetworkSender; use network::NetworkMessage;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use storage::config::ShardConfig; use storage::config::ShardConfig;
use storage_async::Store; use storage_async::Store;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::sync::mpsc;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum MinerMessage { pub enum MinerMessage {
@ -28,17 +29,15 @@ pub struct MineService;
impl MineService { impl MineService {
pub async fn spawn( pub async fn spawn(
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
_network_send: NetworkSender, _network_send: mpsc::UnboundedSender<NetworkMessage>,
config: MinerConfig, config: MinerConfig,
store: Arc<Store>, store: Arc<Store>,
) -> Result<broadcast::Sender<MinerMessage>, String> { ) -> Result<broadcast::Sender<MinerMessage>, String> {
let provider = config.make_provider()?; let provider = Arc::new(config.make_provider().await?);
let signing_provider = Arc::new(config.make_signing_provider().await?);
let (msg_send, msg_recv) = broadcast::channel(1024); let (msg_send, msg_recv) = broadcast::channel(1024);
let miner_id = let miner_id = check_and_request_miner_id(&config, store.as_ref(), &provider).await?;
check_and_request_miner_id(&config, store.as_ref(), &signing_provider).await?;
debug!("miner id setting complete."); debug!("miner id setting complete.");
let mine_context_receiver = MineContextWatcher::spawn( let mine_context_receiver = MineContextWatcher::spawn(
@ -62,7 +61,6 @@ impl MineService {
mine_answer_receiver, mine_answer_receiver,
mine_context_receiver, mine_context_receiver,
provider.clone(), provider.clone(),
signing_provider,
store.clone(), store.clone(),
&config, &config,
); );


@ -2,7 +2,6 @@ use contract_interface::PoraAnswer;
use contract_interface::{PoraMine, ZgsFlow}; use contract_interface::{PoraMine, ZgsFlow};
use ethereum_types::U256; use ethereum_types::U256;
use ethers::contract::ContractCall; use ethers::contract::ContractCall;
use ethers::prelude::{Http, Provider, RetryClient};
use ethers::providers::PendingTransaction; use ethers::providers::PendingTransaction;
use hex::ToHex; use hex::ToHex;
use shared_types::FlowRangeProof; use shared_types::FlowRangeProof;
@ -25,7 +24,7 @@ pub struct Submitter {
mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>, mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
mine_context_receiver: broadcast::Receiver<MineContextMessage>, mine_context_receiver: broadcast::Receiver<MineContextMessage>,
mine_contract: PoraMine<MineServiceMiddleware>, mine_contract: PoraMine<MineServiceMiddleware>,
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>, flow_contract: ZgsFlow<MineServiceMiddleware>,
default_gas_limit: Option<U256>, default_gas_limit: Option<U256>,
store: Arc<Store>, store: Arc<Store>,
} }
@ -35,12 +34,11 @@ impl Submitter {
executor: TaskExecutor, executor: TaskExecutor,
mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>, mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
mine_context_receiver: broadcast::Receiver<MineContextMessage>, mine_context_receiver: broadcast::Receiver<MineContextMessage>,
provider: Arc<Provider<RetryClient<Http>>>, provider: Arc<MineServiceMiddleware>,
signing_provider: Arc<MineServiceMiddleware>,
store: Arc<Store>, store: Arc<Store>,
config: &MinerConfig, config: &MinerConfig,
) { ) {
let mine_contract = PoraMine::new(config.mine_address, signing_provider); let mine_contract = PoraMine::new(config.mine_address, provider.clone());
let flow_contract = ZgsFlow::new(config.flow_address, provider); let flow_contract = ZgsFlow::new(config.flow_address, provider);
let default_gas_limit = config.submission_gas; let default_gas_limit = config.submission_gas;


@ -14,13 +14,13 @@ use tokio::{
try_join, try_join,
}; };
use crate::{config::MineServiceMiddleware, mine::PoraPuzzle, MinerConfig, MinerMessage};
use ethers::prelude::{Http, RetryClient};
use std::pin::Pin; use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use std::{ops::DerefMut, str::FromStr}; use std::{ops::DerefMut, str::FromStr};
use crate::{config::MineServiceMiddleware, mine::PoraPuzzle, MinerConfig, MinerMessage};
pub type MineContextMessage = Option<PoraPuzzle>; pub type MineContextMessage = Option<PoraPuzzle>;
lazy_static! { lazy_static! {
@ -29,9 +29,9 @@ lazy_static! {
} }
pub struct MineContextWatcher { pub struct MineContextWatcher {
provider: Arc<Provider<RetryClient<Http>>>, provider: Arc<MineServiceMiddleware>,
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>, flow_contract: ZgsFlow<MineServiceMiddleware>,
mine_contract: PoraMine<Provider<RetryClient<Http>>>, mine_contract: PoraMine<MineServiceMiddleware>,
mine_context_sender: broadcast::Sender<MineContextMessage>, mine_context_sender: broadcast::Sender<MineContextMessage>,
last_report: MineContextMessage, last_report: MineContextMessage,
@ -44,7 +44,7 @@ impl MineContextWatcher {
pub fn spawn( pub fn spawn(
executor: TaskExecutor, executor: TaskExecutor,
msg_recv: broadcast::Receiver<MinerMessage>, msg_recv: broadcast::Receiver<MinerMessage>,
provider: Arc<Provider<RetryClient<Http>>>, provider: Arc<MineServiceMiddleware>,
config: &MinerConfig, config: &MinerConfig,
) -> broadcast::Receiver<MineContextMessage> { ) -> broadcast::Receiver<MineContextMessage> {
let mine_contract = PoraMine::new(config.mine_address, provider.clone()); let mine_contract = PoraMine::new(config.mine_address, provider.clone());


@ -41,7 +41,6 @@ if-addrs = "0.10.1"
slog = "2.7.0" slog = "2.7.0"
igd = "0.12.1" igd = "0.12.1"
duration-str = "0.5.1" duration-str = "0.5.1"
channel = { path = "../../common/channel" }
[dependencies.libp2p] [dependencies.libp2p]
version = "0.45.1" version = "0.45.1"


@ -20,8 +20,6 @@ pub struct GossipCache {
topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>, topic_msgs: HashMap<GossipTopic, HashMap<Vec<u8>, Key>>,
/// Timeout for Example messages. /// Timeout for Example messages.
example: Option<Duration>, example: Option<Duration>,
/// Timeout for NewFile messages.
new_file: Option<Duration>,
/// Timeout for FindFile messages. /// Timeout for FindFile messages.
find_file: Option<Duration>, find_file: Option<Duration>,
/// Timeout for FindChunks messages. /// Timeout for FindChunks messages.
@ -39,8 +37,6 @@ pub struct GossipCacheBuilder {
default_timeout: Option<Duration>, default_timeout: Option<Duration>,
/// Timeout for Example messages. /// Timeout for Example messages.
example: Option<Duration>, example: Option<Duration>,
/// Timeout for NewFile messages.
new_file: Option<Duration>,
/// Timeout for blocks FindFile messages. /// Timeout for blocks FindFile messages.
find_file: Option<Duration>, find_file: Option<Duration>,
/// Timeout for blocks FindChunks messages. /// Timeout for blocks FindChunks messages.
@ -68,12 +64,6 @@ impl GossipCacheBuilder {
self self
} }
/// Timeout for NewFile messages.
pub fn new_file_timeout(mut self, timeout: Duration) -> Self {
self.new_file = Some(timeout);
self
}
/// Timeout for FindFile messages. /// Timeout for FindFile messages.
pub fn find_file_timeout(mut self, timeout: Duration) -> Self { pub fn find_file_timeout(mut self, timeout: Duration) -> Self {
self.find_file = Some(timeout); self.find_file = Some(timeout);
@ -108,7 +98,6 @@ impl GossipCacheBuilder {
let GossipCacheBuilder { let GossipCacheBuilder {
default_timeout, default_timeout,
example, example,
new_file,
find_file, find_file,
find_chunks, find_chunks,
announce_file, announce_file,
@ -120,7 +109,6 @@ impl GossipCacheBuilder {
expirations: DelayQueue::default(), expirations: DelayQueue::default(),
topic_msgs: HashMap::default(), topic_msgs: HashMap::default(),
example: example.or(default_timeout), example: example.or(default_timeout),
new_file: new_file.or(default_timeout),
find_file: find_file.or(default_timeout), find_file: find_file.or(default_timeout),
find_chunks: find_chunks.or(default_timeout), find_chunks: find_chunks.or(default_timeout),
announce_file: announce_file.or(default_timeout), announce_file: announce_file.or(default_timeout),
@ -141,7 +129,6 @@ impl GossipCache {
pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) { pub fn insert(&mut self, topic: GossipTopic, data: Vec<u8>) {
let expire_timeout = match topic.kind() { let expire_timeout = match topic.kind() {
GossipKind::Example => self.example, GossipKind::Example => self.example,
GossipKind::NewFile => self.new_file,
GossipKind::FindFile => self.find_file, GossipKind::FindFile => self.find_file,
GossipKind::FindChunks => self.find_chunks, GossipKind::FindChunks => self.find_chunks,
GossipKind::AnnounceFile => self.announce_file, GossipKind::AnnounceFile => self.announce_file,


@ -6,7 +6,6 @@ use crate::peer_manager::{
ConnectionDirection, PeerManager, PeerManagerEvent, ConnectionDirection, PeerManager, PeerManagerEvent,
}; };
use crate::rpc::methods::DataByHashRequest; use crate::rpc::methods::DataByHashRequest;
use crate::rpc::methods::FileAnnouncement;
use crate::rpc::methods::GetChunksRequest; use crate::rpc::methods::GetChunksRequest;
use crate::rpc::*; use crate::rpc::*;
use crate::service::Context as ServiceContext; use crate::service::Context as ServiceContext;
@ -233,9 +232,6 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
let topic: Topic = GossipTopic::new(kind, GossipEncoding::default()).into(); let topic: Topic = GossipTopic::new(kind, GossipEncoding::default()).into();
topic.hash() topic.hash()
}; };
params
.topics
.insert(get_hash(GossipKind::NewFile), TopicScoreParams::default());
params params
.topics .topics
.insert(get_hash(GossipKind::FindFile), TopicScoreParams::default()); .insert(get_hash(GossipKind::FindFile), TopicScoreParams::default());
@ -267,7 +263,7 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
discovery_enabled: !config.disable_discovery, discovery_enabled: !config.disable_discovery,
metrics_enabled: config.metrics_enabled, metrics_enabled: config.metrics_enabled,
target_peer_count: config.target_peers, target_peer_count: config.target_peers,
..config.peer_manager ..Default::default()
}; };
let slot_duration = std::time::Duration::from_secs(12); let slot_duration = std::time::Duration::from_secs(12);
@ -547,9 +543,6 @@ impl<AppReqId: ReqId> Behaviour<AppReqId> {
Request::DataByHash { .. } => { Request::DataByHash { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_by_hash"]) metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_by_hash"])
} }
Request::AnnounceFile { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["announce_file"])
}
Request::GetChunks { .. } => { Request::GetChunks { .. } => {
metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["get_chunks"]) metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["get_chunks"])
} }
@ -592,7 +585,7 @@ where
// peer that originally published the message. // peer that originally published the message.
match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data) { match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data) {
Err(e) => { Err(e) => {
debug!(topic = ?gs_msg.topic, %propagation_source, error = ?e, "Could not decode gossipsub message"); debug!(topic = ?gs_msg.topic, error = ?e, "Could not decode gossipsub message");
//reject the message //reject the message
if let Err(e) = self.gossipsub.report_message_validation_result( if let Err(e) = self.gossipsub.report_message_validation_result(
&id, &id,
@ -601,24 +594,6 @@ where
) { ) {
warn!(message_id = %id, peer_id = %propagation_source, error = ?e, "Failed to report message validation"); warn!(message_id = %id, peer_id = %propagation_source, error = ?e, "Failed to report message validation");
} }
self.peer_manager.report_peer(
&propagation_source,
PeerAction::Fatal,
ReportSource::Gossipsub,
None,
"gossipsub message decode error",
);
if let Some(source) = &gs_msg.source {
self.peer_manager.report_peer(
source,
PeerAction::Fatal,
ReportSource::Gossipsub,
None,
"gossipsub message decode error",
);
}
} }
Ok(msg) => { Ok(msg) => {
// Notify the network // Notify the network
@ -780,9 +755,6 @@ where
InboundRequest::DataByHash(req) => { InboundRequest::DataByHash(req) => {
self.propagate_request(peer_request_id, peer_id, Request::DataByHash(req)) self.propagate_request(peer_request_id, peer_id, Request::DataByHash(req))
} }
InboundRequest::AnnounceFile(req) => {
self.propagate_request(peer_request_id, peer_id, Request::AnnounceFile(req))
}
InboundRequest::GetChunks(req) => { InboundRequest::GetChunks(req) => {
self.propagate_request(peer_request_id, peer_id, Request::GetChunks(req)) self.propagate_request(peer_request_id, peer_id, Request::GetChunks(req))
} }
@ -997,8 +969,6 @@ pub enum Request {
Status(StatusMessage), Status(StatusMessage),
/// A data by hash request. /// A data by hash request.
DataByHash(DataByHashRequest), DataByHash(DataByHashRequest),
/// An AnnounceFile message.
AnnounceFile(FileAnnouncement),
/// A GetChunks request. /// A GetChunks request.
GetChunks(GetChunksRequest), GetChunks(GetChunksRequest),
} }
@ -1008,7 +978,6 @@ impl std::convert::From<Request> for OutboundRequest {
match req { match req {
Request::Status(s) => OutboundRequest::Status(s), Request::Status(s) => OutboundRequest::Status(s),
Request::DataByHash(r) => OutboundRequest::DataByHash(r), Request::DataByHash(r) => OutboundRequest::DataByHash(r),
Request::AnnounceFile(r) => OutboundRequest::AnnounceFile(r),
Request::GetChunks(r) => OutboundRequest::GetChunks(r), Request::GetChunks(r) => OutboundRequest::GetChunks(r),
} }
} }


@ -1,5 +1,6 @@
use crate::peer_manager::peerdb::PeerDBConfig;
use crate::types::GossipKind; use crate::types::GossipKind;
use crate::{peer_manager, Enr, PeerIdSerialized}; use crate::{Enr, PeerIdSerialized};
use directory::{ use directory::{
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
}; };
@ -127,12 +128,7 @@ pub struct Config {
/// The id of the storage network. /// The id of the storage network.
pub network_id: NetworkIdentity, pub network_id: NetworkIdentity,
pub peer_db: peer_manager::peerdb::PeerDBConfig, pub peer_db: PeerDBConfig,
pub peer_manager: peer_manager::config::Config,
/// Whether to disable network identity in ENR.
/// This is for test purpose only.
pub disable_enr_network_id: bool,
} }
impl Default for Config { impl Default for Config {
@ -157,8 +153,8 @@ impl Default for Config {
let filter_rate_limiter = Some( let filter_rate_limiter = Some(
discv5::RateLimiterBuilder::new() discv5::RateLimiterBuilder::new()
.total_n_every(300, Duration::from_secs(1)) // Allow bursts, average 300 per second .total_n_every(300, Duration::from_secs(1)) // Allow bursts, average 300 per second
.ip_n_every(9, Duration::from_secs(1)) // Allow bursts, average 9 per second .ip_n_every(300, Duration::from_secs(1)) // Allow bursts, average 300 per second
.node_n_every(8, Duration::from_secs(1)) // Allow bursts, average 8 per second .node_n_every(300, Duration::from_secs(1)) // Allow bursts, average 300 per second
.build() .build()
.expect("The total rate limit has been specified"), .expect("The total rate limit has been specified"),
); );
@ -212,8 +208,6 @@ impl Default for Config {
metrics_enabled: false, metrics_enabled: false,
network_id: Default::default(), network_id: Default::default(),
peer_db: Default::default(), peer_db: Default::default(),
peer_manager: Default::default(),
disable_enr_network_id: false,
} }
} }
} }


@ -1,10 +1,9 @@
//! Helper functions and an extension trait for Ethereum 2 ENRs. //! Helper functions and an extension trait for Ethereum 2 ENRs.
pub use discv5::enr::{CombinedKey, EnrBuilder}; pub use discv5::enr::{CombinedKey, EnrBuilder};
use ssz::Encode;
use super::enr_ext::{CombinedKeyExt, ENR_CONTENT_KEY_NETWORK_ID}; use super::enr_ext::CombinedKeyExt;
use super::{EnrExt, ENR_FILENAME}; use super::ENR_FILENAME;
use crate::types::Enr; use crate::types::Enr;
use crate::NetworkConfig; use crate::NetworkConfig;
use discv5::enr::EnrKey; use discv5::enr::EnrKey;
@ -33,9 +32,7 @@ pub fn use_or_load_enr(
Ok(disk_enr) => { Ok(disk_enr) => {
// if the same node id, then we may need to update our sequence number // if the same node id, then we may need to update our sequence number
if local_enr.node_id() == disk_enr.node_id() { if local_enr.node_id() == disk_enr.node_id() {
if compare_enr(local_enr, &disk_enr) if compare_enr(local_enr, &disk_enr) {
&& is_disk_enr_network_id_unchanged(&disk_enr, config)
{
debug!(file = ?enr_f, "ENR loaded from disk"); debug!(file = ?enr_f, "ENR loaded from disk");
// the stored ENR has the same configuration, use it // the stored ENR has the same configuration, use it
*local_enr = disk_enr; *local_enr = disk_enr;
@ -97,13 +94,6 @@ pub fn create_enr_builder_from_config<T: EnrKey>(
let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port); let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port);
builder.tcp(tcp_port); builder.tcp(tcp_port);
} }
// add network identity info in ENR if not disabled
if !config.disable_enr_network_id {
builder.add_value(
ENR_CONTENT_KEY_NETWORK_ID,
&config.network_id.as_ssz_bytes(),
);
}
builder builder
} }
@ -127,14 +117,6 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
&& (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp())
} }
fn is_disk_enr_network_id_unchanged(disk_enr: &Enr, config: &NetworkConfig) -> bool {
match disk_enr.network_identity() {
Some(Ok(id)) => !config.disable_enr_network_id && id == config.network_id,
Some(Err(_)) => false,
None => config.disable_enr_network_id,
}
}
/// Loads enr from the given directory /// Loads enr from the given directory
pub fn load_enr_from_disk(dir: &Path) -> Result<Enr, String> { pub fn load_enr_from_disk(dir: &Path) -> Result<Enr, String> {
let enr_f = dir.join(ENR_FILENAME); let enr_f = dir.join(ENR_FILENAME);


@ -2,12 +2,8 @@
use crate::{Enr, Multiaddr, PeerId}; use crate::{Enr, Multiaddr, PeerId};
use discv5::enr::{CombinedKey, CombinedPublicKey}; use discv5::enr::{CombinedKey, CombinedPublicKey};
use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}; use libp2p::core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol};
use shared_types::NetworkIdentity;
use ssz::Decode;
use tiny_keccak::{Hasher, Keccak}; use tiny_keccak::{Hasher, Keccak};
pub(crate) const ENR_CONTENT_KEY_NETWORK_ID: &'static str = "network_identity";
/// Extend ENR for libp2p types. /// Extend ENR for libp2p types.
pub trait EnrExt { pub trait EnrExt {
/// The libp2p `PeerId` for the record. /// The libp2p `PeerId` for the record.
@ -28,9 +24,6 @@ pub trait EnrExt {
/// Returns any multiaddrs that contain the TCP protocol. /// Returns any multiaddrs that contain the TCP protocol.
fn multiaddr_tcp(&self) -> Vec<Multiaddr>; fn multiaddr_tcp(&self) -> Vec<Multiaddr>;
/// Returns network identity in content.
fn network_identity(&self) -> Option<Result<NetworkIdentity, ssz::DecodeError>>;
} }
/// Extend ENR CombinedPublicKey for libp2p types. /// Extend ENR CombinedPublicKey for libp2p types.
@ -196,12 +189,6 @@ impl EnrExt for Enr {
} }
multiaddrs multiaddrs
} }
/// Returns network identity in content.
fn network_identity(&self) -> Option<Result<NetworkIdentity, ssz::DecodeError>> {
let value = self.get(ENR_CONTENT_KEY_NETWORK_ID)?;
Some(NetworkIdentity::from_ssz_bytes(value))
}
} }
impl CombinedKeyPublicExt for CombinedPublicKey { impl CombinedKeyPublicExt for CombinedPublicKey {


@ -139,7 +139,6 @@ impl Discovery {
udp = ?local_enr.udp(), udp = ?local_enr.udp(),
tcp = ?local_enr.tcp(), tcp = ?local_enr.tcp(),
udp4_socket = ?local_enr.udp_socket(), udp4_socket = ?local_enr.udp_socket(),
network_id = ?local_enr.network_identity(),
"ENR Initialised", "ENR Initialised",
); );
@ -159,7 +158,6 @@ impl Discovery {
ip = ?bootnode_enr.ip(), ip = ?bootnode_enr.ip(),
udp = ?bootnode_enr.udp(), udp = ?bootnode_enr.udp(),
tcp = ?bootnode_enr.tcp(), tcp = ?bootnode_enr.tcp(),
network_id = ?bootnode_enr.network_identity(),
"Adding node to routing table", "Adding node to routing table",
); );
let repr = bootnode_enr.to_string(); let repr = bootnode_enr.to_string();
@ -207,37 +205,13 @@ impl Discovery {
match result { match result {
Ok(enr) => { Ok(enr) => {
debug!( debug!(
multiaddr = %original_addr.to_string(),
node_id = %enr.node_id(), node_id = %enr.node_id(),
peer_id = %enr.peer_id(), peer_id = %enr.peer_id(),
ip = ?enr.ip(), ip = ?enr.ip(),
udp = ?enr.udp(), udp = ?enr.udp(),
tcp = ?enr.tcp(), tcp = ?enr.tcp(),
network_id = ?enr.network_identity(), "Adding node to routing table",
"Adding bootnode to routing table",
); );
// check network identity in bootnode ENR if required
if !config.disable_enr_network_id {
match enr.network_identity() {
Some(Ok(id)) => {
if id != config.network_id {
error!(bootnode=?id, local=?config.network_id, "Bootnode network identity mismatch");
continue;
}
}
Some(Err(err)) => {
error!(?err, "Failed to decode bootnode network identity");
continue;
}
None => {
error!("Bootnode has no network identity");
continue;
}
}
}
// add bootnode into routing table
let _ = discv5.add_enr(enr).map_err(|e| { let _ = discv5.add_enr(enr).map_err(|e| {
error!( error!(
addr = %original_addr.to_string(), addr = %original_addr.to_string(),
@ -427,16 +401,10 @@ impl Discovery {
// Generate a random target node id. // Generate a random target node id.
let random_node = NodeId::random(); let random_node = NodeId::random();
// only discover nodes with same network identity
let local_network_id = self.network_globals.network_id();
let predicate = move |enr: &Enr| -> bool {
matches!(enr.network_identity(), Some(Ok(id)) if id == local_network_id)
};
// Build the future // Build the future
let query_future = self let query_future = self
.discv5 .discv5
.find_node_predicate(random_node, Box::new(predicate), target_peers) .find_node_predicate(random_node, Box::new(|_| true), target_peers)
.map(|v| QueryResult { .map(|v| QueryResult {
query_type: query, query_type: query,
result: v, result: v,


@ -93,11 +93,7 @@ pub use peer_manager::{
}; };
pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME};
/// Defines the current P2P protocol version. pub const PROTOCOL_VERSION: [u8; 3] = [0, 1, 0];
/// - v1: Broadcast FindFile & AnnounceFile messages in the whole network, which caused network too heavey.
/// - v2: Publish NewFile to neighbors only and announce file via RPC message.
pub const PROTOCOL_VERSION_V1: [u8; 3] = [0, 1, 1];
pub const PROTOCOL_VERSION_V2: [u8; 3] = [0, 2, 1];
/// Application level requests sent to the network. /// Application level requests sent to the network.
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@ -160,10 +156,3 @@ pub enum NetworkMessage {
udp_socket: Option<SocketAddr>, udp_socket: Option<SocketAddr>,
}, },
} }
pub type NetworkSender = channel::metrics::Sender<NetworkMessage>;
pub type NetworkReceiver = channel::metrics::Receiver<NetworkMessage>;
pub fn new_network_channel() -> (NetworkSender, NetworkReceiver) {
channel::metrics::unbounded_channel("network")
}


@ -3,9 +3,10 @@
//! Currently supported strategies: //! Currently supported strategies:
//! - UPnP //! - UPnP
use crate::{NetworkConfig, NetworkMessage, NetworkSender}; use crate::{NetworkConfig, NetworkMessage};
use if_addrs::get_if_addrs; use if_addrs::get_if_addrs;
use std::net::{IpAddr, SocketAddr, SocketAddrV4}; use std::net::{IpAddr, SocketAddr, SocketAddrV4};
use tokio::sync::mpsc;
/// Configuration required to construct the UPnP port mappings. /// Configuration required to construct the UPnP port mappings.
pub struct UPnPConfig { pub struct UPnPConfig {
@ -35,7 +36,10 @@ impl UPnPConfig {
} }
/// Attempts to construct external port mappings with UPnP. /// Attempts to construct external port mappings with UPnP.
pub fn construct_upnp_mappings(config: UPnPConfig, network_send: NetworkSender) { pub fn construct_upnp_mappings(
config: UPnPConfig,
network_send: mpsc::UnboundedSender<NetworkMessage>,
) {
info!("UPnP Attempting to initialise routes"); info!("UPnP Attempting to initialise routes");
match igd::search_gateway(Default::default()) { match igd::search_gateway(Default::default()) {
Err(e) => info!(error = %e, "UPnP not available"), Err(e) => info!(error = %e, "UPnP not available"),


@ -1,8 +1,3 @@
use std::time::Duration;
use duration_str::deserialize_duration;
use serde::{Deserialize, Serialize};
/// The time in seconds between re-status's peers. /// The time in seconds between re-status's peers.
pub const DEFAULT_STATUS_INTERVAL: u64 = 300; pub const DEFAULT_STATUS_INTERVAL: u64 = 300;
@ -16,14 +11,9 @@ pub const DEFAULT_PING_INTERVAL_INBOUND: u64 = 20;
pub const DEFAULT_TARGET_PEERS: usize = 50; pub const DEFAULT_TARGET_PEERS: usize = 50;
/// Configurations for the PeerManager. /// Configurations for the PeerManager.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)] #[derive(Debug)]
#[serde(default)]
pub struct Config { pub struct Config {
/* Peer count related configurations */ /* Peer count related configurations */
/// The heartbeat performs regular updates such as updating reputations and performing discovery
/// requests. This defines the interval in seconds.
#[serde(deserialize_with = "deserialize_duration")]
pub heartbeat_interval: Duration,
/// Whether discovery is enabled. /// Whether discovery is enabled.
pub discovery_enabled: bool, pub discovery_enabled: bool,
/// Whether metrics are enabled. /// Whether metrics are enabled.
@ -45,7 +35,6 @@ pub struct Config {
impl Default for Config { impl Default for Config {
fn default() -> Self { fn default() -> Self {
Config { Config {
heartbeat_interval: Duration::from_secs(30),
discovery_enabled: true, discovery_enabled: true,
metrics_enabled: false, metrics_enabled: false,
target_peer_count: DEFAULT_TARGET_PEERS, target_peer_count: DEFAULT_TARGET_PEERS,


@ -30,6 +30,10 @@ use std::net::IpAddr;
pub mod config; pub mod config;
mod network_behaviour; mod network_behaviour;
/// The heartbeat performs regular updates such as updating reputations and performing discovery
/// requests. This defines the interval in seconds.
const HEARTBEAT_INTERVAL: u64 = 30;
/// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would /// This is used in the pruning logic. We avoid pruning peers on sync-committees if doing so would
/// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet /// lower our peer count below this number. Instead we favour a non-uniform distribution of subnet
/// peers. /// peers.
@ -101,7 +105,6 @@ impl PeerManager {
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
) -> error::Result<Self> { ) -> error::Result<Self> {
let config::Config { let config::Config {
heartbeat_interval,
discovery_enabled, discovery_enabled,
metrics_enabled, metrics_enabled,
target_peer_count, target_peer_count,
@ -111,7 +114,7 @@ impl PeerManager {
} = cfg; } = cfg;
// Set up the peer manager heartbeat interval // Set up the peer manager heartbeat interval
let heartbeat = tokio::time::interval(heartbeat_interval); let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL));
Ok(PeerManager { Ok(PeerManager {
network_globals, network_globals,
@ -457,7 +460,6 @@ impl PeerManager {
Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError,
Protocol::Status => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError,
Protocol::DataByHash => PeerAction::MidToleranceError, Protocol::DataByHash => PeerAction::MidToleranceError,
Protocol::AnnounceFile => PeerAction::MidToleranceError,
Protocol::GetChunks => PeerAction::MidToleranceError, Protocol::GetChunks => PeerAction::MidToleranceError,
}, },
}, },
@ -472,7 +474,6 @@ impl PeerManager {
Protocol::Goodbye => return, Protocol::Goodbye => return,
Protocol::Status => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError,
Protocol::DataByHash => return, Protocol::DataByHash => return,
Protocol::AnnounceFile => return,
Protocol::GetChunks => return, Protocol::GetChunks => return,
} }
} }
@ -487,7 +488,6 @@ impl PeerManager {
Protocol::Goodbye => return, Protocol::Goodbye => return,
Protocol::Status => return, Protocol::Status => return,
Protocol::DataByHash => PeerAction::MidToleranceError, Protocol::DataByHash => PeerAction::MidToleranceError,
Protocol::AnnounceFile => PeerAction::MidToleranceError,
Protocol::GetChunks => PeerAction::MidToleranceError, Protocol::GetChunks => PeerAction::MidToleranceError,
}, },
}, },


@ -159,7 +159,6 @@ impl Encoder<OutboundRequest> for SSZSnappyOutboundCodec {
OutboundRequest::Goodbye(req) => req.as_ssz_bytes(), OutboundRequest::Goodbye(req) => req.as_ssz_bytes(),
OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(),
OutboundRequest::DataByHash(req) => req.hashes.as_ssz_bytes(), OutboundRequest::DataByHash(req) => req.hashes.as_ssz_bytes(),
OutboundRequest::AnnounceFile(req) => req.as_ssz_bytes(),
OutboundRequest::GetChunks(req) => req.as_ssz_bytes(), OutboundRequest::GetChunks(req) => req.as_ssz_bytes(),
}; };
// SSZ encoded bytes should be within `max_packet_size` // SSZ encoded bytes should be within `max_packet_size`
@ -347,9 +346,6 @@ fn handle_v1_request(
Protocol::DataByHash => Ok(Some(InboundRequest::DataByHash(DataByHashRequest { Protocol::DataByHash => Ok(Some(InboundRequest::DataByHash(DataByHashRequest {
hashes: VariableList::from_ssz_bytes(decoded_buffer)?, hashes: VariableList::from_ssz_bytes(decoded_buffer)?,
}))), }))),
Protocol::AnnounceFile => Ok(Some(InboundRequest::AnnounceFile(
FileAnnouncement::from_ssz_bytes(decoded_buffer)?,
))),
Protocol::GetChunks => Ok(Some(InboundRequest::GetChunks( Protocol::GetChunks => Ok(Some(InboundRequest::GetChunks(
GetChunksRequest::from_ssz_bytes(decoded_buffer)?, GetChunksRequest::from_ssz_bytes(decoded_buffer)?,
))), ))),
@ -377,10 +373,6 @@ fn handle_v1_response(
Protocol::DataByHash => Ok(Some(RPCResponse::DataByHash(Box::new( Protocol::DataByHash => Ok(Some(RPCResponse::DataByHash(Box::new(
ZgsData::from_ssz_bytes(decoded_buffer)?, ZgsData::from_ssz_bytes(decoded_buffer)?,
)))), )))),
// This case should be unreachable as `AnnounceFile` has no response.
Protocol::AnnounceFile => Err(RPCError::InvalidData(
"AnnounceFile RPC message has no valid response".to_string(),
)),
Protocol::GetChunks => Ok(Some(RPCResponse::Chunks( Protocol::GetChunks => Ok(Some(RPCResponse::Chunks(
ChunkArrayWithProof::from_ssz_bytes(decoded_buffer)?, ChunkArrayWithProof::from_ssz_bytes(decoded_buffer)?,
))), ))),


@ -178,14 +178,6 @@ pub struct DataByHashRequest {
pub hashes: VariableList<Hash256, MaxRequestBlocks>, pub hashes: VariableList<Hash256, MaxRequestBlocks>,
} }
// The message of `AnnounceFile` RPC message.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
pub struct FileAnnouncement {
pub tx_id: TxID,
pub num_shard: usize,
pub shard_id: usize,
}
/// Request a chunk array from a peer. /// Request a chunk array from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
pub struct GetChunksRequest { pub struct GetChunksRequest {


@ -118,7 +118,6 @@ impl<Id: ReqId> RPC<Id> {
.n_every(Protocol::Status, 5, Duration::from_secs(15)) .n_every(Protocol::Status, 5, Duration::from_secs(15))
.one_every(Protocol::Goodbye, Duration::from_secs(10)) .one_every(Protocol::Goodbye, Duration::from_secs(10))
.n_every(Protocol::DataByHash, 128, Duration::from_secs(10)) .n_every(Protocol::DataByHash, 128, Duration::from_secs(10))
.n_every(Protocol::AnnounceFile, 256, Duration::from_secs(10))
.n_every(Protocol::GetChunks, 4096, Duration::from_secs(10)) .n_every(Protocol::GetChunks, 4096, Duration::from_secs(10))
.build() .build()
.expect("Configuration parameters are valid"); .expect("Configuration parameters are valid");


@ -34,7 +34,6 @@ pub enum OutboundRequest {
Goodbye(GoodbyeReason), Goodbye(GoodbyeReason),
Ping(Ping), Ping(Ping),
DataByHash(DataByHashRequest), DataByHash(DataByHashRequest),
AnnounceFile(FileAnnouncement),
GetChunks(GetChunksRequest), GetChunks(GetChunksRequest),
} }
@ -73,11 +72,6 @@ impl OutboundRequest {
Version::V1, Version::V1,
Encoding::SSZSnappy, Encoding::SSZSnappy,
)], )],
OutboundRequest::AnnounceFile(_) => vec![ProtocolId::new(
Protocol::AnnounceFile,
Version::V1,
Encoding::SSZSnappy,
)],
OutboundRequest::GetChunks(_) => vec![ProtocolId::new( OutboundRequest::GetChunks(_) => vec![ProtocolId::new(
Protocol::GetChunks, Protocol::GetChunks,
Version::V1, Version::V1,
@ -95,7 +89,6 @@ impl OutboundRequest {
OutboundRequest::Goodbye(_) => 0, OutboundRequest::Goodbye(_) => 0,
OutboundRequest::Ping(_) => 1, OutboundRequest::Ping(_) => 1,
OutboundRequest::DataByHash(req) => req.hashes.len() as u64, OutboundRequest::DataByHash(req) => req.hashes.len() as u64,
OutboundRequest::AnnounceFile(_) => 0,
OutboundRequest::GetChunks(_) => 1, OutboundRequest::GetChunks(_) => 1,
} }
} }
@ -107,7 +100,6 @@ impl OutboundRequest {
OutboundRequest::Goodbye(_) => Protocol::Goodbye, OutboundRequest::Goodbye(_) => Protocol::Goodbye,
OutboundRequest::Ping(_) => Protocol::Ping, OutboundRequest::Ping(_) => Protocol::Ping,
OutboundRequest::DataByHash(_) => Protocol::DataByHash, OutboundRequest::DataByHash(_) => Protocol::DataByHash,
OutboundRequest::AnnounceFile(_) => Protocol::AnnounceFile,
OutboundRequest::GetChunks(_) => Protocol::GetChunks, OutboundRequest::GetChunks(_) => Protocol::GetChunks,
} }
} }
@ -122,7 +114,6 @@ impl OutboundRequest {
OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Status(_) => unreachable!(),
OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(),
OutboundRequest::Ping(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(),
OutboundRequest::AnnounceFile(_) => unreachable!(),
OutboundRequest::GetChunks(_) => unreachable!(), OutboundRequest::GetChunks(_) => unreachable!(),
} }
} }
@ -179,9 +170,6 @@ impl std::fmt::Display for OutboundRequest {
OutboundRequest::DataByHash(req) => { OutboundRequest::DataByHash(req) => {
write!(f, "Data by hash: {:?}", req) write!(f, "Data by hash: {:?}", req)
} }
OutboundRequest::AnnounceFile(req) => {
write!(f, "AnnounceFile: {:?}", req)
}
OutboundRequest::GetChunks(req) => { OutboundRequest::GetChunks(req) => {
write!(f, "GetChunks: {:?}", req) write!(f, "GetChunks: {:?}", req)
} }

View File

@ -91,8 +91,6 @@ pub enum Protocol {
/// TODO /// TODO
DataByHash, DataByHash,
/// The file announce protocol.
AnnounceFile,
/// The Chunk sync protocol. /// The Chunk sync protocol.
GetChunks, GetChunks,
} }
@ -117,7 +115,6 @@ impl std::fmt::Display for Protocol {
Protocol::Goodbye => "goodbye", Protocol::Goodbye => "goodbye",
Protocol::Ping => "ping", Protocol::Ping => "ping",
Protocol::DataByHash => "data_by_hash", Protocol::DataByHash => "data_by_hash",
Protocol::AnnounceFile => "announce_file",
Protocol::GetChunks => "get_chunks", Protocol::GetChunks => "get_chunks",
}; };
f.write_str(repr) f.write_str(repr)
@ -158,7 +155,6 @@ impl UpgradeInfo for RPCProtocol {
ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::DataByHash, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::DataByHash, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::AnnounceFile, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::GetChunks, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::GetChunks, Version::V1, Encoding::SSZSnappy),
] ]
} }
@ -220,10 +216,6 @@ impl ProtocolId {
// TODO // TODO
RpcLimits::new(1, *DATA_BY_HASH_REQUEST_MAX) RpcLimits::new(1, *DATA_BY_HASH_REQUEST_MAX)
} }
Protocol::AnnounceFile => RpcLimits::new(
<FileAnnouncement as Encode>::ssz_fixed_len(),
<FileAnnouncement as Encode>::ssz_fixed_len(),
),
Protocol::GetChunks => RpcLimits::new( Protocol::GetChunks => RpcLimits::new(
<GetChunksRequest as Encode>::ssz_fixed_len(), <GetChunksRequest as Encode>::ssz_fixed_len(),
<GetChunksRequest as Encode>::ssz_fixed_len(), <GetChunksRequest as Encode>::ssz_fixed_len(),
@ -251,7 +243,6 @@ impl ProtocolId {
<ZgsData as Encode>::ssz_fixed_len(), <ZgsData as Encode>::ssz_fixed_len(),
), ),
Protocol::AnnounceFile => RpcLimits::new(0, 0), // AnnounceFile request has no response
Protocol::GetChunks => RpcLimits::new(*CHUNKS_RESPONSE_MIN, *CHUNKS_RESPONSE_MAX), Protocol::GetChunks => RpcLimits::new(*CHUNKS_RESPONSE_MIN, *CHUNKS_RESPONSE_MAX),
} }
} }
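The request limits in the hunk above come from `ssz_fixed_len()`: when every field of a container has a fixed SSZ size, the encoded length is known up front, so the minimum and maximum limit can be set to the same value. A minimal sketch under the assumption that the derive sums the fields' fixed lengths (as the upstream ssz_derive does); ToyRequest is a made-up stand-in, not the crate's GetChunksRequest.

use ssz::Encode;
use ssz_derive::Encode as DeriveEncode;

// Three u64 fields, each 8 bytes in SSZ, so the container encodes to 24 bytes.
#[derive(DeriveEncode)]
struct ToyRequest {
    tx_seq: u64,
    index_start: u64,
    index_end: u64,
}

fn main() {
    // Same associated function the RpcLimits::new(..) calls above rely on.
    assert_eq!(<ToyRequest as Encode>::ssz_fixed_len(), 24);
}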
@ -334,7 +325,6 @@ pub enum InboundRequest {
Goodbye(GoodbyeReason), Goodbye(GoodbyeReason),
Ping(Ping), Ping(Ping),
DataByHash(DataByHashRequest), DataByHash(DataByHashRequest),
AnnounceFile(FileAnnouncement),
GetChunks(GetChunksRequest), GetChunks(GetChunksRequest),
} }
@ -373,11 +363,6 @@ impl InboundRequest {
Version::V1, Version::V1,
Encoding::SSZSnappy, Encoding::SSZSnappy,
)], )],
InboundRequest::AnnounceFile(_) => vec![ProtocolId::new(
Protocol::AnnounceFile,
Version::V1,
Encoding::SSZSnappy,
)],
InboundRequest::GetChunks(_) => vec![ProtocolId::new( InboundRequest::GetChunks(_) => vec![ProtocolId::new(
Protocol::GetChunks, Protocol::GetChunks,
Version::V1, Version::V1,
@ -395,7 +380,6 @@ impl InboundRequest {
InboundRequest::Goodbye(_) => 0, InboundRequest::Goodbye(_) => 0,
InboundRequest::DataByHash(req) => req.hashes.len() as u64, InboundRequest::DataByHash(req) => req.hashes.len() as u64,
InboundRequest::Ping(_) => 1, InboundRequest::Ping(_) => 1,
InboundRequest::AnnounceFile(_) => 0,
InboundRequest::GetChunks(_) => 1, InboundRequest::GetChunks(_) => 1,
} }
} }
@ -407,7 +391,6 @@ impl InboundRequest {
InboundRequest::Goodbye(_) => Protocol::Goodbye, InboundRequest::Goodbye(_) => Protocol::Goodbye,
InboundRequest::Ping(_) => Protocol::Ping, InboundRequest::Ping(_) => Protocol::Ping,
InboundRequest::DataByHash(_) => Protocol::DataByHash, InboundRequest::DataByHash(_) => Protocol::DataByHash,
InboundRequest::AnnounceFile(_) => Protocol::AnnounceFile,
InboundRequest::GetChunks(_) => Protocol::GetChunks, InboundRequest::GetChunks(_) => Protocol::GetChunks,
} }
} }
@ -422,7 +405,6 @@ impl InboundRequest {
InboundRequest::Status(_) => unreachable!(), InboundRequest::Status(_) => unreachable!(),
InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(),
InboundRequest::Ping(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(),
InboundRequest::AnnounceFile(_) => unreachable!(),
InboundRequest::GetChunks(_) => unreachable!(), InboundRequest::GetChunks(_) => unreachable!(),
} }
} }
@ -541,9 +523,6 @@ impl std::fmt::Display for InboundRequest {
InboundRequest::DataByHash(req) => { InboundRequest::DataByHash(req) => {
write!(f, "Data by hash: {:?}", req) write!(f, "Data by hash: {:?}", req)
} }
InboundRequest::AnnounceFile(req) => {
write!(f, "Announce File: {:?}", req)
}
InboundRequest::GetChunks(req) => { InboundRequest::GetChunks(req) => {
write!(f, "Get Chunks: {:?}", req) write!(f, "Get Chunks: {:?}", req)
} }

View File

@ -68,8 +68,6 @@ pub struct RPCRateLimiter {
status_rl: Limiter<PeerId>, status_rl: Limiter<PeerId>,
/// DataByHash rate limiter. /// DataByHash rate limiter.
data_by_hash_rl: Limiter<PeerId>, data_by_hash_rl: Limiter<PeerId>,
/// AnnounceFile rate limiter.
announce_file_rl: Limiter<PeerId>,
/// GetChunks rate limiter. /// GetChunks rate limiter.
get_chunks_rl: Limiter<PeerId>, get_chunks_rl: Limiter<PeerId>,
} }
@ -93,8 +91,6 @@ pub struct RPCRateLimiterBuilder {
status_quota: Option<Quota>, status_quota: Option<Quota>,
/// Quota for the DataByHash protocol. /// Quota for the DataByHash protocol.
data_by_hash_quota: Option<Quota>, data_by_hash_quota: Option<Quota>,
/// Quota for the AnnounceFile protocol.
announce_file_quota: Option<Quota>,
/// Quota for the GetChunks protocol. /// Quota for the GetChunks protocol.
get_chunks_quota: Option<Quota>, get_chunks_quota: Option<Quota>,
} }
@ -113,7 +109,6 @@ impl RPCRateLimiterBuilder {
Protocol::Status => self.status_quota = q, Protocol::Status => self.status_quota = q,
Protocol::Goodbye => self.goodbye_quota = q, Protocol::Goodbye => self.goodbye_quota = q,
Protocol::DataByHash => self.data_by_hash_quota = q, Protocol::DataByHash => self.data_by_hash_quota = q,
Protocol::AnnounceFile => self.announce_file_quota = q,
Protocol::GetChunks => self.get_chunks_quota = q, Protocol::GetChunks => self.get_chunks_quota = q,
} }
self self
@ -150,9 +145,6 @@ impl RPCRateLimiterBuilder {
let data_by_hash_quota = self let data_by_hash_quota = self
.data_by_hash_quota .data_by_hash_quota
.ok_or("DataByHash quota not specified")?; .ok_or("DataByHash quota not specified")?;
let announce_file_quota = self
.announce_file_quota
.ok_or("AnnounceFile quota not specified")?;
let get_chunks_quota = self let get_chunks_quota = self
.get_chunks_quota .get_chunks_quota
.ok_or("GetChunks quota not specified")?; .ok_or("GetChunks quota not specified")?;
@ -162,7 +154,6 @@ impl RPCRateLimiterBuilder {
let status_rl = Limiter::from_quota(status_quota)?; let status_rl = Limiter::from_quota(status_quota)?;
let goodbye_rl = Limiter::from_quota(goodbye_quota)?; let goodbye_rl = Limiter::from_quota(goodbye_quota)?;
let data_by_hash_rl = Limiter::from_quota(data_by_hash_quota)?; let data_by_hash_rl = Limiter::from_quota(data_by_hash_quota)?;
let announce_file_rl = Limiter::from_quota(announce_file_quota)?;
let get_chunks_rl = Limiter::from_quota(get_chunks_quota)?; let get_chunks_rl = Limiter::from_quota(get_chunks_quota)?;
// check for peers to prune every 30 seconds, starting in 30 seconds // check for peers to prune every 30 seconds, starting in 30 seconds
@ -175,7 +166,6 @@ impl RPCRateLimiterBuilder {
status_rl, status_rl,
goodbye_rl, goodbye_rl,
data_by_hash_rl, data_by_hash_rl,
announce_file_rl,
get_chunks_rl, get_chunks_rl,
init_time: Instant::now(), init_time: Instant::now(),
}) })
@ -220,7 +210,6 @@ impl RPCRateLimiter {
Protocol::Status => &mut self.status_rl, Protocol::Status => &mut self.status_rl,
Protocol::Goodbye => &mut self.goodbye_rl, Protocol::Goodbye => &mut self.goodbye_rl,
Protocol::DataByHash => &mut self.data_by_hash_rl, Protocol::DataByHash => &mut self.data_by_hash_rl,
Protocol::AnnounceFile => &mut self.announce_file_rl,
Protocol::GetChunks => &mut self.get_chunks_rl, Protocol::GetChunks => &mut self.get_chunks_rl,
}; };
check(limiter) check(limiter)

View File

@ -4,7 +4,7 @@ use crate::discovery::enr;
use crate::multiaddr::Protocol; use crate::multiaddr::Protocol;
use crate::rpc::{GoodbyeReason, RPCResponseErrorCode, ReqId}; use crate::rpc::{GoodbyeReason, RPCResponseErrorCode, ReqId};
use crate::types::{error, GossipKind}; use crate::types::{error, GossipKind};
use crate::{EnrExt, NetworkSender}; use crate::{EnrExt, NetworkMessage};
use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource};
use futures::prelude::*; use futures::prelude::*;
use libp2p::core::{ use libp2p::core::{
@ -21,6 +21,7 @@ use std::io::prelude::*;
use std::pin::Pin; use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS};
@ -59,7 +60,7 @@ pub struct Context<'a> {
impl<AppReqId: ReqId> Service<AppReqId> { impl<AppReqId: ReqId> Service<AppReqId> {
pub async fn new( pub async fn new(
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
network_sender: NetworkSender, network_sender: UnboundedSender<NetworkMessage>,
ctx: Context<'_>, ctx: Context<'_>,
) -> error::Result<(Arc<NetworkGlobals>, Keypair, Self)> { ) -> error::Result<(Arc<NetworkGlobals>, Keypair, Self)> {
trace!("Libp2p Service starting"); trace!("Libp2p Service starting");

View File

@ -7,7 +7,7 @@ pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
pub use globals::NetworkGlobals; pub use globals::NetworkGlobals;
pub use pubsub::{ pub use pubsub::{
AnnounceChunks, AnnounceFile, AnnounceShardConfig, FindChunks, FindFile, HasSignature, NewFile, AnnounceChunks, AnnounceFile, AnnounceShardConfig, FindChunks, FindFile, HasSignature,
PubsubMessage, SignedAnnounceChunks, SignedAnnounceFile, SignedAnnounceShardConfig, PubsubMessage, SignedAnnounceChunks, SignedAnnounceFile, SignedAnnounceShardConfig,
SignedMessage, SnappyTransform, SignedMessage, SnappyTransform,
}; };

View File

@ -114,22 +114,9 @@ impl ssz::Decode for WrappedPeerId {
} }
} }
/// Published when a file is uploaded or completely synced from other peers.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct NewFile {
pub tx_id: TxID,
pub num_shard: usize,
pub shard_id: usize,
pub timestamp: u32,
}
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct FindFile { pub struct FindFile {
pub tx_id: TxID, pub tx_id: TxID,
pub num_shard: usize,
pub shard_id: usize,
/// Indicates whether to publish to neighbor nodes only.
pub neighbors_only: bool,
pub timestamp: u32, pub timestamp: u32,
} }
@ -218,7 +205,6 @@ type SignedAnnounceFiles = Vec<SignedAnnounceFile>;
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum PubsubMessage { pub enum PubsubMessage {
ExampleMessage(u64), ExampleMessage(u64),
NewFile(NewFile),
FindFile(FindFile), FindFile(FindFile),
FindChunks(FindChunks), FindChunks(FindChunks),
AnnounceFile(Vec<SignedAnnounceFile>), AnnounceFile(Vec<SignedAnnounceFile>),
@ -297,7 +283,6 @@ impl PubsubMessage {
pub fn kind(&self) -> GossipKind { pub fn kind(&self) -> GossipKind {
match self { match self {
PubsubMessage::ExampleMessage(_) => GossipKind::Example, PubsubMessage::ExampleMessage(_) => GossipKind::Example,
PubsubMessage::NewFile(_) => GossipKind::NewFile,
PubsubMessage::FindFile(_) => GossipKind::FindFile, PubsubMessage::FindFile(_) => GossipKind::FindFile,
PubsubMessage::FindChunks(_) => GossipKind::FindChunks, PubsubMessage::FindChunks(_) => GossipKind::FindChunks,
PubsubMessage::AnnounceFile(_) => GossipKind::AnnounceFile, PubsubMessage::AnnounceFile(_) => GossipKind::AnnounceFile,
@ -324,9 +309,6 @@ impl PubsubMessage {
GossipKind::Example => Ok(PubsubMessage::ExampleMessage( GossipKind::Example => Ok(PubsubMessage::ExampleMessage(
u64::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?, u64::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
)), )),
GossipKind::NewFile => Ok(PubsubMessage::NewFile(
NewFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
)),
GossipKind::FindFile => Ok(PubsubMessage::FindFile( GossipKind::FindFile => Ok(PubsubMessage::FindFile(
FindFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?, FindFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
)), )),
@ -359,7 +341,6 @@ impl PubsubMessage {
// messages for us. // messages for us.
match &self { match &self {
PubsubMessage::ExampleMessage(data) => data.as_ssz_bytes(), PubsubMessage::ExampleMessage(data) => data.as_ssz_bytes(),
PubsubMessage::NewFile(data) => data.as_ssz_bytes(),
PubsubMessage::FindFile(data) => data.as_ssz_bytes(), PubsubMessage::FindFile(data) => data.as_ssz_bytes(),
PubsubMessage::FindChunks(data) => data.as_ssz_bytes(), PubsubMessage::FindChunks(data) => data.as_ssz_bytes(),
PubsubMessage::AnnounceFile(data) => data.as_ssz_bytes(), PubsubMessage::AnnounceFile(data) => data.as_ssz_bytes(),
@ -375,9 +356,6 @@ impl std::fmt::Display for PubsubMessage {
PubsubMessage::ExampleMessage(msg) => { PubsubMessage::ExampleMessage(msg) => {
write!(f, "Example message: {}", msg) write!(f, "Example message: {}", msg)
} }
PubsubMessage::NewFile(msg) => {
write!(f, "NewFile message: {:?}", msg)
}
PubsubMessage::FindFile(msg) => { PubsubMessage::FindFile(msg) => {
write!(f, "FindFile message: {:?}", msg) write!(f, "FindFile message: {:?}", msg)
} }
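The encode/decode paths above reduce to SSZ serialization of the payload structs. A minimal round-trip sketch, with FindFile simplified to plain integer fields so it stays dependency-free; the real message carries a TxID rather than a bare sequence number.

use ssz::{Decode, Encode};
use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode};

// Simplified stand-in for the FindFile payload above.
#[derive(Debug, PartialEq, Eq, DeriveEncode, DeriveDecode)]
struct FindFileLite {
    tx_seq: u64,
    timestamp: u32,
}

fn main() {
    let msg = FindFileLite { tx_seq: 7, timestamp: 1_700_000_000 };
    // The PubsubMessage encode/decode paths above boil down to this round-trip.
    let bytes = msg.as_ssz_bytes();
    assert_eq!(FindFileLite::from_ssz_bytes(&bytes).unwrap(), msg);
}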

View File

@ -8,15 +8,13 @@ use strum::AsRefStr;
pub const TOPIC_PREFIX: &str = "eth2"; pub const TOPIC_PREFIX: &str = "eth2";
pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
pub const EXAMPLE_TOPIC: &str = "example"; pub const EXAMPLE_TOPIC: &str = "example";
pub const NEW_FILE_TOPIC: &str = "new_file";
pub const FIND_FILE_TOPIC: &str = "find_file"; pub const FIND_FILE_TOPIC: &str = "find_file";
pub const FIND_CHUNKS_TOPIC: &str = "find_chunks"; pub const FIND_CHUNKS_TOPIC: &str = "find_chunks";
pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file"; pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file";
pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks"; pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks";
pub const ANNOUNCE_SHARD_CONFIG_TOPIC: &str = "announce_shard_config"; pub const ANNOUNCE_SHARD_CONFIG_TOPIC: &str = "announce_shard_config";
pub const CORE_TOPICS: [GossipKind; 5] = [ pub const CORE_TOPICS: [GossipKind; 4] = [
GossipKind::NewFile,
GossipKind::FindFile, GossipKind::FindFile,
GossipKind::FindChunks, GossipKind::FindChunks,
GossipKind::AnnounceFile, GossipKind::AnnounceFile,
@ -39,7 +37,6 @@ pub struct GossipTopic {
#[strum(serialize_all = "snake_case")] #[strum(serialize_all = "snake_case")]
pub enum GossipKind { pub enum GossipKind {
Example, Example,
NewFile,
FindFile, FindFile,
FindChunks, FindChunks,
AnnounceFile, AnnounceFile,
@ -80,7 +77,6 @@ impl GossipTopic {
let kind = match topic_parts[2] { let kind = match topic_parts[2] {
EXAMPLE_TOPIC => GossipKind::Example, EXAMPLE_TOPIC => GossipKind::Example,
NEW_FILE_TOPIC => GossipKind::NewFile,
FIND_FILE_TOPIC => GossipKind::FindFile, FIND_FILE_TOPIC => GossipKind::FindFile,
FIND_CHUNKS_TOPIC => GossipKind::FindChunks, FIND_CHUNKS_TOPIC => GossipKind::FindChunks,
ANNOUNCE_FILE_TOPIC => GossipKind::AnnounceFile, ANNOUNCE_FILE_TOPIC => GossipKind::AnnounceFile,
@ -110,7 +106,6 @@ impl From<GossipTopic> for String {
let kind = match topic.kind { let kind = match topic.kind {
GossipKind::Example => EXAMPLE_TOPIC, GossipKind::Example => EXAMPLE_TOPIC,
GossipKind::NewFile => NEW_FILE_TOPIC,
GossipKind::FindFile => FIND_FILE_TOPIC, GossipKind::FindFile => FIND_FILE_TOPIC,
GossipKind::FindChunks => FIND_CHUNKS_TOPIC, GossipKind::FindChunks => FIND_CHUNKS_TOPIC,
GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC, GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC,
@ -130,7 +125,6 @@ impl std::fmt::Display for GossipTopic {
let kind = match self.kind { let kind = match self.kind {
GossipKind::Example => EXAMPLE_TOPIC, GossipKind::Example => EXAMPLE_TOPIC,
GossipKind::NewFile => NEW_FILE_TOPIC,
GossipKind::FindFile => FIND_FILE_TOPIC, GossipKind::FindFile => FIND_FILE_TOPIC,
GossipKind::FindChunks => FIND_CHUNKS_TOPIC, GossipKind::FindChunks => FIND_CHUNKS_TOPIC,
GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC, GossipKind::AnnounceFile => ANNOUNCE_FILE_TOPIC,
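For reference, these conversions produce the wire-level topic strings assembled from the prefix, kind and encoding constants; judging from the `topic_parts[2]` lookup in the parsing match above, the layout is `/{TOPIC_PREFIX}/{kind}/{SSZ_SNAPPY_ENCODING_POSTFIX}`. A small sketch under that assumption:

// Illustrative only; mirrors the From<GossipTopic> for String conversion above.
fn full_topic(kind: &str) -> String {
    format!("/{}/{}/{}", "eth2", kind, "ssz_snappy")
}

fn main() {
    assert_eq!(full_topic("find_file"), "/eth2/find_file/ssz_snappy");
    // Parsing splits on '/', so index 2 recovers the kind, as in the match above.
    let parts: Vec<&str> = "/eth2/find_file/ssz_snappy".split('/').collect();
    assert_eq!(parts[2], "find_file");
}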

View File

@ -1,7 +1,6 @@
#![cfg(test)] #![cfg(test)]
use libp2p::gossipsub::GossipsubConfigBuilder; use libp2p::gossipsub::GossipsubConfigBuilder;
use network::new_network_channel;
use network::Enr; use network::Enr;
use network::EnrExt; use network::EnrExt;
use network::Multiaddr; use network::Multiaddr;
@ -23,6 +22,7 @@ pub mod swarm;
type ReqId = usize; type ReqId = usize;
use tempfile::Builder as TempBuilder; use tempfile::Builder as TempBuilder;
use tokio::sync::mpsc::unbounded_channel;
#[allow(unused)] #[allow(unused)]
pub struct Libp2pInstance(LibP2PService<ReqId>, exit_future::Signal); pub struct Libp2pInstance(LibP2PService<ReqId>, exit_future::Signal);
@ -72,7 +72,7 @@ pub async fn build_libp2p_instance(rt: Weak<Runtime>, boot_nodes: Vec<Enr>) -> L
let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor = task_executor::TaskExecutor::new(rt, exit, shutdown_tx); let executor = task_executor::TaskExecutor::new(rt, exit, shutdown_tx);
let libp2p_context = network::Context { config: &config }; let libp2p_context = network::Context { config: &config };
let (sender, _) = new_network_channel(); let (sender, _) = unbounded_channel();
Libp2pInstance( Libp2pInstance(
LibP2PService::new(executor, sender, libp2p_context) LibP2PService::new(executor, sender, libp2p_context)
.await .await

View File

@ -11,7 +11,7 @@ use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use storage::config::{ShardConfig, SHARD_CONFIG_KEY}; use storage::config::{ShardConfig, SHARD_CONFIG_KEY};
use storage::log_store::log_manager::{DATA_DB_KEY, PORA_CHUNK_SIZE}; use storage::log_store::log_manager::PORA_CHUNK_SIZE;
use storage_async::Store; use storage_async::Store;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio::sync::{broadcast, mpsc}; use tokio::sync::{broadcast, mpsc};
@ -223,8 +223,7 @@ impl Pruner {
} }
async fn prune_tx(&mut self, start_sector: u64, end_sector: u64) -> Result<()> { async fn prune_tx(&mut self, start_sector: u64, end_sector: u64) -> Result<()> {
loop { while let Some(tx) = self.store.get_tx_by_seq_number(self.first_tx_seq).await? {
if let Some(tx) = self.store.get_tx_by_seq_number(self.first_tx_seq).await? {
// If a part of the tx data is pruned, we mark the tx as pruned. // If a part of the tx data is pruned, we mark the tx as pruned.
if tx.start_entry_index() >= start_sector && tx.start_entry_index() < end_sector { if tx.start_entry_index() >= start_sector && tx.start_entry_index() < end_sector {
self.store.prune_tx(tx.seq).await?; self.store.prune_tx(tx.seq).await?;
@ -239,10 +238,6 @@ impl Pruner {
); );
} }
self.first_tx_seq += 1; self.first_tx_seq += 1;
} else {
// Wait for `first_tx_seq` to be processed.
tokio::time::sleep(Duration::from_secs(60)).await;
}
} }
Ok(()) Ok(())
} }
@ -257,7 +252,7 @@ impl Pruner {
.update_shard_config(self.config.shard_config) .update_shard_config(self.config.shard_config)
.await; .await;
self.store self.store
.set_config_encoded(&SHARD_CONFIG_KEY, &self.config.shard_config, DATA_DB_KEY) .set_config_encoded(&SHARD_CONFIG_KEY, &self.config.shard_config)
.await .await
} }
@ -270,22 +265,17 @@ impl Pruner {
.set_config_encoded( .set_config_encoded(
&FIRST_REWARDABLE_CHUNK_KEY, &FIRST_REWARDABLE_CHUNK_KEY,
&(new_first_rewardable_chunk, new_first_tx_seq), &(new_first_rewardable_chunk, new_first_tx_seq),
DATA_DB_KEY,
) )
.await .await
} }
} }
async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> { async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
store store.get_config_decoded(&SHARD_CONFIG_KEY).await
.get_config_decoded(&SHARD_CONFIG_KEY, DATA_DB_KEY)
.await
} }
async fn get_first_rewardable_chunk(store: &Store) -> Result<Option<(u64, u64)>> { async fn get_first_rewardable_chunk(store: &Store) -> Result<Option<(u64, u64)>> {
store store.get_config_decoded(&FIRST_REWARDABLE_CHUNK_KEY).await
.get_config_decoded(&FIRST_REWARDABLE_CHUNK_KEY, DATA_DB_KEY)
.await
} }
#[derive(Debug)] #[derive(Debug)]

View File

@ -10,7 +10,7 @@ mod service;
use duration_str::deserialize_duration; use duration_str::deserialize_duration;
use network::Multiaddr; use network::Multiaddr;
use serde::Deserialize; use serde::Deserialize;
use std::{net::IpAddr, time::Duration}; use std::time::Duration;
pub use crate::service::RouterService; pub use crate::service::RouterService;
@ -26,7 +26,6 @@ pub struct Config {
pub libp2p_nodes: Vec<Multiaddr>, pub libp2p_nodes: Vec<Multiaddr>,
pub private_ip_enabled: bool, pub private_ip_enabled: bool,
pub check_announced_ip: bool, pub check_announced_ip: bool,
pub public_address: Option<IpAddr>,
// batcher // batcher
/// Timeout to publish messages in batch /// Timeout to publish messages in batch
@ -48,7 +47,6 @@ impl Default for Config {
libp2p_nodes: vec![], libp2p_nodes: vec![],
private_ip_enabled: false, private_ip_enabled: false,
check_announced_ip: false, check_announced_ip: false,
public_address: None,
batcher_timeout: Duration::from_secs(1), batcher_timeout: Duration::from_secs(1),
batcher_file_capacity: 1, batcher_file_capacity: 1,

View File

@ -5,8 +5,7 @@ use std::{ops::Neg, sync::Arc};
use chunk_pool::ChunkPoolMessage; use chunk_pool::ChunkPoolMessage;
use file_location_cache::FileLocationCache; use file_location_cache::FileLocationCache;
use network::multiaddr::Protocol; use network::multiaddr::Protocol;
use network::rpc::methods::FileAnnouncement; use network::types::{AnnounceShardConfig, SignedAnnounceShardConfig};
use network::types::{AnnounceShardConfig, NewFile, SignedAnnounceShardConfig};
use network::{ use network::{
rpc::StatusMessage, rpc::StatusMessage,
types::{ types::{
@ -16,7 +15,7 @@ use network::{
Keypair, MessageAcceptance, MessageId, NetworkGlobals, NetworkMessage, PeerId, PeerRequestId, Keypair, MessageAcceptance, MessageId, NetworkGlobals, NetworkMessage, PeerId, PeerRequestId,
PublicKey, PubsubMessage, Request, RequestId, Response, PublicKey, PubsubMessage, Request, RequestId, Response,
}; };
use network::{Multiaddr, NetworkSender, PeerAction, ReportSource}; use network::{Multiaddr, PeerAction, ReportSource};
use shared_types::{bytes_to_chunks, timestamp_now, NetworkIdentity, TxID}; use shared_types::{bytes_to_chunks, timestamp_now, NetworkIdentity, TxID};
use storage::config::ShardConfig; use storage::config::ShardConfig;
use storage_async::Store; use storage_async::Store;
@ -30,11 +29,6 @@ use crate::peer_manager::PeerManager;
use crate::Config; use crate::Config;
lazy_static::lazy_static! { lazy_static::lazy_static! {
/// Timeout to publish NewFile message to neighbor nodes.
pub static ref NEW_FILE_TIMEOUT: chrono::Duration = chrono::Duration::seconds(30);
/// Timeout to publish FindFile message to neighbor nodes.
pub static ref FIND_FILE_NEIGHBORS_TIMEOUT: chrono::Duration = chrono::Duration::seconds(30);
/// Timeout to publish FindFile message in the whole network.
pub static ref FIND_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5); pub static ref FIND_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
pub static ref ANNOUNCE_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5); pub static ref ANNOUNCE_FILE_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
pub static ref ANNOUNCE_SHARD_CONFIG_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5); pub static ref ANNOUNCE_SHARD_CONFIG_TIMEOUT: chrono::Duration = chrono::Duration::minutes(5);
@ -88,7 +82,7 @@ pub struct Libp2pEventHandler {
/// A collection of global variables, accessible outside of the network service. /// A collection of global variables, accessible outside of the network service.
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
/// A channel to the router service. /// A channel to the router service.
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
/// A channel to the syncing service. /// A channel to the syncing service.
sync_send: SyncSender, sync_send: SyncSender,
/// A channel to the RPC chunk pool service. /// A channel to the RPC chunk pool service.
@ -112,7 +106,7 @@ impl Libp2pEventHandler {
pub fn new( pub fn new(
config: Config, config: Config,
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
sync_send: SyncSender, sync_send: SyncSender,
chunk_pool_send: UnboundedSender<ChunkPoolMessage>, chunk_pool_send: UnboundedSender<ChunkPoolMessage>,
local_keypair: Keypair, local_keypair: Keypair,
@ -225,25 +219,6 @@ impl Libp2pEventHandler {
}); });
metrics::LIBP2P_HANDLE_GET_CHUNKS_REQUEST.mark(1); metrics::LIBP2P_HANDLE_GET_CHUNKS_REQUEST.mark(1);
} }
Request::AnnounceFile(announcement) => {
match ShardConfig::new(announcement.shard_id, announcement.num_shard) {
Ok(v) => {
self.file_location_cache.insert_peer_config(peer_id, v);
self.send_to_sync(SyncMessage::AnnounceFile {
peer_id,
request_id,
announcement,
});
}
Err(_) => self.send_to_network(NetworkMessage::ReportPeer {
peer_id,
action: PeerAction::Fatal,
source: ReportSource::RPC,
msg: "Invalid shard config in AnnounceFile RPC message",
}),
}
}
Request::DataByHash(_) => { Request::DataByHash(_) => {
// ignore // ignore
} }
@ -341,13 +316,9 @@ impl Libp2pEventHandler {
match message { match message {
PubsubMessage::ExampleMessage(_) => MessageAcceptance::Ignore, PubsubMessage::ExampleMessage(_) => MessageAcceptance::Ignore,
PubsubMessage::NewFile(msg) => {
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE.mark(1);
self.on_new_file(propagation_source, msg).await
}
PubsubMessage::FindFile(msg) => { PubsubMessage::FindFile(msg) => {
metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.mark(1); metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.mark(1);
self.on_find_file(propagation_source, msg).await self.on_find_file(msg).await
} }
PubsubMessage::FindChunks(msg) => { PubsubMessage::FindChunks(msg) => {
metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS.mark(1); metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS.mark(1);
@ -377,83 +348,17 @@ impl Libp2pEventHandler {
} }
} }
/// Handle the NewFile pubsub message `msg` published by the `from` peer. async fn get_listen_addr_or_add(&self) -> Option<Multiaddr> {
async fn on_new_file(&self, from: PeerId, msg: NewFile) -> MessageAcceptance {
// verify timestamp
let d = duration_since(
msg.timestamp,
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE_LATENCY.clone(),
);
if d < TOLERABLE_DRIFT.neg() || d > *NEW_FILE_TIMEOUT {
debug!(?d, ?msg, "Invalid timestamp, ignoring NewFile message");
metrics::LIBP2P_HANDLE_PUBSUB_NEW_FILE_TIMEOUT.mark(1);
self.send_to_network(NetworkMessage::ReportPeer {
peer_id: from,
action: PeerAction::LowToleranceError,
source: ReportSource::Gossipsub,
msg: "Received out of date NewFile message",
});
return MessageAcceptance::Ignore;
}
// verify announced shard config
let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
Ok(v) => v,
Err(_) => return MessageAcceptance::Reject,
};
// ignore if shard config mismatch
let my_shard_config = self.store.get_store().get_shard_config();
if !my_shard_config.intersect(&announced_shard_config) {
return MessageAcceptance::Ignore;
}
// ignore if already exists
match self.store.check_tx_completed(msg.tx_id.seq).await {
Ok(true) => return MessageAcceptance::Ignore,
Ok(false) => {}
Err(err) => {
warn!(?err, tx_seq = %msg.tx_id.seq, "Failed to check tx completed");
return MessageAcceptance::Ignore;
}
}
// ignore if already pruned
match self.store.check_tx_pruned(msg.tx_id.seq).await {
Ok(true) => return MessageAcceptance::Ignore,
Ok(false) => {}
Err(err) => {
warn!(?err, tx_seq = %msg.tx_id.seq, "Failed to check tx pruned");
return MessageAcceptance::Ignore;
}
}
// notify sync layer to handle in advance
self.send_to_sync(SyncMessage::NewFile { from, msg });
MessageAcceptance::Ignore
}
async fn construct_announced_ip(&self) -> Option<Multiaddr> {
// public address configured
if let Some(ip) = self.config.public_address {
let mut addr = Multiaddr::empty();
addr.push(ip.into());
addr.push(Protocol::Tcp(self.network_globals.listen_port_tcp()));
return Some(addr);
}
// public listen address
if let Some(addr) = self.get_listen_addr() { if let Some(addr) = self.get_listen_addr() {
return Some(addr); return Some(addr);
} }
// auto detect public IP address
let ipv4_addr = public_ip::addr_v4().await?; let ipv4_addr = public_ip::addr_v4().await?;
let mut addr = Multiaddr::empty(); let mut addr = Multiaddr::empty();
addr.push(Protocol::Ip4(ipv4_addr)); addr.push(Protocol::Ip4(ipv4_addr));
addr.push(Protocol::Tcp(self.network_globals.listen_port_tcp())); addr.push(Protocol::Tcp(self.network_globals.listen_port_tcp()));
addr.push(Protocol::P2p(self.network_globals.local_peer_id().into()));
self.network_globals self.network_globals
.listen_multiaddrs .listen_multiaddrs
@ -515,7 +420,7 @@ impl Libp2pEventHandler {
let peer_id = *self.network_globals.peer_id.read(); let peer_id = *self.network_globals.peer_id.read();
let addr = self.construct_announced_ip().await?; let addr = self.get_listen_addr_or_add().await?;
let timestamp = timestamp_now(); let timestamp = timestamp_now();
let shard_config = self.store.get_store().get_shard_config(); let shard_config = self.store.get_store().get_shard_config();
@ -547,7 +452,7 @@ impl Libp2pEventHandler {
shard_config: ShardConfig, shard_config: ShardConfig,
) -> Option<PubsubMessage> { ) -> Option<PubsubMessage> {
let peer_id = *self.network_globals.peer_id.read(); let peer_id = *self.network_globals.peer_id.read();
let addr = self.construct_announced_ip().await?; let addr = self.get_listen_addr_or_add().await?;
let timestamp = timestamp_now(); let timestamp = timestamp_now();
let msg = AnnounceShardConfig { let msg = AnnounceShardConfig {
@ -571,69 +476,27 @@ impl Libp2pEventHandler {
Some(PubsubMessage::AnnounceShardConfig(signed)) Some(PubsubMessage::AnnounceShardConfig(signed))
} }
async fn on_find_file(&self, from: PeerId, msg: FindFile) -> MessageAcceptance { async fn on_find_file(&self, msg: FindFile) -> MessageAcceptance {
let FindFile { let FindFile { tx_id, timestamp } = msg;
tx_id, timestamp, ..
} = msg;
// verify timestamp // verify timestamp
let d = duration_since( let d = duration_since(
timestamp, timestamp,
metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY.clone(), metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY.clone(),
); );
let timeout = if msg.neighbors_only { if d < TOLERABLE_DRIFT.neg() || d > *FIND_FILE_TIMEOUT {
*FIND_FILE_NEIGHBORS_TIMEOUT
} else {
*FIND_FILE_TIMEOUT
};
if d < TOLERABLE_DRIFT.neg() || d > timeout {
debug!(%timestamp, ?d, "Invalid timestamp, ignoring FindFile message"); debug!(%timestamp, ?d, "Invalid timestamp, ignoring FindFile message");
metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT.mark(1); metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT.mark(1);
if msg.neighbors_only {
self.send_to_network(NetworkMessage::ReportPeer {
peer_id: from,
action: PeerAction::LowToleranceError,
source: ReportSource::Gossipsub,
msg: "Received out of date FindFile message",
});
}
return MessageAcceptance::Ignore; return MessageAcceptance::Ignore;
} }
// verify announced shard config
let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
Ok(v) => v,
Err(_) => return MessageAcceptance::Reject,
};
// handle on shard config mismatch
let my_shard_config = self.store.get_store().get_shard_config();
if !my_shard_config.intersect(&announced_shard_config) {
return if msg.neighbors_only {
MessageAcceptance::Ignore
} else {
MessageAcceptance::Accept
};
}
// check if we have it // check if we have it
if matches!(self.store.check_tx_completed(tx_id.seq).await, Ok(true)) { if matches!(self.store.check_tx_completed(tx_id.seq).await, Ok(true)) {
if let Ok(Some(tx)) = self.store.get_tx_by_seq_number(tx_id.seq).await { if let Ok(Some(tx)) = self.store.get_tx_by_seq_number(tx_id.seq).await {
if tx.id() == tx_id { if tx.id() == tx_id {
trace!(?tx_id, "Found file locally, responding to FindFile query"); trace!(?tx_id, "Found file locally, responding to FindFile query");
if msg.neighbors_only { if self.publish_file(tx_id).await.is_some() {
// announce file via RPC to avoid flooding pubsub message
self.send_to_network(NetworkMessage::SendRequest {
peer_id: from,
request: Request::AnnounceFile(FileAnnouncement {
tx_id,
num_shard: my_shard_config.num_shard,
shard_id: my_shard_config.shard_id,
}),
request_id: RequestId::Router(Instant::now()),
});
} else if self.publish_file(tx_id).await.is_some() {
metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1); metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1);
return MessageAcceptance::Ignore; return MessageAcceptance::Ignore;
} }
@ -641,11 +504,6 @@ impl Libp2pEventHandler {
} }
} }
// do not forward to the whole network when the FindFile is for neighbor nodes only
if msg.neighbors_only {
return MessageAcceptance::Ignore;
}
// try from cache // try from cache
if let Some(mut msg) = self.file_location_cache.get_one(tx_id) { if let Some(mut msg) = self.file_location_cache.get_one(tx_id) {
trace!(?tx_id, "Found file in cache, responding to FindFile query"); trace!(?tx_id, "Found file in cache, responding to FindFile query");
@ -670,7 +528,7 @@ impl Libp2pEventHandler {
index_end: u64, index_end: u64,
) -> Option<PubsubMessage> { ) -> Option<PubsubMessage> {
let peer_id = *self.network_globals.peer_id.read(); let peer_id = *self.network_globals.peer_id.read();
let addr = self.construct_announced_ip().await?; let addr = self.get_listen_addr_or_add().await?;
let timestamp = timestamp_now(); let timestamp = timestamp_now();
let msg = AnnounceChunks { let msg = AnnounceChunks {
@ -967,7 +825,7 @@ impl Libp2pEventHandler {
} }
} }
async fn publish_file(&self, tx_id: TxID) -> Option<bool> { pub async fn publish_file(&self, tx_id: TxID) -> Option<bool> {
match self.file_batcher.write().await.add(tx_id) { match self.file_batcher.write().await.add(tx_id) {
Some(batch) => { Some(batch) => {
let announcement = self.construct_announce_file_message(batch).await?; let announcement = self.construct_announce_file_message(batch).await?;
@ -1010,12 +868,10 @@ mod tests {
use network::{ use network::{
discovery::{CombinedKey, ConnectionId}, discovery::{CombinedKey, ConnectionId},
discv5::enr::EnrBuilder, discv5::enr::EnrBuilder,
new_network_channel,
rpc::{GetChunksRequest, StatusMessage, SubstreamId}, rpc::{GetChunksRequest, StatusMessage, SubstreamId},
types::FindFile, types::FindFile,
CombinedKeyExt, Keypair, MessageAcceptance, MessageId, Multiaddr, NetworkGlobals, CombinedKeyExt, Keypair, MessageAcceptance, MessageId, Multiaddr, NetworkGlobals,
NetworkMessage, NetworkReceiver, PeerId, PubsubMessage, Request, RequestId, Response, NetworkMessage, PeerId, PubsubMessage, Request, RequestId, Response, SyncId,
SyncId,
}; };
use shared_types::{timestamp_now, ChunkArray, ChunkArrayWithProof, FlowRangeProof, TxID}; use shared_types::{timestamp_now, ChunkArray, ChunkArrayWithProof, FlowRangeProof, TxID};
use storage::{ use storage::{
@ -1037,8 +893,8 @@ mod tests {
runtime: TestRuntime, runtime: TestRuntime,
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
keypair: Keypair, keypair: Keypair,
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
network_recv: NetworkReceiver, network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
sync_send: SyncSender, sync_send: SyncSender,
sync_recv: SyncReceiver, sync_recv: SyncReceiver,
chunk_pool_send: mpsc::UnboundedSender<ChunkPoolMessage>, chunk_pool_send: mpsc::UnboundedSender<ChunkPoolMessage>,
@ -1052,11 +908,12 @@ mod tests {
fn default() -> Self { fn default() -> Self {
let runtime = TestRuntime::default(); let runtime = TestRuntime::default();
let (network_globals, keypair) = Context::new_network_globals(); let (network_globals, keypair) = Context::new_network_globals();
let (network_send, network_recv) = new_network_channel(); let (network_send, network_recv) = mpsc::unbounded_channel();
let (sync_send, sync_recv) = channel::Channel::unbounded("test"); let (sync_send, sync_recv) = channel::Channel::unbounded("test");
let (chunk_pool_send, _chunk_pool_recv) = mpsc::unbounded_channel(); let (chunk_pool_send, _chunk_pool_recv) = mpsc::unbounded_channel();
let store = LogManager::memorydb(LogConfig::default()).unwrap(); let executor = runtime.task_executor.clone();
let store = LogManager::memorydb(LogConfig::default(), executor).unwrap();
Self { Self {
runtime, runtime,
network_globals: Arc::new(network_globals), network_globals: Arc::new(network_globals),
@ -1337,13 +1194,7 @@ mod tests {
) -> MessageAcceptance { ) -> MessageAcceptance {
let (alice, bob) = (PeerId::random(), PeerId::random()); let (alice, bob) = (PeerId::random(), PeerId::random());
let id = MessageId::new(b"dummy message"); let id = MessageId::new(b"dummy message");
let message = PubsubMessage::FindFile(FindFile { let message = PubsubMessage::FindFile(FindFile { tx_id, timestamp });
tx_id,
num_shard: 1,
shard_id: 0,
neighbors_only: false,
timestamp,
});
handler.on_pubsub_message(alice, bob, &id, message).await handler.on_pubsub_message(alice, bob, &id, message).await
} }
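Several handlers in this file gate incoming messages on a timestamp window: a message is dropped when it is either too far in the future (beyond TOLERABLE_DRIFT) or older than the relevant timeout. `duration_since` and the constants are crate-internal, so the snippet below only models the comparison, under the assumption that message timestamps are unix seconds.

// Models the drift check used by the handlers above, with all times in unix seconds.
fn within_window(sent: u32, now: u32, tolerable_drift: i64, timeout: i64) -> bool {
    // Positive `d` means the message was sent in the past; negative means the
    // sender's clock is ahead of ours.
    let d = now as i64 - sent as i64;
    d >= -tolerable_drift && d <= timeout
}

fn main() {
    use std::time::{SystemTime, UNIX_EPOCH};
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before unix epoch")
        .as_secs() as u32;
    // Roughly mirrors a 5 minute timeout with a small tolerated clock drift.
    assert!(within_window(now, now, 10, 300));
    assert!(!within_window(now - 3600, now, 10, 300));
}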

View File

@ -44,11 +44,6 @@ lazy_static::lazy_static! {
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_response_error", "qps"); pub static ref LIBP2P_HANDLE_RESPONSE_ERROR: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_response_error", "qps");
pub static ref LIBP2P_HANDLE_RESPONSE_ERROR_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_response_error", "latency", 1024); pub static ref LIBP2P_HANDLE_RESPONSE_ERROR_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_response_error", "latency", 1024);
// libp2p_event_handler: new file
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_new_file", "qps");
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_new_file", "latency", 1024);
pub static ref LIBP2P_HANDLE_PUBSUB_NEW_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_new_file", "timeout");
// libp2p_event_handler: find & announce file // libp2p_event_handler: find & announce file
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "qps"); pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "qps");
pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_find_file", "latency", 1024); pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_find_file", "latency", 1024);

View File

@ -6,11 +6,10 @@ use file_location_cache::FileLocationCache;
use futures::{channel::mpsc::Sender, prelude::*}; use futures::{channel::mpsc::Sender, prelude::*};
use miner::MinerMessage; use miner::MinerMessage;
use network::{ use network::{
types::NewFile, BehaviourEvent, Keypair, Libp2pEvent, NetworkGlobals, NetworkMessage, BehaviourEvent, Keypair, Libp2pEvent, NetworkGlobals, NetworkMessage, RequestId,
NetworkReceiver, NetworkSender, PubsubMessage, RequestId, Service as LibP2PService, Swarm, Service as LibP2PService, Swarm,
}; };
use pruner::PrunerMessage; use pruner::PrunerMessage;
use shared_types::timestamp_now;
use std::sync::Arc; use std::sync::Arc;
use storage::log_store::Store as LogStore; use storage::log_store::Store as LogStore;
use storage_async::Store; use storage_async::Store;
@ -31,7 +30,7 @@ pub struct RouterService {
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
/// The receiver channel for Zgs to communicate with the network service. /// The receiver channel for Zgs to communicate with the network service.
network_recv: NetworkReceiver, network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
/// The receiver channel for Zgs to communicate with the pruner service. /// The receiver channel for Zgs to communicate with the pruner service.
pruner_recv: Option<mpsc::UnboundedReceiver<PrunerMessage>>, pruner_recv: Option<mpsc::UnboundedReceiver<PrunerMessage>>,
@ -45,8 +44,6 @@ pub struct RouterService {
/// Stores potentially created UPnP mappings to be removed on shutdown. (TCP port and UDP /// Stores potentially created UPnP mappings to be removed on shutdown. (TCP port and UDP
/// port). /// port).
upnp_mappings: (Option<u16>, Option<u16>), upnp_mappings: (Option<u16>, Option<u16>),
store: Arc<dyn LogStore>,
} }
impl RouterService { impl RouterService {
@ -55,8 +52,8 @@ impl RouterService {
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
libp2p: LibP2PService<RequestId>, libp2p: LibP2PService<RequestId>,
network_globals: Arc<NetworkGlobals>, network_globals: Arc<NetworkGlobals>,
network_recv: NetworkReceiver, network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
sync_send: SyncSender, sync_send: SyncSender,
_miner_send: Option<broadcast::Sender<MinerMessage>>, _miner_send: Option<broadcast::Sender<MinerMessage>>,
chunk_pool_send: UnboundedSender<ChunkPoolMessage>, chunk_pool_send: UnboundedSender<ChunkPoolMessage>,
@ -66,6 +63,7 @@ impl RouterService {
local_keypair: Keypair, local_keypair: Keypair,
config: Config, config: Config,
) { ) {
let store = Store::new(store, executor.clone());
let peers = Arc::new(RwLock::new(PeerManager::new(config.clone()))); let peers = Arc::new(RwLock::new(PeerManager::new(config.clone())));
// create the network service and spawn the task // create the network service and spawn the task
@ -83,12 +81,11 @@ impl RouterService {
sync_send, sync_send,
chunk_pool_send, chunk_pool_send,
local_keypair, local_keypair,
Store::new(store.clone(), executor.clone()), store,
file_location_cache, file_location_cache,
peers, peers,
), ),
upnp_mappings: (None, None), upnp_mappings: (None, None),
store,
}; };
// spawn service // spawn service
@ -331,16 +328,15 @@ impl RouterService {
} }
} }
NetworkMessage::AnnounceLocalFile { tx_id } => { NetworkMessage::AnnounceLocalFile { tx_id } => {
let shard_config = self.store.get_shard_config(); if self
let msg = PubsubMessage::NewFile(NewFile { .libp2p_event_handler
tx_id, .publish_file(tx_id)
num_shard: shard_config.num_shard, .await
shard_id: shard_config.shard_id, .is_some()
timestamp: timestamp_now(), {
});
self.libp2p.swarm.behaviour_mut().publish(vec![msg]);
metrics::SERVICE_ROUTE_NETWORK_MESSAGE_ANNOUNCE_LOCAL_FILE.mark(1); metrics::SERVICE_ROUTE_NETWORK_MESSAGE_ANNOUNCE_LOCAL_FILE.mark(1);
} }
}
NetworkMessage::UPnPMappingEstablished { NetworkMessage::UPnPMappingEstablished {
tcp_socket, tcp_socket,
udp_socket, udp_socket,

View File

@ -17,13 +17,15 @@ use file_location_cache::FileLocationCache;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use jsonrpsee::core::RpcResult; use jsonrpsee::core::RpcResult;
use jsonrpsee::http_server::{HttpServerBuilder, HttpServerHandle}; use jsonrpsee::http_server::{HttpServerBuilder, HttpServerHandle};
use network::{NetworkGlobals, NetworkMessage, NetworkSender}; use network::NetworkGlobals;
use network::NetworkMessage;
use std::error::Error; use std::error::Error;
use std::sync::Arc; use std::sync::Arc;
use storage_async::Store; use storage_async::Store;
use sync::{SyncRequest, SyncResponse, SyncSender}; use sync::{SyncRequest, SyncResponse, SyncSender};
use task_executor::ShutdownReason; use task_executor::ShutdownReason;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::sync::mpsc::UnboundedSender;
use zgs::RpcServer as ZgsRpcServer; use zgs::RpcServer as ZgsRpcServer;
use zgs_miner::MinerMessage; use zgs_miner::MinerMessage;
@ -40,7 +42,7 @@ pub struct Context {
pub config: RPCConfig, pub config: RPCConfig,
pub file_location_cache: Arc<FileLocationCache>, pub file_location_cache: Arc<FileLocationCache>,
pub network_globals: Arc<NetworkGlobals>, pub network_globals: Arc<NetworkGlobals>,
pub network_send: NetworkSender, pub network_send: UnboundedSender<NetworkMessage>,
pub sync_send: SyncSender, pub sync_send: SyncSender,
pub chunk_pool: Arc<MemoryChunkPool>, pub chunk_pool: Arc<MemoryChunkPool>,
pub log_store: Arc<Store>, pub log_store: Arc<Store>,

View File

@ -53,8 +53,6 @@ pub struct FileInfo {
pub finalized: bool, pub finalized: bool,
pub is_cached: bool, pub is_cached: bool,
pub uploaded_seg_num: usize, pub uploaded_seg_num: usize,
/// Whether file is pruned, in which case `finalized` will be `false`.
pub pruned: bool,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]

View File

@ -2,7 +2,7 @@ use crate::types::{FileInfo, Segment, SegmentWithProof, Status};
use jsonrpsee::core::RpcResult; use jsonrpsee::core::RpcResult;
use jsonrpsee::proc_macros::rpc; use jsonrpsee::proc_macros::rpc;
use shared_types::{DataRoot, FlowProof, TxSeqOrRoot}; use shared_types::{DataRoot, FlowProof, TxSeqOrRoot};
use storage::{config::ShardConfig, H256}; use storage::config::ShardConfig;
#[rpc(server, client, namespace = "zgs")] #[rpc(server, client, namespace = "zgs")]
pub trait Rpc { pub trait Rpc {
@ -77,7 +77,4 @@ pub trait Rpc {
sector_index: u64, sector_index: u64,
flow_root: Option<DataRoot>, flow_root: Option<DataRoot>,
) -> RpcResult<FlowProof>; ) -> RpcResult<FlowProof>;
#[method(name = "getFlowContext")]
async fn get_flow_context(&self) -> RpcResult<(H256, u64)>;
} }

View File

@ -8,8 +8,7 @@ use jsonrpsee::core::RpcResult;
use shared_types::{DataRoot, FlowProof, Transaction, TxSeqOrRoot, CHUNK_SIZE}; use shared_types::{DataRoot, FlowProof, Transaction, TxSeqOrRoot, CHUNK_SIZE};
use std::fmt::{Debug, Formatter, Result}; use std::fmt::{Debug, Formatter, Result};
use storage::config::ShardConfig; use storage::config::ShardConfig;
use storage::log_store::tx_store::TxStatus; use storage::try_option;
use storage::{try_option, H256};
pub struct RpcServerImpl { pub struct RpcServerImpl {
pub ctx: Context, pub ctx: Context,
@ -199,10 +198,6 @@ impl RpcServer for RpcServerImpl {
assert_eq!(proof.left_proof, proof.right_proof); assert_eq!(proof.left_proof, proof.right_proof);
Ok(proof.right_proof) Ok(proof.right_proof)
} }
async fn get_flow_context(&self) -> RpcResult<(H256, u64)> {
Ok(self.ctx.log_store.get_context().await?)
}
} }
impl RpcServerImpl { impl RpcServerImpl {
@ -246,12 +241,7 @@ impl RpcServerImpl {
} }
async fn get_file_info_by_tx(&self, tx: Transaction) -> RpcResult<FileInfo> { async fn get_file_info_by_tx(&self, tx: Transaction) -> RpcResult<FileInfo> {
let (finalized, pruned) = match self.ctx.log_store.get_store().get_tx_status(tx.seq)? { let finalized = self.ctx.log_store.check_tx_completed(tx.seq).await?;
Some(TxStatus::Finalized) => (true, false),
Some(TxStatus::Pruned) => (false, true),
None => (false, false),
};
let (uploaded_seg_num, is_cached) = match self let (uploaded_seg_num, is_cached) = match self
.ctx .ctx
.chunk_pool .chunk_pool
@ -260,7 +250,7 @@ impl RpcServerImpl {
{ {
Some(v) => v, Some(v) => v,
_ => ( _ => (
if finalized || pruned { if finalized {
let chunks_per_segment = self.ctx.config.chunks_per_segment; let chunks_per_segment = self.ctx.config.chunks_per_segment;
let (num_segments, _) = SegmentWithProof::split_file_into_segments( let (num_segments, _) = SegmentWithProof::split_file_into_segments(
tx.size as usize, tx.size as usize,
@ -279,7 +269,6 @@ impl RpcServerImpl {
finalized, finalized,
is_cached, is_cached,
uploaded_seg_num, uploaded_seg_num,
pruned,
}) })
} }

View File

@ -9,11 +9,13 @@ use merkle_light::merkle::MerkleTree;
use merkle_light::proof::Proof as RawFileProof; use merkle_light::proof::Proof as RawFileProof;
use merkle_light::{hash::Algorithm, merkle::next_pow2}; use merkle_light::{hash::Algorithm, merkle::next_pow2};
use merkle_tree::RawLeafSha3Algorithm; use merkle_tree::RawLeafSha3Algorithm;
use serde::de::Visitor;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ssz::Encode; use ssz::Encode;
use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode}; use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode};
use std::fmt; use std::fmt;
use std::hash::Hasher; use std::hash::Hasher;
use std::str::FromStr;
use tiny_keccak::{Hasher as KeccakHasher, Keccak}; use tiny_keccak::{Hasher as KeccakHasher, Keccak};
use tracing::debug; use tracing::debug;
@ -396,39 +398,82 @@ pub struct ProtocolVersion {
pub build: u8, pub build: u8,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug)]
#[serde(untagged)]
pub enum TxSeqOrRoot { pub enum TxSeqOrRoot {
TxSeq(u64), TxSeq(u64),
Root(DataRoot), Root(DataRoot),
} }
impl Serialize for TxSeqOrRoot {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
TxSeqOrRoot::TxSeq(seq) => seq.serialize(serializer),
TxSeqOrRoot::Root(root) => root.serialize(serializer),
}
}
}
impl<'a> Deserialize<'a> for TxSeqOrRoot {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
deserializer.deserialize_any(TxSeqOrRootVisitor)
}
}
struct TxSeqOrRootVisitor;
impl<'a> Visitor<'a> for TxSeqOrRootVisitor {
type Value = TxSeqOrRoot;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "an u64 integer or a hex64 value")
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(TxSeqOrRoot::TxSeq(v))
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
let root: H256 = H256::from_str(v).map_err(E::custom)?;
Ok(TxSeqOrRoot::Root(root))
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::str::FromStr;
use super::*; use super::*;
#[test] #[test]
fn test_tx_seq_or_root_serde() { fn test_tx_seq_or_root_serde() {
// serialize tx seq as number // serialize tx seq
let tx_seq = TxSeqOrRoot::TxSeq(666); let tx_seq = TxSeqOrRoot::TxSeq(666);
assert_eq!(serde_json::to_string(&tx_seq).unwrap(), "666".to_string()); assert_eq!(serde_json::to_string(&tx_seq).unwrap(), "666".to_string());
// serialize root as quoted string // serialize root
let hash_str = "0xa906f46f8b9f15908dbee7adc5492ff30779c3abe114ccdb7079ecdcb72eb855"; let hash_str = "0xa906f46f8b9f15908dbee7adc5492ff30779c3abe114ccdb7079ecdcb72eb855";
let hash_quoted = format!("\"{}\"", hash_str); let hash_quoted = format!("\"{}\"", hash_str);
let hash = H256::from_str(hash_str).unwrap(); let hash = H256::from_str(hash_str).unwrap();
let root = TxSeqOrRoot::Root(hash); let root = TxSeqOrRoot::Root(hash);
assert_eq!(serde_json::to_string(&root).unwrap(), hash_quoted); assert_eq!(serde_json::to_string(&root).unwrap(), hash_quoted);
// deserialize tx seq from number // deserialize tx seq
assert!(matches!( assert!(matches!(
serde_json::from_str::<TxSeqOrRoot>("777").unwrap(), serde_json::from_str::<TxSeqOrRoot>("777").unwrap(),
TxSeqOrRoot::TxSeq(777) TxSeqOrRoot::TxSeq(777)
)); ));
// deserialize root from quoted string // deserialize root
assert!(matches!( assert!(matches!(
serde_json::from_str::<TxSeqOrRoot>(hash_quoted.as_str()).unwrap(), serde_json::from_str::<TxSeqOrRoot>(hash_quoted.as_str()).unwrap(),
TxSeqOrRoot::Root(v) if v == hash, TxSeqOrRoot::Root(v) if v == hash,

View File

@ -1,11 +1,11 @@
use super::{Client, RuntimeContext}; use super::{Client, RuntimeContext};
use chunk_pool::{Config as ChunkPoolConfig, MemoryChunkPool}; use chunk_pool::{ChunkPoolMessage, Config as ChunkPoolConfig, MemoryChunkPool};
use file_location_cache::FileLocationCache; use file_location_cache::FileLocationCache;
use log_entry_sync::{LogSyncConfig, LogSyncEvent, LogSyncManager}; use log_entry_sync::{LogSyncConfig, LogSyncEvent, LogSyncManager};
use miner::{MineService, MinerConfig, MinerMessage, ShardConfig}; use miner::{MineService, MinerConfig, MinerMessage};
use network::{ use network::{
self, new_network_channel, Keypair, NetworkConfig, NetworkGlobals, NetworkReceiver, self, Keypair, NetworkConfig, NetworkGlobals, NetworkMessage, RequestId,
NetworkSender, RequestId, Service as LibP2PService, Service as LibP2PService,
}; };
use pruner::{Pruner, PrunerConfig, PrunerMessage}; use pruner::{Pruner, PrunerConfig, PrunerMessage};
use router::RouterService; use router::RouterService;
@ -27,12 +27,15 @@ macro_rules! require {
} }
struct NetworkComponents { struct NetworkComponents {
send: NetworkSender, send: mpsc::UnboundedSender<NetworkMessage>,
globals: Arc<NetworkGlobals>, globals: Arc<NetworkGlobals>,
keypair: Keypair, keypair: Keypair,
// note: these will be owned by the router service // note: these will be owned by the router service
owned: Option<(LibP2PService<RequestId>, NetworkReceiver)>, owned: Option<(
LibP2PService<RequestId>,
mpsc::UnboundedReceiver<NetworkMessage>,
)>,
} }
struct SyncComponents { struct SyncComponents {
@ -54,7 +57,7 @@ struct PrunerComponents {
} }
struct ChunkPoolComponents { struct ChunkPoolComponents {
chunk_pool: Arc<MemoryChunkPool>, send: mpsc::UnboundedSender<ChunkPoolMessage>,
} }
/// Builds a `Client` instance. /// Builds a `Client` instance.
@ -86,9 +89,10 @@ impl ClientBuilder {
/// Initializes in-memory storage. /// Initializes in-memory storage.
pub fn with_memory_store(mut self) -> Result<Self, String> { pub fn with_memory_store(mut self) -> Result<Self, String> {
let executor = require!("sync", self, runtime_context).clone().executor;
// TODO(zz): Set config. // TODO(zz): Set config.
let store = Arc::new( let store = Arc::new(
LogManager::memorydb(LogConfig::default()) LogManager::memorydb(LogConfig::default(), executor)
.map_err(|e| format!("Unable to start in-memory store: {:?}", e))?, .map_err(|e| format!("Unable to start in-memory store: {:?}", e))?,
); );
@ -106,12 +110,9 @@ impl ClientBuilder {
/// Initializes RocksDB storage. /// Initializes RocksDB storage.
pub fn with_rocksdb_store(mut self, config: &StorageConfig) -> Result<Self, String> { pub fn with_rocksdb_store(mut self, config: &StorageConfig) -> Result<Self, String> {
let executor = require!("sync", self, runtime_context).clone().executor;
let store = Arc::new( let store = Arc::new(
LogManager::rocksdb( LogManager::rocksdb(LogConfig::default(), &config.db_dir, executor)
config.log_config.clone(),
config.db_dir.join("flow_db"),
config.db_dir.join("data_db"),
)
.map_err(|e| format!("Unable to start RocksDB store: {:?}", e))?, .map_err(|e| format!("Unable to start RocksDB store: {:?}", e))?,
); );
@ -141,7 +142,7 @@ impl ClientBuilder {
let service_context = network::Context { config }; let service_context = network::Context { config };
// construct communication channel // construct communication channel
let (send, recv) = new_network_channel(); let (send, recv) = mpsc::unbounded_channel::<NetworkMessage>();
// launch libp2p service // launch libp2p service
let (globals, keypair, libp2p) = let (globals, keypair, libp2p) =
@ -215,22 +216,12 @@ impl ClientBuilder {
Ok(self) Ok(self)
} }
pub async fn with_shard(self, config: ShardConfig) -> Result<Self, String> {
self.async_store
.as_ref()
.unwrap()
.update_shard_config(config)
.await;
Ok(self)
}
     /// Starts the networking stack.
     pub fn with_router(mut self, router_config: router::Config) -> Result<Self, String> {
         let executor = require!("router", self, runtime_context).clone().executor;
         let sync_send = require!("router", self, sync).send.clone(); // note: we can make this optional in the future
         let miner_send = self.miner.as_ref().map(|x| x.send.clone());
-        let chunk_pool_send = require!("router", self, chunk_pool).chunk_pool.sender();
+        let chunk_pool_send = require!("router", self, chunk_pool).send.clone();
         let store = require!("router", self, store).clone();
         let file_location_cache = require!("router", self, file_location_cache).clone();
@@ -260,7 +251,11 @@ impl ClientBuilder {
         Ok(self)
     }

-    pub async fn with_rpc(self, rpc_config: RPCConfig) -> Result<Self, String> {
+    pub async fn with_rpc(
+        mut self,
+        rpc_config: RPCConfig,
+        chunk_pool_config: ChunkPoolConfig,
+    ) -> Result<Self, String> {
         if !rpc_config.enabled {
             return Ok(self);
         }
@@ -269,9 +264,16 @@ impl ClientBuilder {
         let async_store = require!("rpc", self, async_store).clone();
         let network_send = require!("rpc", self, network).send.clone();
         let mine_send = self.miner.as_ref().map(|x| x.send.clone());
+        let synced_tx_recv = require!("rpc", self, log_sync).send.subscribe();
         let file_location_cache = require!("rpc", self, file_location_cache).clone();
-        let chunk_pool = require!("rpc", self, chunk_pool).chunk_pool.clone();
+        let (chunk_pool, chunk_pool_handler) =
+            chunk_pool::unbounded(chunk_pool_config, async_store.clone(), network_send.clone());
+        let chunk_pool_components = ChunkPoolComponents {
+            send: chunk_pool.sender(),
+        };
+        let chunk_pool_clone = chunk_pool.clone();

         let ctx = rpc::Context {
             config: rpc_config,
             file_location_cache,
@@ -284,7 +286,7 @@ impl ClientBuilder {
             mine_service_sender: mine_send,
         };

-        let (rpc_handle, maybe_admin_rpc_handle) = rpc::run_server(ctx)
+        let (rpc_handle, maybe_admin_rpc_handle) = rpc::run_server(ctx.clone())
             .await
             .map_err(|e| format!("Unable to start HTTP RPC server: {:?}", e))?;
@@ -292,29 +294,13 @@ impl ClientBuilder {
         if let Some(admin_rpc_handle) = maybe_admin_rpc_handle {
             executor.spawn(admin_rpc_handle, "rpc_admin");
         }
-        Ok(self)
-    }
-
-    pub async fn with_chunk_pool(
-        mut self,
-        chunk_pool_config: ChunkPoolConfig,
-    ) -> Result<Self, String> {
-        let executor = require!("rpc", self, runtime_context).clone().executor;
-        let async_store = require!("rpc", self, async_store).clone();
-        let network_send = require!("rpc", self, network).send.clone();
-        let synced_tx_recv = require!("rpc", self, log_sync).send.subscribe();
-        let (chunk_pool, chunk_pool_handler) =
-            chunk_pool::unbounded(chunk_pool_config, async_store.clone(), network_send.clone());
         executor.spawn(chunk_pool_handler.run(), "chunk_pool_handler");
         executor.spawn(
-            MemoryChunkPool::monitor_log_entry(chunk_pool.clone(), synced_tx_recv),
+            MemoryChunkPool::monitor_log_entry(chunk_pool_clone, synced_tx_recv),
             "chunk_pool_log_monitor",
         );

-        self.chunk_pool = Some(ChunkPoolComponents { chunk_pool });
+        self.chunk_pool = Some(chunk_pool_components);

         Ok(self)
     }
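A note on the `require!` calls used throughout these builder methods: the macro itself is only named in a hunk header in this excerpt. A plausible minimal shape for it (a hypothetical sketch, not the repository's actual definition) is a lookup that turns a missing component into a readable error:

macro_rules! require {
    ($component:expr, $self:ident, $field:ident) => {
        $self
            .$field
            .as_ref()
            .ok_or_else(|| format!("{} requires {}", $component, stringify!($field)))?
    };
}

Under that assumption, `require!("rpc", self, async_store)` returns a reference to the already-built `async_store` component or aborts `with_rpc` with an error string.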


@ -5,14 +5,12 @@ use ethereum_types::{H256, U256};
use ethers::prelude::{Http, Middleware, Provider}; use ethers::prelude::{Http, Middleware, Provider};
use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig}; use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig};
use miner::MinerConfig; use miner::MinerConfig;
use network::{EnrExt, NetworkConfig}; use network::NetworkConfig;
use pruner::PrunerConfig; use pruner::PrunerConfig;
use shared_types::{NetworkIdentity, ProtocolVersion}; use shared_types::{NetworkIdentity, ProtocolVersion};
use std::net::IpAddr; use std::net::IpAddr;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use storage::config::ShardConfig; use storage::config::ShardConfig;
use storage::log_store::log_manager::LogConfig;
use storage::StorageConfig; use storage::StorageConfig;
impl ZgsConfig { impl ZgsConfig {
@@ -39,21 +37,15 @@ impl ZgsConfig {
             .await
             .map_err(|e| format!("Unable to get chain id: {:?}", e))?
             .as_u64();
-        let network_protocol_version = if self.sync.neighbors_only {
-            network::PROTOCOL_VERSION_V2
-        } else {
-            network::PROTOCOL_VERSION_V1
-        };
-        let local_network_id = NetworkIdentity {
+        network_config.network_id = NetworkIdentity {
             chain_id,
             flow_address,
             p2p_protocol_version: ProtocolVersion {
-                major: network_protocol_version[0],
-                minor: network_protocol_version[1],
-                build: network_protocol_version[2],
+                major: network::PROTOCOL_VERSION[0],
+                minor: network::PROTOCOL_VERSION[1],
+                build: network::PROTOCOL_VERSION[2],
             },
         };
-        network_config.network_id = local_network_id.clone();
if !self.network_disable_discovery { if !self.network_disable_discovery {
network_config.enr_tcp_port = Some(self.network_enr_tcp_port); network_config.enr_tcp_port = Some(self.network_enr_tcp_port);
@@ -89,13 +81,7 @@ impl ZgsConfig {
             .collect::<Result<_, _>>()
             .map_err(|e| format!("Unable to parse network_libp2p_nodes: {:?}", e))?;

-        network_config.discv5_config.table_filter = if self.discv5_disable_enr_network_id {
-            Arc::new(|_| true)
-        } else {
-            Arc::new(
-                move |enr| matches!(enr.network_identity(), Some(Ok(id)) if id == local_network_id),
-            )
-        };
+        network_config.discv5_config.table_filter = |_| true;
         network_config.discv5_config.request_timeout =
             Duration::from_secs(self.discv5_request_timeout_secs);
         network_config.discv5_config.query_peer_timeout =
@ -110,18 +96,13 @@ impl ZgsConfig {
network_config.private = self.network_private; network_config.private = self.network_private;
network_config.peer_db = self.network_peer_db; network_config.peer_db = self.network_peer_db;
network_config.peer_manager = self.network_peer_manager;
network_config.disable_enr_network_id = self.discv5_disable_enr_network_id;
Ok(network_config) Ok(network_config)
} }
     pub fn storage_config(&self) -> Result<StorageConfig, String> {
-        let mut log_config = LogConfig::default();
-        log_config.flow.merkle_node_cache_capacity = self.merkle_node_cache_capacity;
         Ok(StorageConfig {
             db_dir: self.db_dir.clone().into(),
-            log_config,
         })
     }
@ -151,7 +132,6 @@ impl ZgsConfig {
self.remove_finalized_block_interval_minutes, self.remove_finalized_block_interval_minutes,
self.watch_loop_wait_time_ms, self.watch_loop_wait_time_ms,
self.force_log_sync_from_start_block_number, self.force_log_sync_from_start_block_number,
Duration::from_secs(self.blockchain_rpc_timeout_secs),
)) ))
} }
@ -220,13 +200,6 @@ impl ZgsConfig {
pub fn router_config(&self, network_config: &NetworkConfig) -> Result<router::Config, String> { pub fn router_config(&self, network_config: &NetworkConfig) -> Result<router::Config, String> {
let mut router_config = self.router.clone(); let mut router_config = self.router.clone();
router_config.libp2p_nodes = network_config.libp2p_nodes.to_vec(); router_config.libp2p_nodes = network_config.libp2p_nodes.to_vec();
if router_config.public_address.is_none() {
if let Some(addr) = &self.network_enr_address {
router_config.public_address = Some(addr.parse().unwrap());
}
}
Ok(router_config) Ok(router_config)
} }
@ -255,7 +228,7 @@ impl ZgsConfig {
} }
} }
-    pub fn shard_config(&self) -> Result<ShardConfig, String> {
+    fn shard_config(&self) -> Result<ShardConfig, String> {
self.shard_position.clone().try_into() self.shard_position.clone().try_into()
} }
} }
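`shard_config` above simply defers to a `TryInto<ShardConfig>` conversion on `shard_position`; the accepted string format is not visible in this diff. Purely for illustration (hypothetical names and format, not the storage crate's real implementation), such a conversion could parse an optional "id/total" string:

struct ShardConfig {
    shard_id: usize,
    num_shard: usize,
}

impl TryFrom<Option<String>> for ShardConfig {
    type Error = String;

    fn try_from(pos: Option<String>) -> Result<Self, Self::Error> {
        match pos {
            // No position configured: a single shard that stores everything.
            None => Ok(ShardConfig { shard_id: 0, num_shard: 1 }),
            Some(s) => {
                let (id, total) = s
                    .split_once('/')
                    .ok_or_else(|| format!("invalid shard position: {}", s))?;
                Ok(ShardConfig {
                    shard_id: id.trim().parse().map_err(|e| format!("{:?}", e))?,
                    num_shard: total.trim().parse().map_err(|e| format!("{:?}", e))?,
                })
            }
        }
    }
}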


@ -28,7 +28,6 @@ build_config! {
(discv5_report_discovered_peers, (bool), false) (discv5_report_discovered_peers, (bool), false)
(discv5_disable_packet_filter, (bool), false) (discv5_disable_packet_filter, (bool), false)
(discv5_disable_ip_limit, (bool), false) (discv5_disable_ip_limit, (bool), false)
(discv5_disable_enr_network_id, (bool), false)
// log sync // log sync
(blockchain_rpc_endpoint, (String), "http://127.0.0.1:8545".to_string()) (blockchain_rpc_endpoint, (String), "http://127.0.0.1:8545".to_string())
@ -49,8 +48,6 @@ build_config! {
(remove_finalized_block_interval_minutes, (u64), 30) (remove_finalized_block_interval_minutes, (u64), 30)
(watch_loop_wait_time_ms, (u64), 500) (watch_loop_wait_time_ms, (u64), 500)
(blockchain_rpc_timeout_secs, (u64), 120)
// chunk pool // chunk pool
(chunk_pool_write_window_size, (usize), 4) (chunk_pool_write_window_size, (usize), 4)
(chunk_pool_max_cached_chunks_all, (usize), 4*1024*1024) // 1G (chunk_pool_max_cached_chunks_all, (usize), 4*1024*1024) // 1G
@ -63,7 +60,6 @@ build_config! {
(prune_check_time_s, (u64), 60) (prune_check_time_s, (u64), 60)
(prune_batch_size, (usize), 16 * 1024) (prune_batch_size, (usize), 16 * 1024)
(prune_batch_wait_time_ms, (u64), 1000) (prune_batch_wait_time_ms, (u64), 1000)
(merkle_node_cache_capacity, (usize), 32 * 1024 * 1024)
// misc // misc
(log_config_file, (String), "log_config".to_string()) (log_config_file, (String), "log_config".to_string())
@ -90,9 +86,6 @@ pub struct ZgsConfig {
/// Network peer db config, configured by [network_peer_db] section by `config` crate. /// Network peer db config, configured by [network_peer_db] section by `config` crate.
pub network_peer_db: network::peer_manager::peerdb::PeerDBConfig, pub network_peer_db: network::peer_manager::peerdb::PeerDBConfig,
/// Network peer manager config, configured by [network_peer_manager] section by `config` crate.
pub network_peer_manager: network::peer_manager::config::Config,
// router config, configured by [router] section by `config` crate. // router config, configured by [router] section by `config` crate.
pub router: router::Config, pub router: router::Config,


@ -14,11 +14,9 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
let network_config = config.network_config().await?; let network_config = config.network_config().await?;
let storage_config = config.storage_config()?; let storage_config = config.storage_config()?;
let log_sync_config = config.log_sync_config()?; let log_sync_config = config.log_sync_config()?;
let chunk_pool_config = config.chunk_pool_config()?;
let miner_config = config.mine_config()?; let miner_config = config.mine_config()?;
let router_config = config.router_config(&network_config)?; let router_config = config.router_config(&network_config)?;
let pruner_config = config.pruner_config()?; let pruner_config = config.pruner_config()?;
let shard_config = config.shard_config()?;
ClientBuilder::default() ClientBuilder::default()
.with_runtime_context(context) .with_runtime_context(context)
@ -28,17 +26,13 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
.with_file_location_cache(config.file_location_cache) .with_file_location_cache(config.file_location_cache)
.with_network(&network_config) .with_network(&network_config)
.await? .await?
.with_chunk_pool(chunk_pool_config)
.await?
.with_sync(config.sync) .with_sync(config.sync)
.await? .await?
.with_miner(miner_config) .with_miner(miner_config)
.await? .await?
.with_shard(shard_config)
.await?
.with_pruner(pruner_config) .with_pruner(pruner_config)
.await? .await?
-        .with_rpc(config.rpc)
+        .with_rpc(config.rpc, config.chunk_pool_config()?)
.await? .await?
.with_router(router_config)? .with_router(router_config)?
.build() .build()


@ -11,4 +11,3 @@ task_executor = { path = "../../common/task_executor" }
tokio = { version = "1.19.2", features = ["sync"] } tokio = { version = "1.19.2", features = ["sync"] }
tracing = "0.1.35" tracing = "0.1.35"
eth2_ssz = "0.4.0" eth2_ssz = "0.4.0"
backtrace = "0.3"


@ -2,7 +2,6 @@
extern crate tracing; extern crate tracing;
use anyhow::bail; use anyhow::bail;
use backtrace::Backtrace;
use shared_types::{ use shared_types::{
Chunk, ChunkArray, ChunkArrayWithProof, DataRoot, FlowProof, FlowRangeProof, Transaction, Chunk, ChunkArray, ChunkArrayWithProof, DataRoot, FlowProof, FlowRangeProof, Transaction,
}; };
@@ -75,11 +74,9 @@ impl Store {
     pub async fn get_config_decoded<K: AsRef<[u8]> + Send + Sync, T: Decode + Send + 'static>(
         &self,
         key: &K,
-        dest: &str,
     ) -> Result<Option<T>> {
         let key = key.as_ref().to_vec();
-        let dest = dest.to_string();
-        self.spawn(move |store| store.get_config_decoded(&key, &dest))
+        self.spawn(move |store| store.get_config_decoded(&key))
             .await
     }

@@ -87,12 +84,10 @@ impl Store {
         &self,
         key: &K,
         value: &T,
-        dest: &str,
     ) -> anyhow::Result<()> {
         let key = key.as_ref().to_vec();
         let value = value.as_ssz_bytes();
-        let dest = dest.to_string();
-        self.spawn(move |store| store.set_config(&key, &value, &dest))
+        self.spawn(move |store| store.set_config(&key, &value))
             .await
     }
@ -140,9 +135,6 @@ impl Store {
{ {
let store = self.store.clone(); let store = self.store.clone();
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
let mut backtrace = Backtrace::new();
let frames = backtrace.frames().to_vec();
backtrace = frames.into();
self.executor.spawn_blocking( self.executor.spawn_blocking(
move || { move || {
@ -150,7 +142,6 @@ impl Store {
let res = f(&*store); let res = f(&*store);
if tx.send(res).is_err() { if tx.send(res).is_err() {
warn!("Backtrace: {:?}", backtrace);
error!("Unable to complete async storage operation: the receiver dropped"); error!("Unable to complete async storage operation: the receiver dropped");
} }
}, },
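The `spawn` helper above is the core of the async store wrapper: it moves a blocking closure onto a worker thread and hands the result back through a oneshot channel. A condensed, self-contained restatement of that pattern (illustrative names, not the crate's API; assumes the `tokio` and `anyhow` crates) looks like this:

use tokio::sync::oneshot;

async fn run_blocking<F, T>(f: F) -> anyhow::Result<T>
where
    F: FnOnce() -> anyhow::Result<T> + Send + 'static,
    T: Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    tokio::task::spawn_blocking(move || {
        // If the receiver is gone there is nobody left to notify; drop the result.
        let _ = tx.send(f());
    });
    rx.await
        .map_err(|_| anyhow::anyhow!("blocking task dropped the result"))?
}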


@ -29,11 +29,8 @@ itertools = "0.13.0"
serde = { version = "1.0.197", features = ["derive"] } serde = { version = "1.0.197", features = ["derive"] }
parking_lot = "0.12.3" parking_lot = "0.12.3"
serde_json = "1.0.127" serde_json = "1.0.127"
-tokio = { version = "1.38.0", features = ["full"] }
+tokio = { version = "1.10.0", features = ["sync"] }
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }
lazy_static = "1.4.0"
metrics = { workspace = true }
once_cell = { version = "1.19.0", features = [] }
[dev-dependencies] [dev-dependencies]
rand = "0.8.5" rand = "0.8.5"


@ -14,14 +14,18 @@ use storage::{
}, },
LogManager, LogManager,
}; };
use task_executor::test_utils::TestRuntime;
fn write_performance(c: &mut Criterion) { fn write_performance(c: &mut Criterion) {
if Path::new("db_write").exists() { if Path::new("db_write").exists() {
fs::remove_dir_all("db_write").unwrap(); fs::remove_dir_all("db_write").unwrap();
} }
let runtime = TestRuntime::default();
let executor = runtime.task_executor.clone();
let store: Arc<RwLock<dyn Store>> = Arc::new(RwLock::new( let store: Arc<RwLock<dyn Store>> = Arc::new(RwLock::new(
-        LogManager::rocksdb(LogConfig::default(), "db_flow_write", "db_data_write")
+        LogManager::rocksdb(LogConfig::default(), "db_write", executor)
.map_err(|e| format!("Unable to start RocksDB store: {:?}", e)) .map_err(|e| format!("Unable to start RocksDB store: {:?}", e))
.unwrap(), .unwrap(),
)); ));
@ -105,8 +109,12 @@ fn read_performance(c: &mut Criterion) {
fs::remove_dir_all("db_read").unwrap(); fs::remove_dir_all("db_read").unwrap();
} }
let runtime = TestRuntime::default();
let executor = runtime.task_executor.clone();
let store: Arc<RwLock<dyn Store>> = Arc::new(RwLock::new( let store: Arc<RwLock<dyn Store>> = Arc::new(RwLock::new(
-        LogManager::rocksdb(LogConfig::default(), "db_flow_read", "db_data_read")
+        LogManager::rocksdb(LogConfig::default(), "db_read", executor)
.map_err(|e| format!("Unable to start RocksDB store: {:?}", e)) .map_err(|e| format!("Unable to start RocksDB store: {:?}", e))
.unwrap(), .unwrap(),
)); ));


@ -1,4 +1,3 @@
use crate::log_store::log_manager::LogConfig;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use std::{cell::RefCell, path::PathBuf, rc::Rc, str::FromStr}; use std::{cell::RefCell, path::PathBuf, rc::Rc, str::FromStr};
@ -8,7 +7,6 @@ pub const SHARD_CONFIG_KEY: &str = "shard_config";
#[derive(Clone)] #[derive(Clone)]
pub struct Config { pub struct Config {
pub db_dir: PathBuf, pub db_dir: PathBuf,
pub log_config: LogConfig,
} }
#[derive(Clone, Copy, Debug, Decode, Encode, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Copy, Debug, Decode, Encode, Serialize, Deserialize, Eq, PartialEq)]


@ -2,55 +2,16 @@ use anyhow::{anyhow, Result};
use kvdb::{DBKey, DBOp}; use kvdb::{DBKey, DBOp};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use crate::log_store::log_manager::{COL_MISC, DATA_DB_KEY, FLOW_DB_KEY};
use crate::LogManager; use crate::LogManager;
macro_rules! db_operation { use super::log_manager::COL_MISC;
($self:expr, $dest:expr, get, $key:expr) => {{
let db = match $dest {
DATA_DB_KEY => &$self.data_db,
FLOW_DB_KEY => &$self.flow_db,
_ => return Err(anyhow!("Invalid destination")),
};
Ok(db.get(COL_MISC, $key)?)
}};
($self:expr, $dest:expr, put, $key:expr, $value:expr) => {{
let db = match $dest {
DATA_DB_KEY => &$self.data_db,
FLOW_DB_KEY => &$self.flow_db,
_ => return Err(anyhow!("Invalid destination")),
};
Ok(db.put(COL_MISC, $key, $value)?)
}};
($self:expr, $dest:expr, delete, $key:expr) => {{
let db = match $dest {
DATA_DB_KEY => &$self.data_db,
FLOW_DB_KEY => &$self.flow_db,
_ => return Err(anyhow!("Invalid destination")),
};
Ok(db.delete(COL_MISC, $key)?)
}};
($self:expr, $dest:expr, transaction, $tx:expr) => {{
let db = match $dest {
DATA_DB_KEY => &$self.data_db,
FLOW_DB_KEY => &$self.flow_db,
_ => return Err(anyhow!("Invalid destination")),
};
let mut db_tx = db.transaction();
db_tx.ops = $tx.ops;
Ok(db.write(db_tx)?)
}};
}
 pub trait Configurable {
-    fn get_config(&self, key: &[u8], dest: &str) -> Result<Option<Vec<u8>>>;
-    fn set_config(&self, key: &[u8], value: &[u8], dest: &str) -> Result<()>;
-    fn remove_config(&self, key: &[u8], dest: &str) -> Result<()>;
-    fn exec_configs(&self, tx: ConfigTx, dest: &str) -> Result<()>;
+    fn get_config(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
+    fn set_config(&self, key: &[u8], value: &[u8]) -> Result<()>;
+    fn remove_config(&self, key: &[u8]) -> Result<()>;
+    fn exec_configs(&self, tx: ConfigTx) -> Result<()>;
 }

 #[derive(Default)]
@@ -80,12 +41,8 @@ impl ConfigTx {
 }

 pub trait ConfigurableExt: Configurable {
-    fn get_config_decoded<K: AsRef<[u8]>, T: Decode>(
-        &self,
-        key: &K,
-        dest: &str,
-    ) -> Result<Option<T>> {
-        match self.get_config(key.as_ref(), dest)? {
+    fn get_config_decoded<K: AsRef<[u8]>, T: Decode>(&self, key: &K) -> Result<Option<T>> {
+        match self.get_config(key.as_ref())? {
             Some(val) => Ok(Some(
                 T::from_ssz_bytes(&val).map_err(|e| anyhow!("SSZ decode error: {:?}", e))?,
             )),
@@ -93,36 +50,36 @@ pub trait ConfigurableExt: Configurable {
         }
     }

-    fn set_config_encoded<K: AsRef<[u8]>, T: Encode>(
-        &self,
-        key: &K,
-        value: &T,
-        dest: &str,
-    ) -> Result<()> {
-        self.set_config(key.as_ref(), &value.as_ssz_bytes(), dest)
+    fn set_config_encoded<K: AsRef<[u8]>, T: Encode>(&self, key: &K, value: &T) -> Result<()> {
+        self.set_config(key.as_ref(), &value.as_ssz_bytes())
     }

-    fn remove_config_by_key<K: AsRef<[u8]>>(&self, key: &K, dest: &str) -> Result<()> {
-        self.remove_config(key.as_ref(), dest)
+    fn remove_config_by_key<K: AsRef<[u8]>>(&self, key: &K) -> Result<()> {
+        self.remove_config(key.as_ref())
     }
 }

 impl<T: ?Sized + Configurable> ConfigurableExt for T {}

 impl Configurable for LogManager {
-    fn get_config(&self, key: &[u8], dest: &str) -> Result<Option<Vec<u8>>> {
-        db_operation!(self, dest, get, key)
+    fn get_config(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
+        Ok(self.db.get(COL_MISC, key)?)
     }

-    fn set_config(&self, key: &[u8], value: &[u8], dest: &str) -> Result<()> {
-        db_operation!(self, dest, put, key, value)
+    fn set_config(&self, key: &[u8], value: &[u8]) -> Result<()> {
+        self.db.put(COL_MISC, key, value)?;
+        Ok(())
     }

-    fn remove_config(&self, key: &[u8], dest: &str) -> Result<()> {
-        db_operation!(self, dest, delete, key)
+    fn remove_config(&self, key: &[u8]) -> Result<()> {
+        Ok(self.db.delete(COL_MISC, key)?)
     }

-    fn exec_configs(&self, tx: ConfigTx, dest: &str) -> Result<()> {
-        db_operation!(self, dest, transaction, tx)
+    fn exec_configs(&self, tx: ConfigTx) -> Result<()> {
+        let mut db_tx = self.db.transaction();
+        db_tx.ops = tx.ops;
+        self.db.write(db_tx)?;
+        Ok(())
     }
 }
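On the left-hand side of this hunk every accessor carries a `dest` selector that `db_operation!` uses to pick between the flow and data databases. A toy stand-in (self-contained and deliberately simplified; the real trait works over `ZgsKeyValueDB` columns and SSZ-encoded values) shows how such a destination-keyed API is used:

use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::sync::Mutex;

const FLOW_DB_KEY: &str = "flow_db";
const DATA_DB_KEY: &str = "data_db";

#[derive(Default)]
struct TwoDbStore {
    flow_db: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    data_db: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}

impl TwoDbStore {
    fn set_config(&self, key: &[u8], value: &[u8], dest: &str) -> Result<()> {
        let db = match dest {
            FLOW_DB_KEY => &self.flow_db,
            DATA_DB_KEY => &self.data_db,
            _ => return Err(anyhow!("Invalid destination")),
        };
        db.lock().unwrap().insert(key.to_vec(), value.to_vec());
        Ok(())
    }

    fn get_config(&self, key: &[u8], dest: &str) -> Result<Option<Vec<u8>>> {
        let db = match dest {
            FLOW_DB_KEY => &self.flow_db,
            DATA_DB_KEY => &self.data_db,
            _ => return Err(anyhow!("Invalid destination")),
        };
        Ok(db.lock().unwrap().get(key).cloned())
    }
}

fn main() -> Result<()> {
    let store = TwoDbStore::default();
    // A config value routed to the data DB is invisible to the flow DB.
    store.set_config(b"shard_config", b"value", DATA_DB_KEY)?;
    assert_eq!(store.get_config(b"shard_config", FLOW_DB_KEY)?, None);
    assert!(store.get_config(b"shard_config", DATA_DB_KEY)?.is_some());
    Ok(())
}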


@@ -1,69 +1,66 @@
+use super::load_chunk::EntryBatch;
+use super::seal_task_manager::SealTaskManager;
+use super::{MineLoadChunk, SealAnswer, SealTask};
 use crate::config::ShardConfig;
 use crate::error::Error;
-use crate::log_store::load_chunk::EntryBatch;
 use crate::log_store::log_manager::{
-    bytes_to_entries, COL_ENTRY_BATCH, COL_FLOW_MPT_NODES, COL_PAD_DATA_LIST,
-    COL_PAD_DATA_SYNC_HEIGH, PORA_CHUNK_SIZE,
+    bytes_to_entries, data_to_merkle_leaves, COL_ENTRY_BATCH, COL_ENTRY_BATCH_ROOT,
+    COL_FLOW_MPT_NODES, ENTRY_SIZE, PORA_CHUNK_SIZE,
 };
-use crate::log_store::seal_task_manager::SealTaskManager;
-use crate::log_store::{
-    metrics, FlowRead, FlowSeal, FlowWrite, MineLoadChunk, SealAnswer, SealTask,
-};
+use crate::log_store::{FlowRead, FlowSeal, FlowWrite};
 use crate::{try_option, ZgsKeyValueDB};
-use any::Any;
 use anyhow::{anyhow, bail, Result};
-use append_merkle::{MerkleTreeRead, NodeDatabase, NodeTransaction};
+use append_merkle::{MerkleTreeInitialData, MerkleTreeRead};
 use itertools::Itertools;
-use kvdb::DBTransaction;
 use parking_lot::RwLock;
-use shared_types::{ChunkArray, DataRoot, FlowProof};
+use shared_types::{ChunkArray, DataRoot, FlowProof, Merkle};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode};
-use std::cmp::Ordering;
-use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::sync::Arc;
-use std::time::Instant;
-use std::{any, cmp};
+use std::{cmp, mem};
 use tracing::{debug, error, trace};
 use zgs_spec::{BYTES_PER_SECTOR, SEALS_PER_LOAD, SECTORS_PER_LOAD, SECTORS_PER_SEAL};
 pub struct FlowStore {
-    flow_db: Arc<FlowDBStore>,
-    data_db: Arc<FlowDBStore>,
+    db: FlowDBStore,
     seal_manager: SealTaskManager,
     config: FlowConfig,
 }

 impl FlowStore {
-    pub fn new(flow_db: Arc<FlowDBStore>, data_db: Arc<FlowDBStore>, config: FlowConfig) -> Self {
+    pub fn new(db: Arc<dyn ZgsKeyValueDB>, config: FlowConfig) -> Self {
         Self {
-            flow_db,
-            data_db,
+            db: FlowDBStore::new(db),
            seal_manager: Default::default(),
            config,
        }
    }
pub fn put_batch_root_list(&self, root_map: BTreeMap<usize, (DataRoot, usize)>) -> Result<()> {
self.db.put_batch_root_list(root_map)
}
pub fn insert_subtree_list_for_batch( pub fn insert_subtree_list_for_batch(
&self, &self,
batch_index: usize, batch_index: usize,
subtree_list: Vec<(usize, usize, DataRoot)>, subtree_list: Vec<(usize, usize, DataRoot)>,
) -> Result<()> { ) -> Result<()> {
let start_time = Instant::now();
let mut batch = self let mut batch = self
.data_db .db
.get_entry_batch(batch_index as u64)? .get_entry_batch(batch_index as u64)?
.unwrap_or_else(|| EntryBatch::new(batch_index as u64)); .unwrap_or_else(|| EntryBatch::new(batch_index as u64));
batch.set_subtree_list(subtree_list); batch.set_subtree_list(subtree_list);
self.data_db self.db.put_entry_raw(vec![(batch_index as u64, batch)])?;
.put_entry_raw(vec![(batch_index as u64, batch)])?;
metrics::INSERT_SUBTREE_LIST.update_since(start_time);
Ok(()) Ok(())
} }
pub fn gen_proof_in_batch(&self, batch_index: usize, sector_index: usize) -> Result<FlowProof> { pub fn gen_proof_in_batch(&self, batch_index: usize, sector_index: usize) -> Result<FlowProof> {
let batch = self let batch = self
.data_db .db
.get_entry_batch(batch_index as u64)? .get_entry_batch(batch_index as u64)?
.ok_or_else(|| anyhow!("batch missing, index={}", batch_index))?; .ok_or_else(|| anyhow!("batch missing, index={}", batch_index))?;
let merkle = batch.to_merkle_tree(batch_index == 0)?.ok_or_else(|| { let merkle = batch.to_merkle_tree(batch_index == 0)?.ok_or_else(|| {
@ -75,16 +72,27 @@ impl FlowStore {
merkle.gen_proof(sector_index) merkle.gen_proof(sector_index)
} }
pub fn put_mpt_node_list(&self, node_list: Vec<(usize, usize, DataRoot)>) -> Result<()> {
self.db.put_mpt_node_list(node_list)
}
pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> { pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
self.seal_manager.delete_batch_list(batch_list); self.seal_manager.delete_batch_list(batch_list);
self.data_db.delete_batch_list(batch_list) self.db.delete_batch_list(batch_list)
}
pub fn get_raw_batch(&self, batch_index: u64) -> Result<Option<EntryBatch>> {
self.db.get_entry_batch(batch_index)
}
pub fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
self.db.get_batch_root(batch_index)
} }
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct FlowConfig { pub struct FlowConfig {
pub batch_size: usize, pub batch_size: usize,
pub merkle_node_cache_capacity: usize,
pub shard_config: Arc<RwLock<ShardConfig>>, pub shard_config: Arc<RwLock<ShardConfig>>,
} }
@ -92,8 +100,6 @@ impl Default for FlowConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
batch_size: SECTORS_PER_LOAD, batch_size: SECTORS_PER_LOAD,
// Each node takes (8+8+32=)48 Bytes, so the default value is 1.5 GB memory size.
merkle_node_cache_capacity: 32 * 1024 * 1024,
shard_config: Default::default(), shard_config: Default::default(),
} }
} }
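For reference, the cache-size arithmetic in the comment above is exact: 48 bytes per node × 32 × 1024 × 1024 cached nodes = 1,610,612,736 bytes, which is precisely 1.5 GiB.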
@ -123,7 +129,7 @@ impl FlowRead for FlowStore {
length -= 1; length -= 1;
} }
let entry_batch = try_option!(self.data_db.get_entry_batch(chunk_index)?); let entry_batch = try_option!(self.db.get_entry_batch(chunk_index)?);
let mut entry_batch_data = let mut entry_batch_data =
try_option!(entry_batch.get_unsealed_data(offset as usize, length as usize)); try_option!(entry_batch.get_unsealed_data(offset as usize, length as usize));
data.append(&mut entry_batch_data); data.append(&mut entry_batch_data);
@ -152,7 +158,7 @@ impl FlowRead for FlowStore {
let chunk_index = start_entry_index / self.config.batch_size as u64; let chunk_index = start_entry_index / self.config.batch_size as u64;
if let Some(mut data_list) = self if let Some(mut data_list) = self
.data_db .db
.get_entry_batch(chunk_index)? .get_entry_batch(chunk_index)?
.map(|b| b.into_data_list(start_entry_index)) .map(|b| b.into_data_list(start_entry_index))
{ {
@ -176,8 +182,13 @@ impl FlowRead for FlowStore {
Ok(entry_list) Ok(entry_list)
} }
/// Return the list of all stored chunk roots.
fn get_chunk_root_list(&self) -> Result<MerkleTreeInitialData<DataRoot>> {
self.db.get_batch_root_list()
}
fn load_sealed_data(&self, chunk_index: u64) -> Result<Option<MineLoadChunk>> { fn load_sealed_data(&self, chunk_index: u64) -> Result<Option<MineLoadChunk>> {
let batch = try_option!(self.data_db.get_entry_batch(chunk_index)?); let batch = try_option!(self.db.get_entry_batch(chunk_index)?);
let mut mine_chunk = MineLoadChunk::default(); let mut mine_chunk = MineLoadChunk::default();
for (seal_index, (sealed, validity)) in mine_chunk for (seal_index, (sealed, validity)) in mine_chunk
.loaded_chunk .loaded_chunk
@ -195,7 +206,7 @@ impl FlowRead for FlowStore {
fn get_num_entries(&self) -> Result<u64> { fn get_num_entries(&self) -> Result<u64> {
// This is an over-estimation as it assumes each batch is full. // This is an over-estimation as it assumes each batch is full.
self.data_db self.db
.kvdb .kvdb
.num_keys(COL_ENTRY_BATCH) .num_keys(COL_ENTRY_BATCH)
.map(|num_batches| num_batches * PORA_CHUNK_SIZE as u64) .map(|num_batches| num_batches * PORA_CHUNK_SIZE as u64)
@ -205,21 +216,12 @@ impl FlowRead for FlowStore {
fn get_shard_config(&self) -> ShardConfig { fn get_shard_config(&self) -> ShardConfig {
*self.config.shard_config.read() *self.config.shard_config.read()
} }
fn get_pad_data(&self, start_index: u64) -> crate::error::Result<Option<Vec<PadPair>>> {
self.flow_db.get_pad_data(start_index)
}
fn get_pad_data_sync_height(&self) -> Result<Option<u64>> {
self.data_db.get_pad_data_sync_height()
}
} }
impl FlowWrite for FlowStore { impl FlowWrite for FlowStore {
/// Return the roots of completed chunks. The order is guaranteed to be increasing /// Return the roots of completed chunks. The order is guaranteed to be increasing
/// by chunk index. /// by chunk index.
fn append_entries(&self, data: ChunkArray) -> Result<Vec<(u64, DataRoot)>> { fn append_entries(&self, data: ChunkArray) -> Result<Vec<(u64, DataRoot)>> {
let start_time = Instant::now();
let mut to_seal_set = self.seal_manager.to_seal_set.write(); let mut to_seal_set = self.seal_manager.to_seal_set.write();
trace!("append_entries: {} {}", data.start_index, data.data.len()); trace!("append_entries: {} {}", data.start_index, data.data.len());
if data.data.len() % BYTES_PER_SECTOR != 0 { if data.data.len() % BYTES_PER_SECTOR != 0 {
@ -244,7 +246,7 @@ impl FlowWrite for FlowStore {
// TODO: Try to avoid loading from db if possible. // TODO: Try to avoid loading from db if possible.
let mut batch = self let mut batch = self
.data_db .db
.get_entry_batch(chunk_index)? .get_entry_batch(chunk_index)?
.unwrap_or_else(|| EntryBatch::new(chunk_index)); .unwrap_or_else(|| EntryBatch::new(chunk_index));
let completed_seals = batch.insert_data( let completed_seals = batch.insert_data(
@ -262,14 +264,12 @@ impl FlowWrite for FlowStore {
batch_list.push((chunk_index, batch)); batch_list.push((chunk_index, batch));
} }
self.db.put_entry_batch_list(batch_list)
metrics::APPEND_ENTRIES.update_since(start_time);
self.data_db.put_entry_batch_list(batch_list)
} }
fn truncate(&self, start_index: u64) -> crate::error::Result<()> { fn truncate(&self, start_index: u64) -> crate::error::Result<()> {
let mut to_seal_set = self.seal_manager.to_seal_set.write(); let mut to_seal_set = self.seal_manager.to_seal_set.write();
let to_reseal = self.data_db.truncate(start_index, self.config.batch_size)?; let to_reseal = self.db.truncate(start_index, self.config.batch_size)?;
to_seal_set.split_off(&(start_index as usize / SECTORS_PER_SEAL)); to_seal_set.split_off(&(start_index as usize / SECTORS_PER_SEAL));
let new_seal_version = self.seal_manager.inc_seal_version(); let new_seal_version = self.seal_manager.inc_seal_version();
@ -283,14 +283,6 @@ impl FlowWrite for FlowStore {
fn update_shard_config(&self, shard_config: ShardConfig) { fn update_shard_config(&self, shard_config: ShardConfig) {
*self.config.shard_config.write() = shard_config; *self.config.shard_config.write() = shard_config;
} }
fn put_pad_data(&self, data_sizes: &[PadPair], tx_seq: u64) -> crate::error::Result<()> {
self.flow_db.put_pad_data(data_sizes, tx_seq)
}
fn put_pad_data_sync_height(&self, sync_index: u64) -> crate::error::Result<()> {
self.data_db.put_pad_data_sync_height(sync_index)
}
} }
impl FlowSeal for FlowStore { impl FlowSeal for FlowStore {
@ -307,7 +299,7 @@ impl FlowSeal for FlowStore {
let mut tasks = Vec::with_capacity(SEALS_PER_LOAD); let mut tasks = Vec::with_capacity(SEALS_PER_LOAD);
let batch_data = self let batch_data = self
.data_db .db
.get_entry_batch((first_index / SEALS_PER_LOAD) as u64)? .get_entry_batch((first_index / SEALS_PER_LOAD) as u64)?
.expect("Lost data chunk in to_seal_set"); .expect("Lost data chunk in to_seal_set");
@ -346,7 +338,7 @@ impl FlowSeal for FlowStore {
.chunk_by(|answer| answer.seal_index / SEALS_PER_LOAD as u64) .chunk_by(|answer| answer.seal_index / SEALS_PER_LOAD as u64)
{ {
let mut batch_chunk = self let mut batch_chunk = self
.data_db .db
.get_entry_batch(load_index)? .get_entry_batch(load_index)?
.expect("Can not find chunk data"); .expect("Can not find chunk data");
for answer in answers_in_chunk { for answer in answers_in_chunk {
@ -362,18 +354,12 @@ impl FlowSeal for FlowStore {
to_seal_set.remove(&idx); to_seal_set.remove(&idx);
} }
self.data_db.put_entry_raw(updated_chunk)?; self.db.put_entry_raw(updated_chunk)?;
Ok(()) Ok(())
} }
} }
#[derive(Debug, PartialEq, DeriveEncode, DeriveDecode)]
pub struct PadPair {
pub start_index: u64,
pub data_size: u64,
}
pub struct FlowDBStore { pub struct FlowDBStore {
kvdb: Arc<dyn ZgsKeyValueDB>, kvdb: Arc<dyn ZgsKeyValueDB>,
} }
@ -387,7 +373,6 @@ impl FlowDBStore {
&self, &self,
batch_list: Vec<(u64, EntryBatch)>, batch_list: Vec<(u64, EntryBatch)>,
) -> Result<Vec<(u64, DataRoot)>> { ) -> Result<Vec<(u64, DataRoot)>> {
let start_time = Instant::now();
let mut completed_batches = Vec::new(); let mut completed_batches = Vec::new();
let mut tx = self.kvdb.transaction(); let mut tx = self.kvdb.transaction();
for (batch_index, batch) in batch_list { for (batch_index, batch) in batch_list {
@ -398,11 +383,16 @@ impl FlowDBStore {
); );
if let Some(root) = batch.build_root(batch_index == 0)? { if let Some(root) = batch.build_root(batch_index == 0)? {
trace!("complete batch: index={}", batch_index); trace!("complete batch: index={}", batch_index);
tx.put(
COL_ENTRY_BATCH_ROOT,
// (batch_index, subtree_depth)
&encode_batch_root_key(batch_index as usize, 1),
root.as_bytes(),
);
completed_batches.push((batch_index, root)); completed_batches.push((batch_index, root));
} }
} }
self.kvdb.write(tx)?; self.kvdb.write(tx)?;
metrics::PUT_ENTRY_BATCH_LIST.update_since(start_time);
Ok(completed_batches) Ok(completed_batches)
} }
@ -424,6 +414,94 @@ impl FlowDBStore {
Ok(Some(EntryBatch::from_ssz_bytes(&raw).map_err(Error::from)?)) Ok(Some(EntryBatch::from_ssz_bytes(&raw).map_err(Error::from)?))
} }
fn put_batch_root_list(&self, root_map: BTreeMap<usize, (DataRoot, usize)>) -> Result<()> {
let mut tx = self.kvdb.transaction();
for (batch_index, (root, subtree_depth)) in root_map {
tx.put(
COL_ENTRY_BATCH_ROOT,
&encode_batch_root_key(batch_index, subtree_depth),
root.as_bytes(),
);
}
Ok(self.kvdb.write(tx)?)
}
fn get_batch_root_list(&self) -> Result<MerkleTreeInitialData<DataRoot>> {
let mut range_root = None;
// A list of `BatchRoot` that can reconstruct the whole merkle tree structure.
let mut root_list = Vec::new();
// A list of leaf `(index, root_hash)` in the subtrees of some nodes in `root_list`,
// and they will be updated in the merkle tree with `fill_leaf` by the caller.
let mut leaf_list = Vec::new();
let mut expected_index = 0;
let empty_data = vec![0; PORA_CHUNK_SIZE * ENTRY_SIZE];
let empty_root = *Merkle::new(data_to_merkle_leaves(&empty_data)?, 0, None).root();
for r in self.kvdb.iter(COL_ENTRY_BATCH_ROOT) {
let (index_bytes, root_bytes) = r?;
let (batch_index, subtree_depth) = decode_batch_root_key(index_bytes.as_ref())?;
let root = DataRoot::from_slice(root_bytes.as_ref());
debug!(
"load root depth={}, index expected={} get={} root={:?}",
subtree_depth, expected_index, batch_index, root,
);
if subtree_depth == 1 {
if range_root.is_none() {
// This is expected to be the next leaf.
if batch_index == expected_index {
root_list.push((1, root));
expected_index += 1;
} else {
bail!(
"unexpected chunk leaf, expected={}, get={}",
expected_index,
batch_index
);
}
} else {
match batch_index.cmp(&expected_index) {
Ordering::Less => {
// This leaf is within a subtree whose root is known.
leaf_list.push((batch_index, root));
}
Ordering::Equal => {
// A subtree range ends.
range_root = None;
root_list.push((1, root));
expected_index += 1;
}
Ordering::Greater => {
while batch_index > expected_index {
// Fill the gap with empty leaves.
root_list.push((1, empty_root));
expected_index += 1;
}
range_root = None;
root_list.push((1, root));
expected_index += 1;
}
}
}
} else {
while batch_index > expected_index {
// Fill the gap with empty leaves.
root_list.push((1, empty_root));
expected_index += 1;
}
range_root = Some(BatchRoot::Multiple((subtree_depth, root)));
root_list.push((subtree_depth, root));
expected_index += 1 << (subtree_depth - 1);
}
}
let extra_node_list = self.get_mpt_node_list()?;
Ok(MerkleTreeInitialData {
subtree_list: root_list,
known_leaves: leaf_list,
extra_mpt_nodes: extra_node_list,
})
}
fn truncate(&self, start_index: u64, batch_size: usize) -> crate::error::Result<Vec<usize>> { fn truncate(&self, start_index: u64, batch_size: usize) -> crate::error::Result<Vec<usize>> {
let mut tx = self.kvdb.transaction(); let mut tx = self.kvdb.transaction();
let mut start_batch_index = start_index / batch_size as u64; let mut start_batch_index = start_index / batch_size as u64;
@ -464,11 +542,38 @@ impl FlowDBStore {
}; };
for batch_index in start_batch_index as usize..=end { for batch_index in start_batch_index as usize..=end {
tx.delete(COL_ENTRY_BATCH, &batch_index.to_be_bytes()); tx.delete(COL_ENTRY_BATCH, &batch_index.to_be_bytes());
tx.delete_prefix(COL_ENTRY_BATCH_ROOT, &batch_index.to_be_bytes());
} }
self.kvdb.write(tx)?; self.kvdb.write(tx)?;
Ok(index_to_reseal) Ok(index_to_reseal)
} }
fn put_mpt_node_list(&self, mpt_node_list: Vec<(usize, usize, DataRoot)>) -> Result<()> {
let mut tx = self.kvdb.transaction();
for (layer_index, position, data) in mpt_node_list {
tx.put(
COL_FLOW_MPT_NODES,
&encode_mpt_node_key(layer_index, position),
data.as_bytes(),
);
}
Ok(self.kvdb.write(tx)?)
}
fn get_mpt_node_list(&self) -> Result<Vec<(usize, usize, DataRoot)>> {
let mut node_list = Vec::new();
for r in self.kvdb.iter(COL_FLOW_MPT_NODES) {
let (index_bytes, node_bytes) = r?;
let (layer_index, position) = decode_mpt_node_key(index_bytes.as_ref())?;
node_list.push((
layer_index,
position,
DataRoot::from_slice(node_bytes.as_ref()),
));
}
Ok(node_list)
}
fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> { fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
let mut tx = self.kvdb.transaction(); let mut tx = self.kvdb.transaction();
for i in batch_list { for i in batch_list {
@ -477,46 +582,14 @@ impl FlowDBStore {
Ok(self.kvdb.write(tx)?) Ok(self.kvdb.write(tx)?)
} }
-    fn put_pad_data(&self, data_sizes: &[PadPair], tx_seq: u64) -> Result<()> {
-        let mut tx = self.kvdb.transaction();
-        let mut buffer = Vec::new();
-        for item in data_sizes {
-            buffer.extend(item.as_ssz_bytes());
-        }
-        tx.put(COL_PAD_DATA_LIST, &tx_seq.to_be_bytes(), &buffer);
-        self.kvdb.write(tx)?;
-        Ok(())
-    }
-
-    fn put_pad_data_sync_height(&self, tx_seq: u64) -> Result<()> {
-        let mut tx = self.kvdb.transaction();
-        tx.put(
-            COL_PAD_DATA_SYNC_HEIGH,
-            b"sync_height",
-            &tx_seq.to_be_bytes(),
-        );
-        self.kvdb.write(tx)?;
-        Ok(())
-    }
-
-    fn get_pad_data_sync_height(&self) -> Result<Option<u64>> {
-        match self.kvdb.get(COL_PAD_DATA_SYNC_HEIGH, b"sync_height")? {
-            Some(v) => Ok(Some(u64::from_be_bytes(
-                v.try_into().map_err(|e| anyhow!("{:?}", e))?,
-            ))),
-            None => Ok(None),
-        }
-    }
-
-    fn get_pad_data(&self, tx_seq: u64) -> Result<Option<Vec<PadPair>>> {
-        match self.kvdb.get(COL_PAD_DATA_LIST, &tx_seq.to_be_bytes())? {
-            Some(v) => Ok(Some(
-                Vec::<PadPair>::from_ssz_bytes(&v).map_err(Error::from)?,
-            )),
-            None => Ok(None),
-        }
-    }
+    fn get_batch_root(&self, batch_index: u64) -> Result<Option<DataRoot>> {
+        Ok(self
+            .kvdb
+            .get(
+                COL_ENTRY_BATCH_ROOT,
+                &encode_batch_root_key(batch_index as usize, 1),
+            )?
+            .map(|v| DataRoot::from_slice(&v)))
+    }
 }
@ -563,89 +636,33 @@ fn decode_batch_index(data: &[u8]) -> Result<usize> {
try_decode_usize(data) try_decode_usize(data)
} }
/// For the same batch_index, we want to process the larger subtree_depth first in iteration.
fn encode_batch_root_key(batch_index: usize, subtree_depth: usize) -> Vec<u8> {
let mut key = batch_index.to_be_bytes().to_vec();
key.extend_from_slice(&(usize::MAX - subtree_depth).to_be_bytes());
key
}
fn decode_batch_root_key(data: &[u8]) -> Result<(usize, usize)> {
if data.len() != mem::size_of::<usize>() * 2 {
bail!("invalid data length");
}
let batch_index = try_decode_usize(&data[..mem::size_of::<u64>()])?;
let subtree_depth = usize::MAX - try_decode_usize(&data[mem::size_of::<u64>()..])?;
Ok((batch_index, subtree_depth))
}
fn encode_mpt_node_key(layer_index: usize, position: usize) -> Vec<u8> { fn encode_mpt_node_key(layer_index: usize, position: usize) -> Vec<u8> {
let mut key = layer_index.to_be_bytes().to_vec(); let mut key = layer_index.to_be_bytes().to_vec();
key.extend_from_slice(&position.to_be_bytes()); key.extend_from_slice(&position.to_be_bytes());
key key
} }
fn layer_size_key(layer: usize) -> Vec<u8> { fn decode_mpt_node_key(data: &[u8]) -> Result<(usize, usize)> {
let mut key = "layer_size".as_bytes().to_vec(); if data.len() != mem::size_of::<usize>() * 2 {
key.extend_from_slice(&layer.to_be_bytes()); bail!("invalid data length");
key
}
pub struct NodeDBTransaction(DBTransaction);
impl NodeDatabase<DataRoot> for FlowDBStore {
fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> {
Ok(self
.kvdb
.get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
.map(|v| DataRoot::from_slice(&v)))
}
fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
match self.kvdb.get(COL_FLOW_MPT_NODES, &layer_size_key(layer))? {
Some(v) => Ok(Some(try_decode_usize(&v)?)),
None => Ok(None),
}
}
fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> {
Box::new(NodeDBTransaction(self.kvdb.transaction()))
}
fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> {
let db_tx: Box<NodeDBTransaction> = tx
.into_any()
.downcast()
.map_err(|e| anyhow!("downcast failed, e={:?}", e))?;
self.kvdb.write(db_tx.0).map_err(Into::into)
}
}
impl NodeTransaction<DataRoot> for NodeDBTransaction {
fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) {
self.0.put(
COL_FLOW_MPT_NODES,
&encode_mpt_node_key(layer, pos),
node.as_bytes(),
);
}
fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) {
for (layer_index, position, data) in nodes {
self.0.put(
COL_FLOW_MPT_NODES,
&encode_mpt_node_key(*layer_index, *position),
data.as_bytes(),
);
}
}
fn remove_node_list(&mut self, nodes: &[(usize, usize)]) {
for (layer_index, position) in nodes {
self.0.delete(
COL_FLOW_MPT_NODES,
&encode_mpt_node_key(*layer_index, *position),
);
}
}
fn save_layer_size(&mut self, layer: usize, size: usize) {
self.0.put(
COL_FLOW_MPT_NODES,
&layer_size_key(layer),
&size.to_be_bytes(),
);
}
fn remove_layer_size(&mut self, layer: usize) {
self.0.delete(COL_FLOW_MPT_NODES, &layer_size_key(layer));
}
fn into_any(self: Box<Self>) -> Box<dyn Any> {
self
} }
let layer_index = try_decode_usize(&data[..mem::size_of::<u64>()])?;
let position = try_decode_usize(&data[mem::size_of::<u64>()..])?;
Ok((layer_index, position))
} }
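The ordering trick behind `encode_batch_root_key` deserves a note: because keys are compared big-endian and byte-wise, storing `usize::MAX - subtree_depth` makes deeper subtrees sort before shallower ones for the same batch index, which is what the "process the larger subtree_depth first" comment relies on. A standalone check, reusing the helper exactly as written above (assumes a 64-bit target):

fn encode_batch_root_key(batch_index: usize, subtree_depth: usize) -> Vec<u8> {
    let mut key = batch_index.to_be_bytes().to_vec();
    key.extend_from_slice(&(usize::MAX - subtree_depth).to_be_bytes());
    key
}

fn main() {
    let shallow = encode_batch_root_key(7, 1);
    let deep = encode_batch_root_key(7, 5);
    // Same batch index, but the deeper subtree encodes to the lexicographically
    // smaller key, so an ordered iterator (e.g. over RocksDB) yields it first.
    assert!(deep < shallow);
}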


@ -128,12 +128,6 @@ impl DataRange for Subtree {
} }
} }
impl Default for EntryBatchData {
fn default() -> Self {
Self::new()
}
}
impl EntryBatchData { impl EntryBatchData {
pub fn new() -> Self { pub fn new() -> Self {
EntryBatchData::Incomplete(IncompleteData { EntryBatchData::Incomplete(IncompleteData {


@ -4,10 +4,11 @@ mod seal;
mod serde; mod serde;
use ::serde::{Deserialize, Serialize}; use ::serde::{Deserialize, Serialize};
use std::cmp::min;
use anyhow::Result; use anyhow::Result;
use ethereum_types::H256; use ethereum_types::H256;
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use std::cmp::min;
use crate::log_store::log_manager::data_to_merkle_leaves; use crate::log_store::log_manager::data_to_merkle_leaves;
use crate::try_option; use crate::try_option;
@ -20,7 +21,7 @@ use zgs_spec::{
}; };
use super::SealAnswer; use super::SealAnswer;
pub use chunk_data::EntryBatchData; use chunk_data::EntryBatchData;
use seal::SealInfo; use seal::SealInfo;
#[derive(Debug, Encode, Decode, Deserialize, Serialize)] #[derive(Debug, Encode, Decode, Deserialize, Serialize)]
@ -205,7 +206,7 @@ impl EntryBatch {
} }
} }
Ok(Some( Ok(Some(
try_option!(self.to_merkle_tree(is_first_chunk)?).root(), *try_option!(self.to_merkle_tree(is_first_chunk)?).root(),
)) ))
} }


@ -1,11 +1,8 @@
use crate::config::ShardConfig; use crate::config::ShardConfig;
use crate::log_store::flow_store::{ use crate::log_store::flow_store::{batch_iter_sharded, FlowConfig, FlowStore};
batch_iter_sharded, FlowConfig, FlowDBStore, FlowStore, PadPair, use crate::log_store::tx_store::TransactionStore;
};
use crate::log_store::tx_store::{BlockHashAndSubmissionIndex, TransactionStore, TxStatus};
use crate::log_store::{ use crate::log_store::{
FlowRead, FlowSeal, FlowWrite, LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, FlowRead, FlowWrite, LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite,
LogStoreWrite, MineLoadChunk, SealAnswer, SealTask,
}; };
use crate::{try_option, ZgsKeyValueDB}; use crate::{try_option, ZgsKeyValueDB};
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
@ -14,7 +11,6 @@ use ethereum_types::H256;
use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb_rocksdb::{Database, DatabaseConfig};
use merkle_light::merkle::{log2_pow2, MerkleTree}; use merkle_light::merkle::{log2_pow2, MerkleTree};
use merkle_tree::RawLeafSha3Algorithm; use merkle_tree::RawLeafSha3Algorithm;
use once_cell::sync::Lazy;
use parking_lot::RwLock; use parking_lot::RwLock;
use rayon::iter::ParallelIterator; use rayon::iter::ParallelIterator;
use rayon::prelude::ParallelSlice; use rayon::prelude::ParallelSlice;
@ -23,57 +19,46 @@ use shared_types::{
ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Merkle, Transaction, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Merkle, Transaction,
}; };
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::path::Path; use std::path::Path;
use std::sync::mpsc;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant};
use tracing::{debug, error, info, instrument, trace, warn}; use tracing::{debug, error, info, instrument, trace, warn};
use crate::log_store::metrics; use super::tx_store::BlockHashAndSubmissionIndex;
use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
/// 256 Bytes /// 256 Bytes
pub const ENTRY_SIZE: usize = 256; pub const ENTRY_SIZE: usize = 256;
/// 1024 Entries. /// 1024 Entries.
pub const PORA_CHUNK_SIZE: usize = 1024; pub const PORA_CHUNK_SIZE: usize = 1024;
-pub const COL_TX: u32 = 0; // flow db
-pub const COL_ENTRY_BATCH: u32 = 1; // data db
-pub const COL_TX_DATA_ROOT_INDEX: u32 = 2; // flow db
-pub const COL_TX_COMPLETED: u32 = 3; // data db
-pub const COL_MISC: u32 = 4; // flow db & data db
-pub const COL_FLOW_MPT_NODES: u32 = 5; // flow db
-pub const COL_BLOCK_PROGRESS: u32 = 6; // flow db
-pub const COL_PAD_DATA_LIST: u32 = 7; // flow db
-pub const COL_PAD_DATA_SYNC_HEIGH: u32 = 8; // data db
+pub const COL_TX: u32 = 0;
+pub const COL_ENTRY_BATCH: u32 = 1;
+pub const COL_TX_DATA_ROOT_INDEX: u32 = 2;
+pub const COL_ENTRY_BATCH_ROOT: u32 = 3;
+pub const COL_TX_COMPLETED: u32 = 4;
+pub const COL_MISC: u32 = 5;
+pub const COL_SEAL_CONTEXT: u32 = 6;
+pub const COL_FLOW_MPT_NODES: u32 = 7;
+pub const COL_BLOCK_PROGRESS: u32 = 8;
 pub const COL_NUM: u32 = 9;
-pub const DATA_DB_KEY: &str = "data_db";
-pub const FLOW_DB_KEY: &str = "flow_db";
-const PAD_DELAY: Duration = Duration::from_secs(2);
// Process at most 1M entries (256MB) pad data at a time. // Process at most 1M entries (256MB) pad data at a time.
const PAD_MAX_SIZE: usize = 1 << 20; const PAD_MAX_SIZE: usize = 1 << 20;
static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| {
Merkle::new(
data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
0,
None,
)
.root()
});
 pub struct UpdateFlowMessage {
-    pub root_map: BTreeMap<usize, (H256, usize)>,
     pub pad_data: usize,
     pub tx_start_flow_index: u64,
 }

 pub struct LogManager {
-    pub(crate) flow_db: Arc<dyn ZgsKeyValueDB>,
-    pub(crate) data_db: Arc<dyn ZgsKeyValueDB>,
+    pub(crate) db: Arc<dyn ZgsKeyValueDB>,
     tx_store: TransactionStore,
     flow_store: Arc<FlowStore>,
     merkle: RwLock<MerkleManager>,
-    sender: mpsc::Sender<UpdateFlowMessage>,
 }
struct MerkleManager { struct MerkleManager {
@ -109,7 +94,6 @@ impl MerkleManager {
} }
fn revert_merkle_tree(&mut self, tx_seq: u64, tx_store: &TransactionStore) -> Result<()> { fn revert_merkle_tree(&mut self, tx_seq: u64, tx_store: &TransactionStore) -> Result<()> {
debug!("revert merkle tree {}", tx_seq);
// Special case for reverting tx_seq == 0 // Special case for reverting tx_seq == 0
if tx_seq == u64::MAX { if tx_seq == u64::MAX {
self.pora_chunks_merkle.reset(); self.pora_chunks_merkle.reset();
@ -132,7 +116,7 @@ impl MerkleManager {
if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 { if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 {
self.last_chunk_merkle.append(H256::zero()); self.last_chunk_merkle.append(H256::zero());
self.pora_chunks_merkle self.pora_chunks_merkle
.update_last(self.last_chunk_merkle.root()); .update_last(*self.last_chunk_merkle.root());
} else if self.last_chunk_merkle.leaves() != 0 { } else if self.last_chunk_merkle.leaves() != 0 {
let last_chunk_start_index = self.last_chunk_start_index(); let last_chunk_start_index = self.last_chunk_start_index();
let last_chunk_data = flow_store.get_available_entries( let last_chunk_data = flow_store.get_available_entries(
@ -196,7 +180,6 @@ impl LogStoreChunkWrite for LogManager {
chunks: ChunkArray, chunks: ChunkArray,
maybe_file_proof: Option<FlowProof>, maybe_file_proof: Option<FlowProof>,
) -> Result<bool> { ) -> Result<bool> {
let start_time = Instant::now();
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
let tx = self let tx = self
.tx_store .tx_store
@ -222,13 +205,13 @@ impl LogStoreChunkWrite for LogManager {
self.append_entries(flow_entry_array, &mut merkle)?; self.append_entries(flow_entry_array, &mut merkle)?;
if let Some(file_proof) = maybe_file_proof { if let Some(file_proof) = maybe_file_proof {
merkle.pora_chunks_merkle.fill_with_file_proof( let updated_node_list = merkle.pora_chunks_merkle.fill_with_file_proof(
file_proof, file_proof,
tx.merkle_nodes, tx.merkle_nodes,
tx.start_entry_index, tx.start_entry_index,
)?; )?;
self.flow_store.put_mpt_node_list(updated_node_list)?;
} }
metrics::PUT_CHUNKS.update_since(start_time);
Ok(true) Ok(true)
} }
@ -259,7 +242,6 @@ impl LogStoreWrite for LogManager {
/// `put_tx` for the last tx when we restart the node to ensure that it succeeds. /// `put_tx` for the last tx when we restart the node to ensure that it succeeds.
/// ///
fn put_tx(&self, tx: Transaction) -> Result<()> { fn put_tx(&self, tx: Transaction) -> Result<()> {
let start_time = Instant::now();
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
debug!("put_tx: tx={:?}", tx); debug!("put_tx: tx={:?}", tx);
let expected_seq = self.tx_store.next_tx_seq(); let expected_seq = self.tx_store.next_tx_seq();
@ -275,12 +257,7 @@ impl LogStoreWrite for LogManager {
} }
let maybe_same_data_tx_seq = self.tx_store.put_tx(tx.clone())?.first().cloned(); let maybe_same_data_tx_seq = self.tx_store.put_tx(tx.clone())?.first().cloned();
// TODO(zz): Should we validate received tx? // TODO(zz): Should we validate received tx?
self.append_subtree_list( self.append_subtree_list(tx.start_entry_index, tx.merkle_nodes.clone(), &mut merkle)?;
tx.seq,
tx.start_entry_index,
tx.merkle_nodes.clone(),
&mut merkle,
)?;
merkle.commit_merkle(tx.seq)?; merkle.commit_merkle(tx.seq)?;
debug!( debug!(
"commit flow root: root={:?}", "commit flow root: root={:?}",
@ -294,7 +271,6 @@ impl LogStoreWrite for LogManager {
self.copy_tx_and_finalize(old_tx_seq, vec![tx.seq])?; self.copy_tx_and_finalize(old_tx_seq, vec![tx.seq])?;
} }
} }
metrics::PUT_TX.update_since(start_time);
Ok(()) Ok(())
} }
@ -325,7 +301,6 @@ impl LogStoreWrite for LogManager {
} }
fn finalize_tx_with_hash(&self, tx_seq: u64, tx_hash: H256) -> crate::error::Result<bool> { fn finalize_tx_with_hash(&self, tx_seq: u64, tx_hash: H256) -> crate::error::Result<bool> {
let start_time = Instant::now();
trace!( trace!(
"finalize_tx_with_hash: tx_seq={} tx_hash={:?}", "finalize_tx_with_hash: tx_seq={} tx_hash={:?}",
tx_seq, tx_seq,
@ -354,7 +329,6 @@ impl LogStoreWrite for LogManager {
if same_root_seq_list.first() == Some(&tx_seq) { if same_root_seq_list.first() == Some(&tx_seq) {
self.copy_tx_and_finalize(tx_seq, same_root_seq_list[1..].to_vec())?; self.copy_tx_and_finalize(tx_seq, same_root_seq_list[1..].to_vec())?;
} }
metrics::FINALIZE_TX_WITH_HASH.update_since(start_time);
Ok(true) Ok(true)
} else { } else {
bail!("finalize tx hash with data missing: tx_seq={}", tx_seq) bail!("finalize tx hash with data missing: tx_seq={}", tx_seq)
@ -381,7 +355,7 @@ impl LogStoreWrite for LogManager {
merkle.revert_merkle_tree(tx_seq, &self.tx_store)?; merkle.revert_merkle_tree(tx_seq, &self.tx_store)?;
merkle.try_initialize(&self.flow_store)?; merkle.try_initialize(&self.flow_store)?;
assert_eq!( assert_eq!(
Some(merkle.last_chunk_merkle.root()), Some(*merkle.last_chunk_merkle.root()),
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.leaf_at(merkle.pora_chunks_merkle.leaves() - 1)? .leaf_at(merkle.pora_chunks_merkle.leaves() - 1)?
@ -402,9 +376,10 @@ impl LogStoreWrite for LogManager {
// `merkle` is used in `validate_range_proof`. // `merkle` is used in `validate_range_proof`.
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
if valid { if valid {
merkle let updated_nodes = merkle
.pora_chunks_merkle .pora_chunks_merkle
.fill_with_range_proof(data.proof.clone())?; .fill_with_range_proof(data.proof.clone())?;
self.flow_store.put_mpt_node_list(updated_nodes)?;
} }
Ok(valid) Ok(valid)
} }
@ -420,42 +395,6 @@ impl LogStoreWrite for LogManager {
fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()> { fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()> {
self.flow_store.submit_seal_result(answers) self.flow_store.submit_seal_result(answers)
} }
fn start_padding(&self, executor: &task_executor::TaskExecutor) {
let store = self.flow_store.clone();
executor.spawn(
async move {
let current_height = store.get_pad_data_sync_height().unwrap();
let mut start_index = current_height.unwrap_or(0);
loop {
match store.get_pad_data(start_index) {
std::result::Result::Ok(data) => {
// Update the flow database.
// This should be called before `complete_last_chunk_merkle` so that we do not save
// subtrees with data known.
if let Some(data) = data {
for pad in data {
store
.append_entries(ChunkArray {
data: vec![0; pad.data_size as usize],
start_index: pad.start_index,
})
.unwrap();
}
};
store.put_pad_data_sync_height(start_index).unwrap();
start_index += 1;
}
std::result::Result::Err(_) => {
debug!("Unable to get pad data, start_index={}", start_index);
tokio::time::sleep(PAD_DELAY).await;
}
};
}
},
"pad_tx",
);
}
} }
impl LogStoreChunkRead for LogManager { impl LogStoreChunkRead for LogManager {
@ -537,15 +476,7 @@ impl LogStoreRead for LogManager {
} }
    fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> crate::error::Result<Option<u64>> {
-        let seq_list = self.tx_store.get_tx_seq_list_by_data_root(data_root)?;
-        for tx_seq in &seq_list {
-            if self.tx_store.check_tx_completed(*tx_seq)? {
-                // Return the first finalized tx if possible.
-                return Ok(Some(*tx_seq));
-            }
-        }
-        // No tx is finalized, return the first one.
-        Ok(seq_list.first().cloned())
+        self.tx_store.get_first_tx_seq_by_data_root(data_root)
    }
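
One version of `get_tx_seq_by_data_root` above prefers the first finalized transaction among all txs sharing a data root and only falls back to the first entry when none is finalized; the other simply returns the first entry. The selection rule fits in a few lines (a sketch; `is_finalized` stands in for `check_tx_completed`):

/// Prefer the first sequence number whose tx is already finalized;
/// otherwise fall back to the first sequence number, if any.
fn pick_tx_seq(seq_list: &[u64], is_finalized: impl Fn(u64) -> bool) -> Option<u64> {
    seq_list
        .iter()
        .copied()
        .find(|seq| is_finalized(*seq))
        .or_else(|| seq_list.first().copied())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn prefers_finalized_then_first() {
        // 7 is the only finalized tx, so it wins over the earlier 3.
        assert_eq!(pick_tx_seq(&[3, 7, 9], |s| s == 7), Some(7));
        // Nothing finalized: fall back to the first entry.
        assert_eq!(pick_tx_seq(&[3, 7, 9], |_| false), Some(3));
        assert_eq!(pick_tx_seq(&[], |_| true), None);
    }
}
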
fn get_chunk_with_proof_by_tx_and_index( fn get_chunk_with_proof_by_tx_and_index(
@ -589,10 +520,6 @@ impl LogStoreRead for LogManager {
})) }))
} }
fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>> {
self.tx_store.get_tx_status(tx_seq)
}
fn check_tx_completed(&self, tx_seq: u64) -> crate::error::Result<bool> { fn check_tx_completed(&self, tx_seq: u64) -> crate::error::Result<bool> {
self.tx_store.check_tx_completed(tx_seq) self.tx_store.check_tx_completed(tx_seq)
} }
@ -650,7 +577,7 @@ impl LogStoreRead for LogManager {
fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> { fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
Ok(( Ok((
merkle.pora_chunks_merkle.root(), *merkle.pora_chunks_merkle.root(),
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64, merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64,
)) ))
} }
@ -679,37 +606,33 @@ impl LogStoreRead for LogManager {
impl LogManager {
    pub fn rocksdb(
        config: LogConfig,
-        flow_path: impl AsRef<Path>,
-        data_path: impl AsRef<Path>,
+        path: impl AsRef<Path>,
+        executor: task_executor::TaskExecutor,
    ) -> Result<Self> {
        let mut db_config = DatabaseConfig::with_columns(COL_NUM);
        db_config.enable_statistics = true;
-        let flow_db_source = Arc::new(Database::open(&db_config, flow_path)?);
-        let data_db_source = Arc::new(Database::open(&db_config, data_path)?);
-        Self::new(flow_db_source, data_db_source, config)
+        let db = Arc::new(Database::open(&db_config, path)?);
+        Self::new(db, config, executor)
    }

-    pub fn memorydb(config: LogConfig) -> Result<Self> {
-        let flow_db = Arc::new(kvdb_memorydb::create(COL_NUM));
-        let data_db = Arc::new(kvdb_memorydb::create(COL_NUM));
-        Self::new(flow_db, data_db, config)
+    pub fn memorydb(config: LogConfig, executor: task_executor::TaskExecutor) -> Result<Self> {
+        let db = Arc::new(kvdb_memorydb::create(COL_NUM));
+        Self::new(db, config, executor)
    }

    fn new(
-        flow_db_source: Arc<dyn ZgsKeyValueDB>,
-        data_db_source: Arc<dyn ZgsKeyValueDB>,
+        db: Arc<dyn ZgsKeyValueDB>,
        config: LogConfig,
+        executor: task_executor::TaskExecutor,
    ) -> Result<Self> {
-        let tx_store = TransactionStore::new(flow_db_source.clone(), data_db_source.clone())?;
-        let flow_db = Arc::new(FlowDBStore::new(flow_db_source.clone()));
-        let data_db = Arc::new(FlowDBStore::new(data_db_source.clone()));
-        let flow_store = Arc::new(FlowStore::new(
-            flow_db.clone(),
-            data_db.clone(),
-            config.flow.clone(),
-        ));
-        // If the last tx `put_tx` does not complete, we will revert it in `pora_chunks_merkle`
-        // first and call `put_tx` later.
+        let tx_store = TransactionStore::new(db.clone())?;
+        let flow_store = Arc::new(FlowStore::new(db.clone(), config.flow));
+        let mut initial_data = flow_store.get_chunk_root_list()?;
+        // If the last tx `put_tx` does not complete, we will revert it in `initial_data.subtree_list`
+        // first and call `put_tx` later. The known leaves in its data will be saved in `extra_leaves`
+        // and inserted later.
+        let mut extra_leaves = Vec::new();
        let next_tx_seq = tx_store.next_tx_seq();
        let mut start_tx_seq = if next_tx_seq > 0 {
            Some(next_tx_seq - 1)
@ -717,25 +640,15 @@ impl LogManager {
None None
}; };
let mut last_tx_to_insert = None; let mut last_tx_to_insert = None;
let mut pora_chunks_merkle = Merkle::new_with_subtrees(
flow_db,
config.flow.merkle_node_cache_capacity,
log2_pow2(PORA_CHUNK_SIZE),
)?;
if let Some(last_tx_seq) = start_tx_seq { if let Some(last_tx_seq) = start_tx_seq {
if !tx_store.check_tx_completed(last_tx_seq)? { if !tx_store.check_tx_completed(last_tx_seq)? {
// Last tx not finalized, we need to check if its `put_tx` is completed. // Last tx not finalized, we need to check if its `put_tx` is completed.
let last_tx = tx_store let last_tx = tx_store
.get_tx_by_seq_number(last_tx_seq)? .get_tx_by_seq_number(last_tx_seq)?
.expect("tx missing"); .expect("tx missing");
let current_len = pora_chunks_merkle.leaves(); let mut current_len = initial_data.leaves();
let expected_len = sector_to_segment( let expected_len =
last_tx.start_entry_index sector_to_segment(last_tx.start_entry_index + last_tx.num_entries() as u64);
+ last_tx.num_entries() as u64
+ PORA_CHUNK_SIZE as u64
- 1,
);
match expected_len.cmp(&(current_len)) { match expected_len.cmp(&(current_len)) {
Ordering::Less => { Ordering::Less => {
bail!( bail!(
@ -763,33 +676,43 @@ impl LogManager {
previous_tx.start_entry_index + previous_tx.num_entries() as u64, previous_tx.start_entry_index + previous_tx.num_entries() as u64,
); );
if current_len > expected_len { if current_len > expected_len {
pora_chunks_merkle.revert_to_leaves(expected_len)?; while let Some((subtree_depth, _)) = initial_data.subtree_list.pop()
} else { {
assert_eq!(current_len, expected_len); current_len -= 1 << (subtree_depth - 1);
if current_len == expected_len {
break;
} }
start_tx_seq = Some(previous_tx.seq); }
} else {
warn!(
"revert last tx with no-op: {} {}",
current_len, expected_len
);
}
assert_eq!(current_len, expected_len);
while let Some((index, h)) = initial_data.known_leaves.pop() {
if index < current_len {
initial_data.known_leaves.push((index, h));
break;
} else {
extra_leaves.push((index, h));
}
}
start_tx_seq = Some(last_tx_seq - 1);
}; };
} }
} }
} }
} }
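
The two sides of the recovery logic above disagree on how to turn the last tx's end sector into an expected segment count: one adds `PORA_CHUNK_SIZE - 1` before calling `sector_to_segment` (a ceiling, so a partly filled trailing segment still counts), the other truncates. Assuming `sector_to_segment` is plain integer division by the segment size (constants chosen for illustration), the difference is just ceil versus floor:

const PORA_CHUNK_SIZE: u64 = 1024; // sectors per segment in this sketch (assumed)

// Assumed to match the crate's helper: integer division by the segment size.
fn sector_to_segment(sector_index: u64) -> u64 {
    sector_index / PORA_CHUNK_SIZE
}

fn main() {
    let end_sector = 1500; // start_entry_index + num_entries of the last tx
    let floor = sector_to_segment(end_sector);
    let ceil = sector_to_segment(end_sector + PORA_CHUNK_SIZE - 1);
    // Only the ceiling form counts the partially filled trailing segment.
    assert_eq!(floor, 1);
    assert_eq!(ceil, 2);
    // When the tx ends exactly on a segment boundary the two agree.
    assert_eq!(sector_to_segment(2048), sector_to_segment(2048 + PORA_CHUNK_SIZE - 1));
}
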
let mut pora_chunks_merkle =
Merkle::new_with_subtrees(initial_data, log2_pow2(PORA_CHUNK_SIZE), start_tx_seq)?;
let last_chunk_merkle = match start_tx_seq { let last_chunk_merkle = match start_tx_seq {
Some(tx_seq) => { Some(tx_seq) => {
let tx = tx_store.get_tx_by_seq_number(tx_seq)?.expect("tx missing"); tx_store.rebuild_last_chunk_merkle(pora_chunks_merkle.leaves(), tx_seq)?
if (tx.start_entry_index() + tx.num_entries() as u64) % PORA_CHUNK_SIZE as u64 == 0
{
// The last chunk should be aligned, so it's empty.
Merkle::new_with_depth(vec![], log2_pow2(PORA_CHUNK_SIZE) + 1, None)
} else {
tx_store.rebuild_last_chunk_merkle(pora_chunks_merkle.leaves() - 1, tx_seq)?
}
} }
// Initialize // Initialize
None => { None => Merkle::new_with_depth(vec![], log2_pow2(PORA_CHUNK_SIZE) + 1, None),
pora_chunks_merkle.reset();
Merkle::new_with_depth(vec![], 1, None)
}
}; };
debug!( debug!(
@ -799,37 +722,83 @@ impl LogManager {
last_chunk_merkle.leaves(), last_chunk_merkle.leaves(),
); );
if last_chunk_merkle.leaves() != 0 { if last_chunk_merkle.leaves() != 0 {
pora_chunks_merkle.update_last(last_chunk_merkle.root()); pora_chunks_merkle.append(*last_chunk_merkle.root());
}
// update the merkle root // update the merkle root
pora_chunks_merkle.commit(start_tx_seq); pora_chunks_merkle.commit(start_tx_seq);
}
let merkle = RwLock::new(MerkleManager { let merkle = RwLock::new(MerkleManager {
pora_chunks_merkle, pora_chunks_merkle,
last_chunk_merkle, last_chunk_merkle,
}); });
let log_manager = Self { let (sender, receiver) = mpsc::channel();
flow_db: flow_db_source,
data_db: data_db_source, let mut log_manager = Self {
db,
tx_store, tx_store,
flow_store, flow_store,
merkle, merkle,
sender,
}; };
log_manager.start_receiver(receiver, executor);
if let Some(tx) = last_tx_to_insert { if let Some(tx) = last_tx_to_insert {
log_manager.revert_to(tx.seq - 1)?;
log_manager.put_tx(tx)?; log_manager.put_tx(tx)?;
let mut merkle = log_manager.merkle.write();
for (index, h) in extra_leaves {
if index < merkle.pora_chunks_merkle.leaves() {
merkle.pora_chunks_merkle.fill_leaf(index, h);
} else {
error!("out of range extra leaf: index={} hash={:?}", index, h);
}
}
} else {
assert!(extra_leaves.is_empty());
} }
log_manager log_manager
.merkle .merkle
.write() .write()
.try_initialize(&log_manager.flow_store)?; .try_initialize(&log_manager.flow_store)?;
info!(
"Log manager initialized, state={:?}",
log_manager.get_context()?
);
Ok(log_manager) Ok(log_manager)
} }
fn start_receiver(
&mut self,
rx: mpsc::Receiver<UpdateFlowMessage>,
executor: task_executor::TaskExecutor,
) {
let flow_store = self.flow_store.clone();
executor.spawn(
async move {
loop {
match rx.recv() {
std::result::Result::Ok(data) => {
// Update the root index.
flow_store.put_batch_root_list(data.root_map).unwrap();
// Update the flow database.
// This should be called before `complete_last_chunk_merkle` so that we do not save
// subtrees with data known.
flow_store
.append_entries(ChunkArray {
data: vec![0; data.pad_data],
start_index: data.tx_start_flow_index,
})
.unwrap();
}
std::result::Result::Err(_) => {
error!("Receiver error");
}
};
}
},
"pad_tx",
);
// Wait for the spawned thread to finish
// let _ = handle.join().expect("Thread panicked");
}
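
`start_receiver` above moves the zero-padding writes off the caller's path: `pad_tx` sends an `UpdateFlowMessage` and a spawned task applies the batch root map and appends the zero entries in that order. A compact sketch of that producer/worker split using a plain `std::sync::mpsc` channel and a dedicated thread; the message fields mirror the ones used above, but the names and the `apply` callback are illustrative only:

use std::{collections::BTreeMap, sync::mpsc, thread};

// Illustrative message shape, mirroring the fields used above.
struct UpdateFlowMessage {
    root_map: BTreeMap<usize, ([u8; 32], usize)>, // chunk index -> (root, subtree depth)
    pad_data: usize,                              // number of zero bytes to append
    tx_start_flow_index: u64,                     // where the zeros start in the flow
}

fn spawn_flow_writer<F>(mut apply: F) -> (mpsc::Sender<UpdateFlowMessage>, thread::JoinHandle<()>)
where
    F: FnMut(&UpdateFlowMessage) + Send + 'static,
{
    let (sender, receiver) = mpsc::channel::<UpdateFlowMessage>();
    // A dedicated thread owns the blocking `recv()` loop in this sketch.
    let handle = thread::spawn(move || {
        while let Ok(msg) = receiver.recv() {
            // Persist batch roots first, then the zero entries, matching the ordering above.
            apply(&msg);
        }
        // All senders dropped: the worker exits cleanly.
    });
    (sender, handle)
}

fn main() {
    let (sender, handle) = spawn_flow_writer(|msg| {
        println!(
            "flush {} zero bytes at flow index {} ({} roots)",
            msg.pad_data,
            msg.tx_start_flow_index,
            msg.root_map.len()
        );
    });
    sender
        .send(UpdateFlowMessage {
            root_map: BTreeMap::new(),
            pad_data: 256 * 1024,
            tx_start_flow_index: 0,
        })
        .unwrap();
    drop(sender); // close the channel so the worker loop ends
    handle.join().unwrap();
}
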
fn gen_proof(&self, flow_index: u64, maybe_root: Option<DataRoot>) -> Result<FlowProof> { fn gen_proof(&self, flow_index: u64, maybe_root: Option<DataRoot>) -> Result<FlowProof> {
match maybe_root { match maybe_root {
None => self.gen_proof_at_version(flow_index, None), None => self.gen_proof_at_version(flow_index, None),
@ -879,13 +848,26 @@ impl LogManager {
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
} }
}; };
entry_proof(&top_proof, &sub_proof) let r = entry_proof(&top_proof, &sub_proof);
if r.is_err() {
let raw_batch = self.flow_store.get_raw_batch(seg_index as u64)?.unwrap();
let db_root = self.flow_store.get_batch_root(seg_index as u64)?;
error!(
?r,
?db_root,
?seg_index,
"gen proof error: top_leaves={}, last={}, raw_batch={}",
merkle.pora_chunks_merkle.leaves(),
merkle.last_chunk_merkle.leaves(),
serde_json::to_string(&raw_batch).unwrap(),
);
}
r
} }
#[instrument(skip(self, merkle))] #[instrument(skip(self, merkle))]
fn append_subtree_list( fn append_subtree_list(
&self, &self,
tx_seq: u64,
tx_start_index: u64, tx_start_index: u64,
merkle_list: Vec<(usize, DataRoot)>, merkle_list: Vec<(usize, DataRoot)>,
merkle: &mut MerkleManager, merkle: &mut MerkleManager,
@ -893,10 +875,10 @@ impl LogManager {
if merkle_list.is_empty() { if merkle_list.is_empty() {
return Ok(()); return Ok(());
} }
let start_time = Instant::now();
self.pad_tx(tx_seq, tx_start_index, &mut *merkle)?; self.pad_tx(tx_start_index, &mut *merkle)?;
let mut batch_root_map = BTreeMap::new();
for (subtree_depth, subtree_root) in merkle_list { for (subtree_depth, subtree_root) in merkle_list {
let subtree_size = 1 << (subtree_depth - 1); let subtree_size = 1 << (subtree_depth - 1);
if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE { if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
@ -907,13 +889,17 @@ impl LogManager {
// `last_chunk_merkle` was empty, so this is a new leaf in the top_tree. // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.append_subtree(1, merkle.last_chunk_merkle.root())?; .append_subtree(1, *merkle.last_chunk_merkle.root())?;
} else { } else {
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(*merkle.last_chunk_merkle.root());
} }
if merkle.last_chunk_merkle.leaves() == PORA_CHUNK_SIZE { if merkle.last_chunk_merkle.leaves() == PORA_CHUNK_SIZE {
batch_root_map.insert(
merkle.pora_chunks_merkle.leaves() - 1,
(*merkle.last_chunk_merkle.root(), 1),
);
self.complete_last_chunk_merkle( self.complete_last_chunk_merkle(
merkle.pora_chunks_merkle.leaves() - 1, merkle.pora_chunks_merkle.leaves() - 1,
&mut *merkle, &mut *merkle,
@ -924,20 +910,22 @@ impl LogManager {
// the chunks boundary. // the chunks boundary.
assert_eq!(merkle.last_chunk_merkle.leaves(), 0); assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
assert!(subtree_size >= PORA_CHUNK_SIZE); assert!(subtree_size >= PORA_CHUNK_SIZE);
batch_root_map.insert(
merkle.pora_chunks_merkle.leaves(),
(subtree_root, subtree_depth - log2_pow2(PORA_CHUNK_SIZE)),
);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?; .append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?;
} }
} }
self.flow_store.put_batch_root_list(batch_root_map)?;
metrics::APPEND_SUBTREE_LIST.update_since(start_time);
Ok(()) Ok(())
} }
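
`append_subtree_list` above leans on two small conversions: a subtree of depth d (with a single leaf counted as depth 1) covers 1 << (d - 1) sectors, and a subtree that spans at least one full segment joins the segment-level tree with its depth reduced by `log2_pow2(PORA_CHUNK_SIZE)`. A worked check of that arithmetic, with the constant and the `log2_pow2` helper assumed for illustration:

const PORA_CHUNK_SIZE: usize = 1024; // sectors per segment (assumed here)

// Assumed equivalent of the crate's helper: log2 of a power of two.
fn log2_pow2(x: usize) -> usize {
    debug_assert!(x.is_power_of_two());
    x.trailing_zeros() as usize
}

fn main() {
    // Depth is counted with a single leaf at depth 1, so size = 1 << (depth - 1).
    let subtree_depth = 13;
    let subtree_size = 1usize << (subtree_depth - 1);
    assert_eq!(subtree_size, 4096);

    // A 4096-sector subtree is at least one full segment...
    assert!(subtree_size >= PORA_CHUNK_SIZE);
    // ...and joins the segment-level tree as a subtree of depth 13 - 10 = 3,
    // i.e. 1 << (3 - 1) = 4 leaves, one per segment.
    let top_depth = subtree_depth - log2_pow2(PORA_CHUNK_SIZE);
    assert_eq!(top_depth, 3);
    assert_eq!(1usize << (top_depth - 1), subtree_size / PORA_CHUNK_SIZE);
}
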
#[instrument(skip(self, merkle))] #[instrument(skip(self, merkle))]
fn pad_tx(&self, tx_seq: u64, tx_start_index: u64, merkle: &mut MerkleManager) -> Result<()> { fn pad_tx(&self, tx_start_index: u64, merkle: &mut MerkleManager) -> Result<()> {
// Check if we need to pad the flow. // Check if we need to pad the flow.
let start_time = Instant::now();
let mut tx_start_flow_index = let mut tx_start_flow_index =
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64; merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64;
let pad_size = tx_start_index - tx_start_flow_index; let pad_size = tx_start_index - tx_start_flow_index;
@ -946,10 +934,10 @@ impl LogManager {
merkle.pora_chunks_merkle.leaves(), merkle.pora_chunks_merkle.leaves(),
merkle.last_chunk_merkle.leaves() merkle.last_chunk_merkle.leaves()
); );
let mut pad_list = vec![];
if pad_size != 0 { if pad_size != 0 {
for pad_data in Self::padding(pad_size as usize) { for pad_data in Self::padding(pad_size as usize) {
let mut is_full_empty = true; let mut is_full_empty = true;
let mut root_map = BTreeMap::new();
// Update the in-memory merkle tree. // Update the in-memory merkle tree.
let last_chunk_pad = if merkle.last_chunk_merkle.leaves() == 0 { let last_chunk_pad = if merkle.last_chunk_merkle.leaves() == 0 {
@ -966,7 +954,7 @@ impl LogManager {
.append_list(data_to_merkle_leaves(&pad_data)?); .append_list(data_to_merkle_leaves(&pad_data)?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(*merkle.last_chunk_merkle.root());
} else { } else {
if last_chunk_pad != 0 { if last_chunk_pad != 0 {
is_full_empty = false; is_full_empty = false;
@ -976,14 +964,23 @@ impl LogManager {
.append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?); .append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(*merkle.last_chunk_merkle.root());
root_map.insert(
merkle.pora_chunks_merkle.leaves() - 1,
(*merkle.last_chunk_merkle.root(), 1),
);
completed_chunk_index = Some(merkle.pora_chunks_merkle.leaves() - 1); completed_chunk_index = Some(merkle.pora_chunks_merkle.leaves() - 1);
} }
// Pad with more complete chunks. // Pad with more complete chunks.
let mut start_index = last_chunk_pad / ENTRY_SIZE; let mut start_index = last_chunk_pad / ENTRY_SIZE;
while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE { while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT); let data = pad_data[start_index * ENTRY_SIZE
..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE]
.to_vec();
let root = *Merkle::new(data_to_merkle_leaves(&data)?, 0, None).root();
merkle.pora_chunks_merkle.append(root);
root_map.insert(merkle.pora_chunks_merkle.leaves() - 1, (root, 1));
start_index += PORA_CHUNK_SIZE; start_index += PORA_CHUNK_SIZE;
} }
assert_eq!(pad_data.len(), start_index * ENTRY_SIZE); assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
@ -991,11 +988,13 @@ impl LogManager {
let data_size = pad_data.len() / ENTRY_SIZE; let data_size = pad_data.len() / ENTRY_SIZE;
if is_full_empty { if is_full_empty {
pad_list.push(PadPair { self.sender.send(UpdateFlowMessage {
data_size: pad_data.len() as u64, root_map,
start_index: tx_start_flow_index, pad_data: pad_data.len(),
}); tx_start_flow_index,
})?;
} else { } else {
self.flow_store.put_batch_root_list(root_map).unwrap();
// Update the flow database. // Update the flow database.
// This should be called before `complete_last_chunk_merkle` so that we do not save // This should be called before `complete_last_chunk_merkle` so that we do not save
// subtrees with data known. // subtrees with data known.
@ -1016,10 +1015,6 @@ impl LogManager {
merkle.pora_chunks_merkle.leaves(), merkle.pora_chunks_merkle.leaves(),
merkle.last_chunk_merkle.leaves() merkle.last_chunk_merkle.leaves()
); );
self.flow_store.put_pad_data(&pad_list, tx_seq)?;
metrics::PAD_TX.update_since(start_time);
Ok(()) Ok(())
} }
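
Both versions of `pad_tx` above split the padding into the same two parts: first top up the partially filled last chunk to a segment boundary, then append whole `PORA_CHUNK_SIZE` segments of zeros. A small worked example of that split, working in sectors (the code above tracks the same quantities in bytes; sizes are assumed: 256-byte sectors, 1024-sector segments):

const ENTRY_SIZE: usize = 256;       // bytes per sector (assumed)
const PORA_CHUNK_SIZE: usize = 1024; // sectors per segment (assumed)

/// Split `pad_sectors` of zero padding into:
/// (sectors that top up the current last chunk, whole segments appended afterwards,
///  leftover sectors that start a new partially filled last chunk).
fn split_padding(last_chunk_leaves: usize, pad_sectors: usize) -> (usize, usize, usize) {
    let to_boundary = if last_chunk_leaves == 0 {
        0
    } else {
        (PORA_CHUNK_SIZE - last_chunk_leaves).min(pad_sectors)
    };
    let rest = pad_sectors - to_boundary;
    (to_boundary, rest / PORA_CHUNK_SIZE, rest % PORA_CHUNK_SIZE)
}

fn main() {
    // Last chunk holds 600 sectors and the next tx starts 2_472 sectors later:
    // 424 sectors finish that chunk, two whole zero segments follow, nothing is left over.
    assert_eq!(split_padding(600, 2_472), (424, 2, 0));
    // A smaller gap only partially fills a fresh segment.
    assert_eq!(split_padding(0, 300), (0, 0, 300));
    // In bytes, the first case is exactly the pad data that gets appended.
    assert_eq!((424 + 2 * PORA_CHUNK_SIZE) * ENTRY_SIZE, 2_472 * ENTRY_SIZE);
}
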
@ -1061,7 +1056,7 @@ impl LogManager {
} }
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(*merkle.last_chunk_merkle.root());
} }
let chunk_roots = self.flow_store.append_entries(flow_entry_array)?; let chunk_roots = self.flow_store.append_entries(flow_entry_array)?;
for (chunk_index, chunk_root) in chunk_roots { for (chunk_index, chunk_root) in chunk_roots {
@ -1148,8 +1143,6 @@ impl LogManager {
} }
fn copy_tx_and_finalize(&self, from_tx_seq: u64, to_tx_seq_list: Vec<u64>) -> Result<()> { fn copy_tx_and_finalize(&self, from_tx_seq: u64, to_tx_seq_list: Vec<u64>) -> Result<()> {
let start_time = Instant::now();
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
let shard_config = self.flow_store.get_shard_config(); let shard_config = self.flow_store.get_shard_config();
// We have all the data need for this tx, so just copy them. // We have all the data need for this tx, so just copy them.
@ -1198,8 +1191,6 @@ impl LogManager {
for (seq, _) in to_tx_offset_list { for (seq, _) in to_tx_offset_list {
self.tx_store.finalize_tx(seq)?; self.tx_store.finalize_tx(seq)?;
} }
metrics::COPY_TX_AND_FINALIZE.update_since(start_time);
Ok(()) Ok(())
} }
@ -1272,7 +1263,6 @@ pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
} }
pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> { pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
let start_time = Instant::now();
if leaf_data.len() % ENTRY_SIZE != 0 { if leaf_data.len() % ENTRY_SIZE != 0 {
bail!("merkle_tree: mismatched data size"); bail!("merkle_tree: mismatched data size");
} }
@ -1288,9 +1278,6 @@ pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
.map(Sha3Algorithm::leaf) .map(Sha3Algorithm::leaf)
.collect() .collect()
}; };
metrics::DATA_TO_MERKLE_LEAVES_SIZE.update(leaf_data.len());
metrics::DATA_TO_MERKLE_LEAVES.update_since(start_time);
Ok(r) Ok(r)
} }

View File

@ -1,43 +0,0 @@
use std::sync::Arc;
use metrics::{register_timer, Gauge, GaugeUsize, Timer};
lazy_static::lazy_static! {
pub static ref PUT_TX: Arc<dyn Timer> = register_timer("log_store_put_tx");
pub static ref PUT_CHUNKS: Arc<dyn Timer> = register_timer("log_store_put_chunks");
pub static ref TX_STORE_PUT: Arc<dyn Timer> = register_timer("log_store_tx_store_put_tx");
pub static ref CHECK_TX_COMPLETED: Arc<dyn Timer> =
register_timer("log_store_log_manager_check_tx_completed");
pub static ref APPEND_SUBTREE_LIST: Arc<dyn Timer> =
register_timer("log_store_log_manager_append_subtree_list");
pub static ref DATA_TO_MERKLE_LEAVES: Arc<dyn Timer> =
register_timer("log_store_log_manager_data_to_merkle_leaves");
pub static ref COPY_TX_AND_FINALIZE: Arc<dyn Timer> =
register_timer("log_store_log_manager_copy_tx_and_finalize");
pub static ref PAD_TX: Arc<dyn Timer> = register_timer("log_store_log_manager_pad_tx");
pub static ref PUT_BATCH_ROOT_LIST: Arc<dyn Timer> = register_timer("log_store_flow_store_put_batch_root_list");
pub static ref INSERT_SUBTREE_LIST: Arc<dyn Timer> =
register_timer("log_store_flow_store_insert_subtree_list");
pub static ref PUT_MPT_NODE: Arc<dyn Timer> = register_timer("log_store_flow_store_put_mpt_node");
pub static ref PUT_ENTRY_BATCH_LIST: Arc<dyn Timer> =
register_timer("log_store_flow_store_put_entry_batch_list");
pub static ref APPEND_ENTRIES: Arc<dyn Timer> = register_timer("log_store_flow_store_append_entries");
pub static ref FINALIZE_TX_WITH_HASH: Arc<dyn Timer> = register_timer("log_store_log_manager_finalize_tx_with_hash");
pub static ref DATA_TO_MERKLE_LEAVES_SIZE: Arc<dyn Gauge<usize>> = GaugeUsize::register("log_store_data_to_merkle_leaves_size");
pub static ref TX_BY_SEQ_NUMBER: Arc<dyn Timer> = register_timer("log_store_tx_store_get_tx_by_seq_number");
}
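
The metrics module above pairs `register_timer` handles with call sites that record elapsed time via `update_since`, plus a size gauge updated with `update`. A hedged sketch of that instrumentation pattern as it appears in the surrounding diff; it assumes the same `metrics` crate (the Conflux fork referenced in Cargo.lock) and `lazy_static`, and only uses the calls visible above:

use std::{sync::Arc, time::Instant};

use metrics::{register_timer, Gauge, GaugeUsize, Timer};

lazy_static::lazy_static! {
    static ref EXAMPLE_OP: Arc<dyn Timer> = register_timer("log_store_example_op");
    static ref EXAMPLE_OP_SIZE: Arc<dyn Gauge<usize>> =
        GaugeUsize::register("log_store_example_op_size");
}

fn example_op(data: &[u8]) {
    let start_time = Instant::now();
    // ... do the actual work here ...
    EXAMPLE_OP_SIZE.update(data.len());
    // Record how long the call took, mirroring `metrics::PUT_TX.update_since(...)` above.
    EXAMPLE_OP.update_since(start_time);
}
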

View File

@ -1,7 +1,6 @@
use crate::config::ShardConfig; use crate::config::ShardConfig;
use append_merkle::MerkleTreeInitialData;
use ethereum_types::H256; use ethereum_types::H256;
use flow_store::PadPair;
use shared_types::{ use shared_types::{
Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof,
Transaction, Transaction,
@ -10,13 +9,12 @@ use zgs_spec::{BYTES_PER_SEAL, SEALS_PER_LOAD};
use crate::error::Result; use crate::error::Result;
use self::tx_store::{BlockHashAndSubmissionIndex, TxStatus}; use self::tx_store::BlockHashAndSubmissionIndex;
pub mod config; pub mod config;
mod flow_store; mod flow_store;
pub mod load_chunk; mod load_chunk;
pub mod log_manager; pub mod log_manager;
mod metrics;
mod seal_task_manager; mod seal_task_manager;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
@ -31,12 +29,8 @@ pub trait LogStoreRead: LogStoreChunkRead {
fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>>; fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>>;
/// Get a transaction by the data root of its data. /// Get a transaction by the data root of its data.
/// If all txs are not finalized, return the first one.
/// Otherwise, return the first finalized tx.
fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>>; fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>>;
/// If all txs are not finalized, return the first one.
/// Otherwise, return the first finalized tx.
fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> { fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result<Option<Transaction>> {
match self.get_tx_seq_by_data_root(data_root)? { match self.get_tx_seq_by_data_root(data_root)? {
Some(seq) => self.get_tx_by_seq_number(seq), Some(seq) => self.get_tx_by_seq_number(seq),
@ -62,8 +56,6 @@ pub trait LogStoreRead: LogStoreChunkRead {
fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool>; fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool>;
fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>>;
fn next_tx_seq(&self) -> u64; fn next_tx_seq(&self) -> u64;
fn get_sync_progress(&self) -> Result<Option<(u64, H256)>>; fn get_sync_progress(&self) -> Result<Option<(u64, H256)>>;
@ -166,8 +158,6 @@ pub trait LogStoreWrite: LogStoreChunkWrite {
fn update_shard_config(&self, shard_config: ShardConfig); fn update_shard_config(&self, shard_config: ShardConfig);
fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()>; fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()>;
fn start_padding(&self, executor: &task_executor::TaskExecutor);
} }
pub trait LogStoreChunkWrite { pub trait LogStoreChunkWrite {
@ -221,16 +211,14 @@ pub trait FlowRead {
/// For simplicity, `index_start` and `index_end` must be at the batch boundaries. /// For simplicity, `index_start` and `index_end` must be at the batch boundaries.
fn get_available_entries(&self, index_start: u64, index_end: u64) -> Result<Vec<ChunkArray>>; fn get_available_entries(&self, index_start: u64, index_end: u64) -> Result<Vec<ChunkArray>>;
fn get_chunk_root_list(&self) -> Result<MerkleTreeInitialData<DataRoot>>;
fn load_sealed_data(&self, chunk_index: u64) -> Result<Option<MineLoadChunk>>; fn load_sealed_data(&self, chunk_index: u64) -> Result<Option<MineLoadChunk>>;
// An estimation of the number of entries in the flow db. // An estimation of the number of entries in the flow db.
fn get_num_entries(&self) -> Result<u64>; fn get_num_entries(&self) -> Result<u64>;
fn get_shard_config(&self) -> ShardConfig; fn get_shard_config(&self) -> ShardConfig;
fn get_pad_data(&self, start_index: u64) -> Result<Option<Vec<PadPair>>>;
fn get_pad_data_sync_height(&self) -> Result<Option<u64>>;
} }
pub trait FlowWrite { pub trait FlowWrite {
@ -245,10 +233,6 @@ pub trait FlowWrite {
/// Update the shard config. /// Update the shard config.
fn update_shard_config(&self, shard_config: ShardConfig); fn update_shard_config(&self, shard_config: ShardConfig);
fn put_pad_data(&self, data_sizes: &[PadPair], tx_seq: u64) -> Result<()>;
fn put_pad_data_sync_height(&self, tx_seq: u64) -> Result<()>;
} }
pub struct SealTask { pub struct SealTask {
@ -287,23 +271,3 @@ pub trait FlowSeal {
pub trait Flow: FlowRead + FlowWrite + FlowSeal {} pub trait Flow: FlowRead + FlowWrite + FlowSeal {}
impl<T: FlowRead + FlowWrite + FlowSeal> Flow for T {} impl<T: FlowRead + FlowWrite + FlowSeal> Flow for T {}
pub trait PadDataStoreRead {
fn get_pad_data(&self, start_index: u64) -> Result<Option<Vec<PadPair>>>;
fn get_pad_data_sync_height(&self) -> Result<Option<u64>>;
}
pub trait PadDataStoreWrite {
fn put_pad_data(&self, data_sizes: &[PadPair], tx_seq: u64) -> Result<()>;
fn put_pad_data_sync_height(&self, tx_seq: u64) -> Result<()>;
fn start_padding(&mut self, executor: &task_executor::TaskExecutor);
}
pub trait PadDataStore:
PadDataStoreRead + PadDataStoreWrite + config::Configurable + Send + Sync + 'static
{
}
impl<T: PadDataStoreRead + PadDataStoreWrite + config::Configurable + Send + Sync + 'static>
PadDataStore for T
{
}

View File

@ -8,11 +8,15 @@ use ethereum_types::H256;
use rand::random; use rand::random;
use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE}; use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE};
use std::cmp; use std::cmp;
use task_executor::test_utils::TestRuntime;
#[test] #[test]
fn test_put_get() { fn test_put_get() {
let config = LogConfig::default(); let config = LogConfig::default();
let store = LogManager::memorydb(config.clone()).unwrap(); let runtime = TestRuntime::default();
let executor = runtime.task_executor.clone();
let store = LogManager::memorydb(config.clone(), executor).unwrap();
let chunk_count = config.flow.batch_size + config.flow.batch_size / 2 - 1; let chunk_count = config.flow.batch_size + config.flow.batch_size / 2 - 1;
// Aligned with size. // Aligned with size.
let start_offset = 1024; let start_offset = 1024;
@ -169,7 +173,10 @@ fn test_put_tx() {
fn create_store() -> LogManager { fn create_store() -> LogManager {
let config = LogConfig::default(); let config = LogConfig::default();
LogManager::memorydb(config).unwrap() let runtime = TestRuntime::default();
let executor = runtime.task_executor.clone();
LogManager::memorydb(config, executor).unwrap()
} }
fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) { fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) {

View File

@ -3,7 +3,6 @@ use crate::log_store::log_manager::{
data_to_merkle_leaves, sub_merkle_tree, COL_BLOCK_PROGRESS, COL_MISC, COL_TX, COL_TX_COMPLETED, data_to_merkle_leaves, sub_merkle_tree, COL_BLOCK_PROGRESS, COL_MISC, COL_TX, COL_TX_COMPLETED,
COL_TX_DATA_ROOT_INDEX, ENTRY_SIZE, PORA_CHUNK_SIZE, COL_TX_DATA_ROOT_INDEX, ENTRY_SIZE, PORA_CHUNK_SIZE,
}; };
use crate::log_store::metrics;
use crate::{try_option, LogManager, ZgsKeyValueDB}; use crate::{try_option, LogManager, ZgsKeyValueDB};
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm};
@ -16,39 +15,14 @@ use std::collections::hash_map::Entry;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant;
use tracing::{error, instrument}; use tracing::{error, instrument};
const LOG_SYNC_PROGRESS_KEY: &str = "log_sync_progress"; const LOG_SYNC_PROGRESS_KEY: &str = "log_sync_progress";
const NEXT_TX_KEY: &str = "next_tx_seq"; const NEXT_TX_KEY: &str = "next_tx_seq";
const LOG_LATEST_BLOCK_NUMBER_KEY: &str = "log_latest_block_number_key"; const LOG_LATEST_BLOCK_NUMBER_KEY: &str = "log_latest_block_number_key";
#[derive(Debug)] const TX_STATUS_FINALIZED: u8 = 0;
pub enum TxStatus { const TX_STATUS_PRUNED: u8 = 1;
Finalized,
Pruned,
}
impl From<TxStatus> for u8 {
fn from(value: TxStatus) -> Self {
match value {
TxStatus::Finalized => 0,
TxStatus::Pruned => 1,
}
}
}
impl TryFrom<u8> for TxStatus {
type Error = anyhow::Error;
fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
match value {
0 => Ok(TxStatus::Finalized),
1 => Ok(TxStatus::Pruned),
_ => Err(anyhow!("invalid value for tx status {}", value)),
}
}
}
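
One side of this hunk keeps the on-disk completion flag as a `TxStatus` enum with `From<TxStatus> for u8` / `TryFrom<u8>` conversions, the other as the raw constants `TX_STATUS_FINALIZED = 0` and `TX_STATUS_PRUNED = 1`; both write a single status byte under `COL_TX_COMPLETED`. A quick round-trip check of that encoding (a self-contained copy for illustration):

use anyhow::anyhow;

#[derive(Debug, PartialEq)]
pub enum TxStatus {
    Finalized,
    Pruned,
}

impl From<TxStatus> for u8 {
    fn from(value: TxStatus) -> Self {
        match value {
            TxStatus::Finalized => 0,
            TxStatus::Pruned => 1,
        }
    }
}

impl TryFrom<u8> for TxStatus {
    type Error = anyhow::Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(TxStatus::Finalized),
            1 => Ok(TxStatus::Pruned),
            _ => Err(anyhow!("invalid value for tx status {}", value)),
        }
    }
}

fn main() {
    // The enum encoding matches the raw constants on the other side of the diff.
    const TX_STATUS_FINALIZED: u8 = 0;
    const TX_STATUS_PRUNED: u8 = 1;
    assert_eq!(u8::from(TxStatus::Finalized), TX_STATUS_FINALIZED);
    assert_eq!(u8::from(TxStatus::Pruned), TX_STATUS_PRUNED);
    // Reading back the stored byte recovers the status; unknown bytes are rejected.
    assert_eq!(TxStatus::try_from(1u8).unwrap(), TxStatus::Pruned);
    assert!(TxStatus::try_from(2u8).is_err());
}
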
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct BlockHashAndSubmissionIndex { pub struct BlockHashAndSubmissionIndex {
@ -57,24 +31,19 @@ pub struct BlockHashAndSubmissionIndex {
} }
pub struct TransactionStore {
-    flow_kvdb: Arc<dyn ZgsKeyValueDB>,
-    data_kvdb: Arc<dyn ZgsKeyValueDB>,
+    kvdb: Arc<dyn ZgsKeyValueDB>,
    /// This is always updated before writing the database to ensure no intermediate states.
    next_tx_seq: AtomicU64,
}

impl TransactionStore {
-    pub fn new(
-        flow_kvdb: Arc<dyn ZgsKeyValueDB>,
-        data_kvdb: Arc<dyn ZgsKeyValueDB>,
-    ) -> Result<Self> {
-        let next_tx_seq = flow_kvdb
+    pub fn new(kvdb: Arc<dyn ZgsKeyValueDB>) -> Result<Self> {
+        let next_tx_seq = kvdb
            .get(COL_TX, NEXT_TX_KEY.as_bytes())?
            .map(|a| decode_tx_seq(&a))
            .unwrap_or(Ok(0))?;
        Ok(Self {
-            flow_kvdb,
-            data_kvdb,
+            kvdb,
            next_tx_seq: AtomicU64::new(next_tx_seq),
        })
    }
@ -82,8 +51,6 @@ impl TransactionStore {
#[instrument(skip(self))] #[instrument(skip(self))]
/// Return `Ok(Some(tx_seq))` if a previous transaction has the same tx root. /// Return `Ok(Some(tx_seq))` if a previous transaction has the same tx root.
pub fn put_tx(&self, mut tx: Transaction) -> Result<Vec<u64>> { pub fn put_tx(&self, mut tx: Transaction) -> Result<Vec<u64>> {
let start_time = Instant::now();
let old_tx_seq_list = self.get_tx_seq_list_by_data_root(&tx.data_merkle_root)?; let old_tx_seq_list = self.get_tx_seq_list_by_data_root(&tx.data_merkle_root)?;
if old_tx_seq_list.last().is_some_and(|seq| *seq == tx.seq) { if old_tx_seq_list.last().is_some_and(|seq| *seq == tx.seq) {
// The last tx is inserted again, so no need to process it. // The last tx is inserted again, so no need to process it.
@ -91,7 +58,7 @@ impl TransactionStore {
return Ok(old_tx_seq_list); return Ok(old_tx_seq_list);
} }
let mut db_tx = self.flow_kvdb.transaction(); let mut db_tx = self.kvdb.transaction();
if !tx.data.is_empty() { if !tx.data.is_empty() {
tx.size = tx.data.len() as u64; tx.size = tx.data.len() as u64;
let mut padded_data = tx.data.clone(); let mut padded_data = tx.data.clone();
@ -118,35 +85,31 @@ impl TransactionStore {
&new_tx_seq_list.as_ssz_bytes(), &new_tx_seq_list.as_ssz_bytes(),
); );
self.next_tx_seq.store(tx.seq + 1, Ordering::SeqCst); self.next_tx_seq.store(tx.seq + 1, Ordering::SeqCst);
self.flow_kvdb.write(db_tx)?; self.kvdb.write(db_tx)?;
metrics::TX_STORE_PUT.update_since(start_time);
Ok(old_tx_seq_list) Ok(old_tx_seq_list)
} }
pub fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>> { pub fn get_tx_by_seq_number(&self, seq: u64) -> Result<Option<Transaction>> {
let start_time = Instant::now();
if seq >= self.next_tx_seq() { if seq >= self.next_tx_seq() {
return Ok(None); return Ok(None);
} }
let value = try_option!(self.flow_kvdb.get(COL_TX, &seq.to_be_bytes())?); let value = try_option!(self.kvdb.get(COL_TX, &seq.to_be_bytes())?);
let tx = Transaction::from_ssz_bytes(&value).map_err(Error::from)?; let tx = Transaction::from_ssz_bytes(&value).map_err(Error::from)?;
metrics::TX_BY_SEQ_NUMBER.update_since(start_time);
Ok(Some(tx)) Ok(Some(tx))
} }
pub fn remove_tx_after(&self, min_seq: u64) -> Result<Vec<Transaction>> { pub fn remove_tx_after(&self, min_seq: u64) -> Result<Vec<Transaction>> {
let mut removed_txs = Vec::new(); let mut removed_txs = Vec::new();
let max_seq = self.next_tx_seq(); let max_seq = self.next_tx_seq();
let mut flow_db_tx = self.flow_kvdb.transaction(); let mut db_tx = self.kvdb.transaction();
let mut data_db_tx = self.data_kvdb.transaction();
let mut modified_merkle_root_map = HashMap::new(); let mut modified_merkle_root_map = HashMap::new();
for seq in min_seq..max_seq { for seq in min_seq..max_seq {
let Some(tx) = self.get_tx_by_seq_number(seq)? else { let Some(tx) = self.get_tx_by_seq_number(seq)? else {
error!(?seq, ?max_seq, "Transaction missing before the end"); error!(?seq, ?max_seq, "Transaction missing before the end");
break; break;
}; };
flow_db_tx.delete(COL_TX, &seq.to_be_bytes()); db_tx.delete(COL_TX, &seq.to_be_bytes());
data_db_tx.delete(COL_TX_COMPLETED, &seq.to_be_bytes()); db_tx.delete(COL_TX_COMPLETED, &seq.to_be_bytes());
// We only remove tx when the blockchain reorgs. // We only remove tx when the blockchain reorgs.
// If a tx is reverted, all data after it will also be reverted, so we call remove // If a tx is reverted, all data after it will also be reverted, so we call remove
// all indices after it. // all indices after it.
@ -161,25 +124,24 @@ impl TransactionStore {
} }
for (merkle_root, tx_seq_list) in modified_merkle_root_map { for (merkle_root, tx_seq_list) in modified_merkle_root_map {
if tx_seq_list.is_empty() { if tx_seq_list.is_empty() {
flow_db_tx.delete(COL_TX_DATA_ROOT_INDEX, merkle_root.as_bytes()); db_tx.delete(COL_TX_DATA_ROOT_INDEX, merkle_root.as_bytes());
} else { } else {
flow_db_tx.put( db_tx.put(
COL_TX_DATA_ROOT_INDEX, COL_TX_DATA_ROOT_INDEX,
merkle_root.as_bytes(), merkle_root.as_bytes(),
&tx_seq_list.as_ssz_bytes(), &tx_seq_list.as_ssz_bytes(),
); );
} }
} }
flow_db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &min_seq.to_be_bytes()); db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &min_seq.to_be_bytes());
self.next_tx_seq.store(min_seq, Ordering::SeqCst); self.next_tx_seq.store(min_seq, Ordering::SeqCst);
self.data_kvdb.write(data_db_tx)?; self.kvdb.write(db_tx)?;
self.flow_kvdb.write(flow_db_tx)?;
Ok(removed_txs) Ok(removed_txs)
} }
pub fn get_tx_seq_list_by_data_root(&self, data_root: &DataRoot) -> Result<Vec<u64>> { pub fn get_tx_seq_list_by_data_root(&self, data_root: &DataRoot) -> Result<Vec<u64>> {
let value = match self let value = match self
.flow_kvdb .kvdb
.get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())? .get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())?
{ {
Some(v) => v, Some(v) => v,
@ -188,45 +150,37 @@ impl TransactionStore {
Ok(Vec::<u64>::from_ssz_bytes(&value).map_err(Error::from)?) Ok(Vec::<u64>::from_ssz_bytes(&value).map_err(Error::from)?)
} }
pub fn get_first_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result<Option<u64>> {
let value = try_option!(self
.kvdb
.get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())?);
let seq_list = Vec::<u64>::from_ssz_bytes(&value).map_err(Error::from)?;
Ok(seq_list.first().cloned())
}
#[instrument(skip(self))] #[instrument(skip(self))]
pub fn finalize_tx(&self, tx_seq: u64) -> Result<()> { pub fn finalize_tx(&self, tx_seq: u64) -> Result<()> {
Ok(self.data_kvdb.put( Ok(self.kvdb.put(
COL_TX_COMPLETED, COL_TX_COMPLETED,
&tx_seq.to_be_bytes(), &tx_seq.to_be_bytes(),
&[TxStatus::Finalized.into()], &[TX_STATUS_FINALIZED],
)?) )?)
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub fn prune_tx(&self, tx_seq: u64) -> Result<()> { pub fn prune_tx(&self, tx_seq: u64) -> Result<()> {
Ok(self.data_kvdb.put( Ok(self
COL_TX_COMPLETED, .kvdb
&tx_seq.to_be_bytes(), .put(COL_TX_COMPLETED, &tx_seq.to_be_bytes(), &[TX_STATUS_PRUNED])?)
&[TxStatus::Pruned.into()],
)?)
}
pub fn get_tx_status(&self, tx_seq: u64) -> Result<Option<TxStatus>> {
let value = try_option!(self
.data_kvdb
.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?);
match value.first() {
Some(v) => Ok(Some(TxStatus::try_from(*v)?)),
None => Ok(None),
}
} }
pub fn check_tx_completed(&self, tx_seq: u64) -> Result<bool> { pub fn check_tx_completed(&self, tx_seq: u64) -> Result<bool> {
let start_time = Instant::now(); Ok(self.kvdb.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?
let status = self.get_tx_status(tx_seq)?; == Some(vec![TX_STATUS_FINALIZED]))
metrics::CHECK_TX_COMPLETED.update_since(start_time);
Ok(matches!(status, Some(TxStatus::Finalized)))
} }
pub fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool> { pub fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool> {
let status = self.get_tx_status(tx_seq)?; Ok(self.kvdb.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())? == Some(vec![TX_STATUS_PRUNED]))
Ok(matches!(status, Some(TxStatus::Pruned)))
} }
pub fn next_tx_seq(&self) -> u64 { pub fn next_tx_seq(&self) -> u64 {
@ -248,14 +202,14 @@ impl TransactionStore {
(progress.1, p).as_ssz_bytes(), (progress.1, p).as_ssz_bytes(),
)); ));
} }
Ok(self.flow_kvdb.puts(items)?) Ok(self.kvdb.puts(items)?)
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub fn get_progress(&self) -> Result<Option<(u64, H256)>> { pub fn get_progress(&self) -> Result<Option<(u64, H256)>> {
Ok(Some( Ok(Some(
<(u64, H256)>::from_ssz_bytes(&try_option!(self <(u64, H256)>::from_ssz_bytes(&try_option!(self
.flow_kvdb .kvdb
.get(COL_MISC, LOG_SYNC_PROGRESS_KEY.as_bytes())?)) .get(COL_MISC, LOG_SYNC_PROGRESS_KEY.as_bytes())?))
.map_err(Error::from)?, .map_err(Error::from)?,
)) ))
@ -263,7 +217,7 @@ impl TransactionStore {
#[instrument(skip(self))] #[instrument(skip(self))]
pub fn put_log_latest_block_number(&self, block_number: u64) -> Result<()> { pub fn put_log_latest_block_number(&self, block_number: u64) -> Result<()> {
Ok(self.flow_kvdb.put( Ok(self.kvdb.put(
COL_MISC, COL_MISC,
LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes(), LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes(),
&block_number.as_ssz_bytes(), &block_number.as_ssz_bytes(),
@ -274,7 +228,7 @@ impl TransactionStore {
pub fn get_log_latest_block_number(&self) -> Result<Option<u64>> { pub fn get_log_latest_block_number(&self) -> Result<Option<u64>> {
Ok(Some( Ok(Some(
<u64>::from_ssz_bytes(&try_option!(self <u64>::from_ssz_bytes(&try_option!(self
.flow_kvdb .kvdb
.get(COL_MISC, LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes())?)) .get(COL_MISC, LOG_LATEST_BLOCK_NUMBER_KEY.as_bytes())?))
.map_err(Error::from)?, .map_err(Error::from)?,
)) ))
@ -286,7 +240,7 @@ impl TransactionStore {
) -> Result<Option<(H256, Option<u64>)>> { ) -> Result<Option<(H256, Option<u64>)>> {
Ok(Some( Ok(Some(
<(H256, Option<u64>)>::from_ssz_bytes(&try_option!(self <(H256, Option<u64>)>::from_ssz_bytes(&try_option!(self
.flow_kvdb .kvdb
.get(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?)) .get(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?))
.map_err(Error::from)?, .map_err(Error::from)?,
)) ))
@ -294,7 +248,7 @@ impl TransactionStore {
pub fn get_block_hashes(&self) -> Result<Vec<(u64, BlockHashAndSubmissionIndex)>> { pub fn get_block_hashes(&self) -> Result<Vec<(u64, BlockHashAndSubmissionIndex)>> {
let mut block_numbers = vec![]; let mut block_numbers = vec![];
for r in self.flow_kvdb.iter(COL_BLOCK_PROGRESS) { for r in self.kvdb.iter(COL_BLOCK_PROGRESS) {
let (key, val) = r?; let (key, val) = r?;
let block_number = let block_number =
u64::from_be_bytes(key.as_ref().try_into().map_err(|e| anyhow!("{:?}", e))?); u64::from_be_bytes(key.as_ref().try_into().map_err(|e| anyhow!("{:?}", e))?);
@ -314,7 +268,7 @@ impl TransactionStore {
pub fn delete_block_hash_by_number(&self, block_number: u64) -> Result<()> { pub fn delete_block_hash_by_number(&self, block_number: u64) -> Result<()> {
Ok(self Ok(self
.flow_kvdb .kvdb
.delete(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?) .delete(COL_BLOCK_PROGRESS, &block_number.to_be_bytes())?)
} }
@ -338,9 +292,6 @@ impl TransactionStore {
match tx.start_entry_index.cmp(&last_chunk_start_index) { match tx.start_entry_index.cmp(&last_chunk_start_index) {
cmp::Ordering::Greater => { cmp::Ordering::Greater => {
tx_list.push((tx_seq, tx.merkle_nodes)); tx_list.push((tx_seq, tx.merkle_nodes));
if tx.start_entry_index >= last_chunk_start_index + PORA_CHUNK_SIZE as u64 {
break;
}
} }
cmp::Ordering::Equal => { cmp::Ordering::Equal => {
tx_list.push((tx_seq, tx.merkle_nodes)); tx_list.push((tx_seq, tx.merkle_nodes));
@ -384,7 +335,11 @@ impl TransactionStore {
} }
let mut merkle = if last_chunk_start_index == 0 { let mut merkle = if last_chunk_start_index == 0 {
// The first entry hash is initialized as zero. // The first entry hash is initialized as zero.
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(vec![H256::zero()], 1, None) AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(
vec![H256::zero()],
log2_pow2(PORA_CHUNK_SIZE) + 1,
None,
)
} else { } else {
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth( AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(
vec![], vec![],

View File

@ -84,12 +84,14 @@ impl Batcher {
} }
    async fn poll_tx(&self, tx_seq: u64) -> Result<Option<SyncResult>> {
-        // file already finalized or even pruned
-        if let Some(tx_status) = self.store.get_store().get_tx_status(tx_seq)? {
-            let num_terminated: usize = self.terminate_file_sync(tx_seq, false).await;
-            if num_terminated > 0 {
-                info!(%tx_seq, %num_terminated, ?tx_status, "Terminate file sync due to file already completed in db");
-            }
+        // file already exists
+        if self.store.check_tx_completed(tx_seq).await?
+            || self.store.check_tx_pruned(tx_seq).await?
+        {
+            // File may be finalized during file sync, e.g. user uploaded file via RPC.
+            // In this case, just terminate the file sync.
+            let num_terminated = self.terminate_file_sync(tx_seq, false).await;
+            info!(%tx_seq, %num_terminated, "Terminate file sync due to file already finalized in db");
            return Ok(Some(SyncResult::Completed));
        }

View File

@ -15,7 +15,6 @@ use tokio::time::sleep;
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct RandomBatcherState { pub struct RandomBatcherState {
pub name: String,
pub tasks: Vec<u64>, pub tasks: Vec<u64>,
pub pending_txs: usize, pub pending_txs: usize,
pub ready_txs: usize, pub ready_txs: usize,
@ -23,7 +22,6 @@ pub struct RandomBatcherState {
#[derive(Clone)] #[derive(Clone)]
pub struct RandomBatcher { pub struct RandomBatcher {
name: String,
config: Config, config: Config,
batcher: Batcher, batcher: Batcher,
sync_store: Arc<SyncStore>, sync_store: Arc<SyncStore>,
@ -31,14 +29,12 @@ pub struct RandomBatcher {
impl RandomBatcher { impl RandomBatcher {
pub fn new( pub fn new(
name: String,
config: Config, config: Config,
store: Store, store: Store,
sync_send: SyncSender, sync_send: SyncSender,
sync_store: Arc<SyncStore>, sync_store: Arc<SyncStore>,
) -> Self { ) -> Self {
Self { Self {
name,
config, config,
batcher: Batcher::new( batcher: Batcher::new(
config.max_random_workers, config.max_random_workers,
@ -54,7 +50,6 @@ impl RandomBatcher {
let (pending_txs, ready_txs) = self.sync_store.stat().await?; let (pending_txs, ready_txs) = self.sync_store.stat().await?;
Ok(RandomBatcherState { Ok(RandomBatcherState {
name: self.name.clone(),
tasks: self.batcher.tasks().await, tasks: self.batcher.tasks().await,
pending_txs, pending_txs,
ready_txs, ready_txs,
@ -62,20 +57,21 @@ impl RandomBatcher {
} }
pub async fn start(mut self, catched_up: Arc<AtomicBool>) { pub async fn start(mut self, catched_up: Arc<AtomicBool>) {
info!("Start to sync files, state = {:?}", self.get_state().await); info!("Start to sync files");
// wait for log entry sync catched up
while !catched_up.load(Ordering::Relaxed) {
trace!("Cannot sync file in catch-up phase");
sleep(self.config.auto_sync_idle_interval).await;
}
loop { loop {
// if let Ok(state) = self.get_state().await { // disable file sync until catched up
// metrics::RANDOM_STATE_TXS_SYNCING.update(state.tasks.len() as u64); if !catched_up.load(Ordering::Relaxed) {
// metrics::RANDOM_STATE_TXS_READY.update(state.ready_txs as u64); trace!("Cannot sync file in catch-up phase");
// metrics::RANDOM_STATE_TXS_PENDING.update(state.pending_txs as u64); sleep(self.config.auto_sync_idle_interval).await;
// } continue;
}
if let Ok(state) = self.get_state().await {
metrics::RANDOM_STATE_TXS_SYNCING.update(state.tasks.len() as u64);
metrics::RANDOM_STATE_TXS_READY.update(state.ready_txs as u64);
metrics::RANDOM_STATE_TXS_PENDING.update(state.pending_txs as u64);
}
match self.sync_once().await { match self.sync_once().await {
Ok(true) => {} Ok(true) => {}

View File

@ -1,103 +0,0 @@
use std::sync::{
atomic::{AtomicU64, Ordering},
Arc,
};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use storage::log_store::log_manager::DATA_DB_KEY;
use storage_async::Store;
use tokio::time::sleep;
use crate::Config;
use super::sync_store::{Queue, SyncStore};
const KEY_NEXT_TX_SEQ: &str = "sync.manager.historical.next_tx_seq";
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct HistoricalTxWriterState {
pub next_tx_seq: u64,
pub pending_txs: usize,
pub ready_txs: usize,
}
pub struct HistoricalTxWriter {
config: Config,
store: Store,
sync_store: Arc<SyncStore>,
next_tx_seq: Arc<AtomicU64>,
}
impl HistoricalTxWriter {
pub async fn new(config: Config, store: Store, sync_store: Arc<SyncStore>) -> Result<Self> {
let next_tx_seq = store
.get_config_decoded(&KEY_NEXT_TX_SEQ, DATA_DB_KEY)
.await?;
Ok(Self {
config,
store,
sync_store,
next_tx_seq: Arc::new(AtomicU64::new(next_tx_seq.unwrap_or(0))),
})
}
pub async fn get_state(&self) -> Result<HistoricalTxWriterState> {
let (pending_txs, ready_txs) = self.sync_store.stat().await?;
Ok(HistoricalTxWriterState {
next_tx_seq: self.next_tx_seq.load(Ordering::Relaxed),
pending_txs,
ready_txs,
})
}
pub async fn start(mut self) {
info!(
"Start to write historical files into sync store, state = {:?}",
self.get_state().await
);
loop {
match self.write_once().await {
Ok(true) => {}
Ok(false) => {
trace!(
"There is no tx to write in sync store, state = {:?}",
self.get_state().await
);
sleep(self.config.auto_sync_idle_interval).await;
}
Err(err) => {
warn!(%err, "Failed to write tx once, state = {:?}", self.get_state().await);
sleep(self.config.auto_sync_error_interval).await;
}
}
}
}
async fn write_once(&mut self) -> Result<bool> {
let mut next_tx_seq = self.next_tx_seq.load(Ordering::Relaxed);
// no tx to write in sync store
if next_tx_seq >= self.store.get_store().next_tx_seq() {
return Ok(false);
}
// write tx in sync store if not finalized or pruned
if self.store.get_store().get_tx_status(next_tx_seq)?.is_none() {
self.sync_store.insert(next_tx_seq, Queue::Ready).await?;
}
// move forward
next_tx_seq += 1;
self.store
.set_config_encoded(&KEY_NEXT_TX_SEQ, &next_tx_seq, DATA_DB_KEY)
.await?;
self.next_tx_seq.store(next_tx_seq, Ordering::Relaxed);
Ok(true)
}
}

View File

@ -9,24 +9,18 @@ use storage_async::Store;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio::sync::{ use tokio::sync::{
broadcast, broadcast,
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, mpsc::{unbounded_channel, UnboundedSender},
oneshot, oneshot,
}; };
use crate::{Config, SyncSender}; use crate::{Config, SyncSender};
use super::{ use super::{batcher_random::RandomBatcher, batcher_serial::SerialBatcher, sync_store::SyncStore};
batcher_random::RandomBatcher,
batcher_serial::SerialBatcher,
historical_tx_writer::HistoricalTxWriter,
sync_store::{Queue, SyncStore},
};
pub struct AutoSyncManager { pub struct AutoSyncManager {
pub serial: Option<SerialBatcher>, pub serial: SerialBatcher,
pub random: RandomBatcher, pub random: RandomBatcher,
pub file_announcement_send: UnboundedSender<u64>, pub file_announcement_send: UnboundedSender<u64>,
pub new_file_send: UnboundedSender<u64>,
pub catched_up: Arc<AtomicBool>, pub catched_up: Arc<AtomicBool>,
} }
@ -39,112 +33,42 @@ impl AutoSyncManager {
log_sync_recv: broadcast::Receiver<LogSyncEvent>, log_sync_recv: broadcast::Receiver<LogSyncEvent>,
catch_up_end_recv: oneshot::Receiver<()>, catch_up_end_recv: oneshot::Receiver<()>,
) -> Result<Self> { ) -> Result<Self> {
let (file_announcement_send, file_announcement_recv) = unbounded_channel(); let (send, recv) = unbounded_channel();
let (new_file_send, new_file_recv) = unbounded_channel(); let sync_store = Arc::new(SyncStore::new(store.clone()));
let sync_store = if config.neighbors_only {
// use v2 db to avoid reading v1 files that announced from the whole network instead of neighbors
Arc::new(SyncStore::new_with_name(
store.clone(),
"pendingv2",
"readyv2",
))
} else {
Arc::new(SyncStore::new(store.clone()))
};
let catched_up = Arc::new(AtomicBool::new(false)); let catched_up = Arc::new(AtomicBool::new(false));
// handle new file
executor.spawn(
Self::handle_new_file(new_file_recv, sync_store.clone()),
"auto_sync_handle_new_file",
);
// sync in sequence // sync in sequence
let serial = if config.neighbors_only {
None
} else {
let serial = let serial =
SerialBatcher::new(config, store.clone(), sync_send.clone(), sync_store.clone()) SerialBatcher::new(config, store.clone(), sync_send.clone(), sync_store.clone())
.await?; .await?;
executor.spawn( executor.spawn(
serial serial
.clone() .clone()
.start(file_announcement_recv, log_sync_recv, catched_up.clone()), .start(recv, log_sync_recv, catched_up.clone()),
"auto_sync_serial", "auto_sync_serial",
); );
Some(serial)
};
// sync randomly // sync randomly
let random = RandomBatcher::new( let random = RandomBatcher::new(config, store, sync_send, sync_store);
"random".into(),
config,
store.clone(),
sync_send.clone(),
sync_store,
);
executor.spawn(random.clone().start(catched_up.clone()), "auto_sync_random"); executor.spawn(random.clone().start(catched_up.clone()), "auto_sync_random");
// handle on catched up notification // handle on catched up notification
let catched_up_cloned = catched_up.clone();
executor.spawn( executor.spawn(
Self::listen_catch_up(catch_up_end_recv, catched_up.clone()), async move {
if catch_up_end_recv.await.is_ok() {
info!("log entry catched up");
catched_up_cloned.store(true, Ordering::Relaxed);
}
},
"auto_sync_wait_for_catchup", "auto_sync_wait_for_catchup",
); );
// sync randomly for files without NewFile announcement
if config.neighbors_only {
let historical_sync_store = Arc::new(SyncStore::new_with_name(
store.clone(),
"pendingv2_historical",
"readyv2_historical",
));
let writer =
HistoricalTxWriter::new(config, store.clone(), historical_sync_store.clone())
.await?;
executor.spawn(writer.start(), "auto_sync_historical_writer");
let random_historical = RandomBatcher::new(
"random_historical".into(),
config,
store,
sync_send,
historical_sync_store,
);
executor.spawn(
random_historical.start(catched_up.clone()),
"auto_sync_random_historical",
);
}
Ok(Self { Ok(Self {
serial, serial,
random, random,
file_announcement_send, file_announcement_send: send,
new_file_send,
catched_up, catched_up,
}) })
} }
async fn handle_new_file(
mut new_file_recv: UnboundedReceiver<u64>,
sync_store: Arc<SyncStore>,
) {
while let Some(tx_seq) = new_file_recv.recv().await {
if let Err(err) = sync_store.insert(tx_seq, Queue::Ready).await {
warn!(?err, %tx_seq, "Failed to insert new file to ready queue");
}
}
}
async fn listen_catch_up(
catch_up_end_recv: oneshot::Receiver<()>,
catched_up: Arc<AtomicBool>,
) {
if catch_up_end_recv.await.is_ok() {
info!("log entry catched up");
catched_up.store(true, Ordering::Relaxed);
}
}
} }

View File

@ -14,9 +14,9 @@ lazy_static::lazy_static! {
pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout"); pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout");
// random auto sync // random auto sync
// pub static ref RANDOM_STATE_TXS_SYNCING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_syncing", 1024); pub static ref RANDOM_STATE_TXS_SYNCING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_syncing", 1024);
// pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024); pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024);
// pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024); pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024);
pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed"); pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed");
pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed"); pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed");

View File

@ -1,7 +1,6 @@
mod batcher; mod batcher;
pub mod batcher_random; pub mod batcher_random;
pub mod batcher_serial; pub mod batcher_serial;
mod historical_tx_writer;
pub mod manager; pub mod manager;
mod metrics; mod metrics;
pub mod sync_store; pub mod sync_store;

View File

@ -1,10 +1,7 @@
use super::tx_store::TxStore; use super::tx_store::TxStore;
use anyhow::Result; use anyhow::Result;
use std::sync::Arc; use std::sync::Arc;
use storage::log_store::{ use storage::log_store::config::{ConfigTx, ConfigurableExt};
config::{ConfigTx, ConfigurableExt},
log_manager::DATA_DB_KEY,
};
use storage_async::Store; use storage_async::Store;
use tokio::sync::RwLock; use tokio::sync::RwLock;
@ -45,14 +42,6 @@ impl SyncStore {
} }
} }
pub fn new_with_name(store: Store, pending: &'static str, ready: &'static str) -> Self {
Self {
store: Arc::new(RwLock::new(store)),
pending_txs: TxStore::new(pending),
ready_txs: TxStore::new(ready),
}
}
/// Returns the number of pending txs and ready txs. /// Returns the number of pending txs and ready txs.
pub async fn stat(&self) -> Result<(usize, usize)> { pub async fn stat(&self) -> Result<(usize, usize)> {
let async_store = self.store.read().await; let async_store = self.store.read().await;
@ -69,10 +58,10 @@ impl SyncStore {
let store = async_store.get_store(); let store = async_store.get_store();
// load next_tx_seq // load next_tx_seq
let next_tx_seq = store.get_config_decoded(&KEY_NEXT_TX_SEQ, DATA_DB_KEY)?; let next_tx_seq = store.get_config_decoded(&KEY_NEXT_TX_SEQ)?;
// load max_tx_seq // load max_tx_seq
let max_tx_seq = store.get_config_decoded(&KEY_MAX_TX_SEQ, DATA_DB_KEY)?; let max_tx_seq = store.get_config_decoded(&KEY_MAX_TX_SEQ)?;
Ok((next_tx_seq, max_tx_seq)) Ok((next_tx_seq, max_tx_seq))
} }
@ -80,13 +69,13 @@ impl SyncStore {
pub async fn set_next_tx_seq(&self, tx_seq: u64) -> Result<()> { pub async fn set_next_tx_seq(&self, tx_seq: u64) -> Result<()> {
let async_store = self.store.write().await; let async_store = self.store.write().await;
let store = async_store.get_store(); let store = async_store.get_store();
store.set_config_encoded(&KEY_NEXT_TX_SEQ, &tx_seq, DATA_DB_KEY) store.set_config_encoded(&KEY_NEXT_TX_SEQ, &tx_seq)
} }
pub async fn set_max_tx_seq(&self, tx_seq: u64) -> Result<()> { pub async fn set_max_tx_seq(&self, tx_seq: u64) -> Result<()> {
let async_store = self.store.write().await; let async_store = self.store.write().await;
let store = async_store.get_store(); let store = async_store.get_store();
store.set_config_encoded(&KEY_MAX_TX_SEQ, &tx_seq, DATA_DB_KEY) store.set_config_encoded(&KEY_MAX_TX_SEQ, &tx_seq)
} }
pub async fn contains(&self, tx_seq: u64) -> Result<Option<Queue>> { pub async fn contains(&self, tx_seq: u64) -> Result<Option<Queue>> {
@ -117,7 +106,7 @@ impl SyncStore {
} }
let removed = self.pending_txs.remove(store, Some(&mut tx), tx_seq)?; let removed = self.pending_txs.remove(store, Some(&mut tx), tx_seq)?;
store.exec_configs(tx, DATA_DB_KEY)?; store.exec_configs(tx)?;
if removed { if removed {
Ok(InsertResult::Upgraded) Ok(InsertResult::Upgraded)
@ -131,7 +120,7 @@ impl SyncStore {
} }
let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq)?; let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq)?;
store.exec_configs(tx, DATA_DB_KEY)?; store.exec_configs(tx)?;
if removed { if removed {
Ok(InsertResult::Downgraded) Ok(InsertResult::Downgraded)
@ -154,7 +143,7 @@ impl SyncStore {
let added = self.ready_txs.add(store, Some(&mut tx), tx_seq)?; let added = self.ready_txs.add(store, Some(&mut tx), tx_seq)?;
store.exec_configs(tx, DATA_DB_KEY)?; store.exec_configs(tx)?;
Ok(added) Ok(added)
} }
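
The hunk above reads and writes sync progress as typed values under fixed config keys (KEY_NEXT_TX_SEQ, KEY_MAX_TX_SEQ) through get_config_decoded / set_config_encoded. Below is a minimal, self-contained sketch of that key/value idea only; the HashMap store, key names, and little-endian u64 encoding are illustrative assumptions, not the repo's actual ConfigurableExt trait.

// Hypothetical stand-in for the node's typed config column.
use std::collections::HashMap;

const KEY_NEXT_TX_SEQ: &str = "sync.next_tx_seq";
const KEY_MAX_TX_SEQ: &str = "sync.max_tx_seq";

#[derive(Default)]
struct ConfigStore {
    kv: HashMap<&'static str, Vec<u8>>,
}

impl ConfigStore {
    // Encode a u64 and store it under a well-known key.
    fn set_config_encoded(&mut self, key: &'static str, value: u64) {
        self.kv.insert(key, value.to_le_bytes().to_vec());
    }

    // Decode the stored bytes back into a u64, if the key exists.
    fn get_config_decoded(&self, key: &str) -> Option<u64> {
        self.kv.get(key).map(|bytes| {
            let mut buf = [0u8; 8];
            buf.copy_from_slice(bytes);
            u64::from_le_bytes(buf)
        })
    }
}

fn main() {
    let mut store = ConfigStore::default();
    store.set_config_encoded(KEY_NEXT_TX_SEQ, 42);
    store.set_config_encoded(KEY_MAX_TX_SEQ, 100);
    assert_eq!(store.get_config_decoded(KEY_NEXT_TX_SEQ), Some(42));
    assert_eq!(store.get_config_decoded("missing"), None);
}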

@ -1,7 +1,6 @@
use anyhow::Result; use anyhow::Result;
use rand::Rng; use rand::Rng;
use storage::log_store::config::{ConfigTx, ConfigurableExt}; use storage::log_store::config::{ConfigTx, ConfigurableExt};
use storage::log_store::log_manager::DATA_DB_KEY;
use storage::log_store::Store; use storage::log_store::Store;
/// TxStore is used to store pending transactions that to be synchronized in advance. /// TxStore is used to store pending transactions that to be synchronized in advance.
@ -33,11 +32,11 @@ impl TxStore {
} }
fn index_of(&self, store: &dyn Store, tx_seq: u64) -> Result<Option<usize>> { fn index_of(&self, store: &dyn Store, tx_seq: u64) -> Result<Option<usize>> {
store.get_config_decoded(&self.key_seq_to_index(tx_seq), DATA_DB_KEY) store.get_config_decoded(&self.key_seq_to_index(tx_seq))
} }
fn at(&self, store: &dyn Store, index: usize) -> Result<Option<u64>> { fn at(&self, store: &dyn Store, index: usize) -> Result<Option<u64>> {
store.get_config_decoded(&self.key_index_to_seq(index), DATA_DB_KEY) store.get_config_decoded(&self.key_index_to_seq(index))
} }
pub fn has(&self, store: &dyn Store, tx_seq: u64) -> Result<bool> { pub fn has(&self, store: &dyn Store, tx_seq: u64) -> Result<bool> {
@ -46,7 +45,7 @@ impl TxStore {
pub fn count(&self, store: &dyn Store) -> Result<usize> { pub fn count(&self, store: &dyn Store) -> Result<usize> {
store store
.get_config_decoded(&self.key_count, DATA_DB_KEY) .get_config_decoded(&self.key_count)
.map(|x| x.unwrap_or(0)) .map(|x| x.unwrap_or(0))
} }
@ -71,7 +70,7 @@ impl TxStore {
if let Some(db_tx) = db_tx { if let Some(db_tx) = db_tx {
db_tx.append(&mut tx); db_tx.append(&mut tx);
} else { } else {
store.exec_configs(tx, DATA_DB_KEY)?; store.exec_configs(tx)?;
} }
Ok(true) Ok(true)
@ -131,7 +130,7 @@ impl TxStore {
if let Some(db_tx) = db_tx { if let Some(db_tx) = db_tx {
db_tx.append(&mut tx); db_tx.append(&mut tx);
} else { } else {
store.exec_configs(tx, DATA_DB_KEY)?; store.exec_configs(tx)?;
} }
Ok(true) Ok(true)
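
The TxStore hunks above keep three key families per queue (seq-to-index, index-to-seq, and a count key), which makes membership checks, appends, and removals cheap without scanning. Below is an in-memory sketch of that indexed-set idea; the swap-remove strategy and key naming are illustrative assumptions rather than the crate's exact behavior, and a HashMap stands in for the persistent config store.

use std::collections::HashMap;

struct IndexedTxSet {
    name: &'static str,
    kv: HashMap<String, u64>,
}

impl IndexedTxSet {
    fn new(name: &'static str) -> Self {
        Self { name, kv: HashMap::new() }
    }

    fn key_count(&self) -> String { format!("{}_count", self.name) }
    fn key_seq_to_index(&self, seq: u64) -> String { format!("{}_s2i_{}", self.name, seq) }
    fn key_index_to_seq(&self, index: u64) -> String { format!("{}_i2s_{}", self.name, index) }

    fn count(&self) -> u64 {
        self.kv.get(&self.key_count()).copied().unwrap_or(0)
    }

    fn has(&self, seq: u64) -> bool {
        self.kv.contains_key(&self.key_seq_to_index(seq))
    }

    // Append `seq` if absent; returns whether it was newly added.
    fn add(&mut self, seq: u64) -> bool {
        if self.has(seq) {
            return false;
        }
        let index = self.count();
        self.kv.insert(self.key_seq_to_index(seq), index);
        self.kv.insert(self.key_index_to_seq(index), seq);
        self.kv.insert(self.key_count(), index + 1);
        true
    }

    // Remove `seq` by swapping the last element into its slot;
    // returns whether it was present.
    fn remove(&mut self, seq: u64) -> bool {
        let index = match self.kv.remove(&self.key_seq_to_index(seq)) {
            Some(i) => i,
            None => return false,
        };
        let last = self.count() - 1;
        if index != last {
            let moved_seq = self.kv[&self.key_index_to_seq(last)];
            self.kv.insert(self.key_index_to_seq(index), moved_seq);
            self.kv.insert(self.key_seq_to_index(moved_seq), index);
        }
        self.kv.remove(&self.key_index_to_seq(last));
        self.kv.insert(self.key_count(), last);
        true
    }
}

fn main() {
    let mut pending = IndexedTxSet::new("pending");
    assert!(pending.add(7));
    assert!(pending.add(9));
    assert!(pending.remove(7));
    assert_eq!(pending.count(), 1);
    assert!(pending.has(9));
}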

@ -1,11 +1,12 @@
use network::{NetworkMessage, NetworkSender, PeerAction, PeerId, PubsubMessage, ReportSource}; use network::{NetworkMessage, PeerAction, PeerId, PubsubMessage, ReportSource};
use tokio::sync::mpsc;
pub struct SyncNetworkContext { pub struct SyncNetworkContext {
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
} }
impl SyncNetworkContext { impl SyncNetworkContext {
pub fn new(network_send: NetworkSender) -> Self { pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage>) -> Self {
Self { network_send } Self { network_send }
} }
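
With this change SyncNetworkContext owns the sending half of a plain tokio unbounded channel rather than the NetworkSender alias. A small sketch of that wrapper pattern, assuming a tokio runtime (rt-multi-thread + macros features) and with NetworkMessage reduced to a stub enum; the real enum lives in the network crate.

use tokio::sync::mpsc;

#[derive(Debug)]
enum NetworkMessage {
    Publish { topic: &'static str },
}

struct SyncNetworkContext {
    network_send: mpsc::UnboundedSender<NetworkMessage>,
}

impl SyncNetworkContext {
    fn new(network_send: mpsc::UnboundedSender<NetworkMessage>) -> Self {
        Self { network_send }
    }

    // Best-effort send: an unbounded channel never blocks, so the only
    // failure mode is a dropped receiver, which is logged and ignored.
    fn send(&self, msg: NetworkMessage) {
        if self.network_send.send(msg).is_err() {
            eprintln!("network receiver dropped; message discarded");
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let ctx = SyncNetworkContext::new(tx);
    ctx.send(NetworkMessage::Publish { topic: "find_file" });
    assert!(matches!(rx.recv().await, Some(NetworkMessage::Publish { .. })));
}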

@ -14,13 +14,12 @@ use shared_types::{timestamp_now, ChunkArrayWithProof, TxID, CHUNK_SIZE};
use ssz::Encode; use ssz::Encode;
use std::{sync::Arc, time::Instant}; use std::{sync::Arc, time::Instant};
use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE}; use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
use storage_async::{ShardConfig, Store}; use storage_async::Store;
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub enum FailureReason { pub enum FailureReason {
DBError(String), DBError(String),
TxReverted(TxID), TxReverted(TxID),
TimeoutFindFile,
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
@ -160,14 +159,11 @@ impl SerialSyncController {
/// Find more peers to sync chunks. Return whether `FindFile` pubsub message published, /// Find more peers to sync chunks. Return whether `FindFile` pubsub message published,
fn try_find_peers(&mut self) { fn try_find_peers(&mut self) {
let (published, num_new_peers) = if !self.goal.is_all_chunks() { let (published, num_new_peers) = if self.goal.is_all_chunks() {
self.publish_find_file()
} else {
self.publish_find_chunks(); self.publish_find_chunks();
(true, 0) (true, 0)
} else if self.config.neighbors_only {
self.do_publish_find_file();
(true, 0)
} else {
self.publish_find_file()
}; };
info!(%self.tx_seq, %published, %num_new_peers, "Finding peers"); info!(%self.tx_seq, %published, %num_new_peers, "Finding peers");
@ -203,21 +199,12 @@ impl SerialSyncController {
return (false, num_new_peers); return (false, num_new_peers);
} }
self.do_publish_find_file();
(true, num_new_peers)
}
fn do_publish_find_file(&self) {
let shard_config = self.store.get_store().get_shard_config();
self.ctx.publish(PubsubMessage::FindFile(FindFile { self.ctx.publish(PubsubMessage::FindFile(FindFile {
tx_id: self.tx_id, tx_id: self.tx_id,
num_shard: shard_config.num_shard,
shard_id: shard_config.shard_id,
neighbors_only: self.config.neighbors_only,
timestamp: timestamp_now(), timestamp: timestamp_now(),
})); }));
(true, num_new_peers)
} }
fn publish_find_chunks(&self) { fn publish_find_chunks(&self) {
@ -350,14 +337,6 @@ impl SerialSyncController {
} }
} }
/// Triggered when any peer (TCP connected) announced file via RPC message.
pub fn on_peer_announced(&mut self, peer_id: PeerId, shard_config: ShardConfig) {
self.peers
.add_new_peer_with_config(peer_id, Multiaddr::empty(), shard_config);
self.peers
.update_state_force(&peer_id, PeerState::Connected);
}
pub fn on_dail_failed(&mut self, peer_id: PeerId, err: &DialError) { pub fn on_dail_failed(&mut self, peer_id: PeerId, err: &DialError) {
match err { match err {
DialError::ConnectionLimit(_) => { DialError::ConnectionLimit(_) => {
@ -566,9 +545,6 @@ impl SerialSyncController {
info!(%self.tx_seq, "Succeeded to finalize file"); info!(%self.tx_seq, "Succeeded to finalize file");
self.state = SyncState::Completed; self.state = SyncState::Completed;
metrics::SERIAL_SYNC_FILE_COMPLETED.update_since(self.since.0); metrics::SERIAL_SYNC_FILE_COMPLETED.update_since(self.since.0);
// notify neighbor nodes about new file completed to sync
self.ctx
.send(NetworkMessage::AnnounceLocalFile { tx_id: self.tx_id });
} }
Ok(false) => { Ok(false) => {
warn!(?self.tx_id, %self.tx_seq, "Transaction reverted during finalize_tx"); warn!(?self.tx_id, %self.tx_seq, "Transaction reverted during finalize_tx");
@ -660,21 +636,14 @@ impl SerialSyncController {
.all_shards_available(vec![Found, Connecting, Connected]) .all_shards_available(vec![Found, Connecting, Connected])
{ {
self.state = SyncState::FoundPeers; self.state = SyncState::FoundPeers;
} else {
// FindFile timeout
if since.elapsed() >= self.config.peer_find_timeout {
if self.config.neighbors_only {
self.state = SyncState::Failed {
reason: FailureReason::TimeoutFindFile,
};
} else { } else {
// storage node may not have the specific file when `FindFile` // storage node may not have the specific file when `FindFile`
// gossip message received. In this case, just broadcast the // gossip message received. In this case, just broadcast the
// `FindFile` message again. // `FindFile` message again.
if since.elapsed() >= self.config.peer_find_timeout {
debug!(%self.tx_seq, "Finding peer timeout and try to find peers again"); debug!(%self.tx_seq, "Finding peer timeout and try to find peers again");
self.try_find_peers(); self.try_find_peers();
} }
}
completed = true; completed = true;
} }
@ -750,13 +719,13 @@ mod tests {
use crate::test_util::create_2_store; use crate::test_util::create_2_store;
use crate::test_util::tests::create_file_location_cache; use crate::test_util::tests::create_file_location_cache;
use libp2p::identity; use libp2p::identity;
use network::{new_network_channel, NetworkReceiver};
use network::{ReportSource, Request}; use network::{ReportSource, Request};
use storage::log_store::log_manager::LogConfig; use storage::log_store::log_manager::LogConfig;
use storage::log_store::log_manager::LogManager; use storage::log_store::log_manager::LogManager;
use storage::log_store::LogStoreRead; use storage::log_store::LogStoreRead;
use storage::H256; use storage::H256;
use task_executor::{test_utils::TestRuntime, TaskExecutor}; use task_executor::{test_utils::TestRuntime, TaskExecutor};
use tokio::sync::mpsc::{self, UnboundedReceiver};
#[test] #[test]
fn test_status() { fn test_status() {
@ -1544,10 +1513,6 @@ mod tests {
controller.on_response(peer_id, chunks).await; controller.on_response(peer_id, chunks).await;
assert_eq!(*controller.get_status(), SyncState::Completed); assert_eq!(*controller.get_status(), SyncState::Completed);
assert!(matches!(
network_recv.try_recv().unwrap(),
NetworkMessage::AnnounceLocalFile { .. }
));
assert!(network_recv.try_recv().is_err()); assert!(network_recv.try_recv().is_err());
} }
@ -1649,7 +1614,7 @@ mod tests {
fn create_default_controller( fn create_default_controller(
task_executor: TaskExecutor, task_executor: TaskExecutor,
peer_id: Option<PeerId>, peer_id: Option<PeerId>,
) -> (SerialSyncController, NetworkReceiver) { ) -> (SerialSyncController, UnboundedReceiver<NetworkMessage>) {
let tx_id = TxID { let tx_id = TxID {
seq: 0, seq: 0,
hash: H256::random(), hash: H256::random(),
@ -1657,7 +1622,7 @@ mod tests {
let num_chunks = 123; let num_chunks = 123;
let config = LogConfig::default(); let config = LogConfig::default();
let store = Arc::new(LogManager::memorydb(config).unwrap()); let store = Arc::new(LogManager::memorydb(config, task_executor.clone()).unwrap());
create_controller(task_executor, peer_id, store, tx_id, num_chunks) create_controller(task_executor, peer_id, store, tx_id, num_chunks)
} }
@ -1668,8 +1633,8 @@ mod tests {
store: Arc<LogManager>, store: Arc<LogManager>,
tx_id: TxID, tx_id: TxID,
num_chunks: usize, num_chunks: usize,
) -> (SerialSyncController, NetworkReceiver) { ) -> (SerialSyncController, UnboundedReceiver<NetworkMessage>) {
let (network_send, network_recv) = new_network_channel(); let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
let ctx = Arc::new(SyncNetworkContext::new(network_send)); let ctx = Arc::new(SyncNetworkContext::new(network_send));
let peer_id = match peer_id { let peer_id = match peer_id {
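
In the new version of the hunk above, the controller stays in FindingPeers and simply re-broadcasts FindFile when peer_find_timeout elapses, instead of failing the sync. A simplified, self-contained sketch of that retry loop; the SyncState variants, peer counting, and timer reset here are illustrative assumptions, and real pubsub and peer tracking are stubbed out.

use std::time::{Duration, Instant};

#[derive(Debug, Clone, Copy, PartialEq)]
enum SyncState {
    FindingPeers { since: Instant },
    FoundPeers,
}

struct Controller {
    state: SyncState,
    peer_find_timeout: Duration,
    known_peers: usize,
}

impl Controller {
    fn publish_find_file(&self) {
        // The real controller publishes a FindFile pubsub message here.
        println!("broadcasting FindFile");
    }

    fn transition(&mut self) {
        if let SyncState::FindingPeers { since } = self.state {
            if self.known_peers > 0 {
                // Enough peers answered; move on to chunk requests.
                self.state = SyncState::FoundPeers;
            } else if since.elapsed() >= self.peer_find_timeout {
                // A storage node may not have the file yet when the first
                // FindFile gossip goes out, so broadcast it again and restart
                // the timer (resetting the timer is a simplification).
                self.publish_find_file();
                self.state = SyncState::FindingPeers { since: Instant::now() };
            }
        }
    }
}

fn main() {
    // Zero timeout so the retry path fires immediately in this demo.
    let mut ctrl = Controller {
        state: SyncState::FindingPeers { since: Instant::now() },
        peer_find_timeout: Duration::from_secs(0),
        known_peers: 0,
    };
    ctrl.transition(); // no peers yet -> FindFile re-broadcast
    ctrl.known_peers = 1;
    ctrl.transition(); // peers known -> FoundPeers
    assert_eq!(ctrl.state, SyncState::FoundPeers);
}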

@ -21,10 +21,6 @@ use std::{
#[serde(default)] #[serde(default)]
pub struct Config { pub struct Config {
// sync service config // sync service config
/// Indicates whether to sync file from neighbor nodes only.
/// This is to avoid flooding file announcements in the whole network,
/// which leads to high latency or even timeout to sync files.
pub neighbors_only: bool,
#[serde(deserialize_with = "deserialize_duration")] #[serde(deserialize_with = "deserialize_duration")]
pub heartbeat_interval: Duration, pub heartbeat_interval: Duration,
pub auto_sync_enabled: bool, pub auto_sync_enabled: bool,
@ -68,7 +64,6 @@ impl Default for Config {
fn default() -> Self { fn default() -> Self {
Self { Self {
// sync service config // sync service config
neighbors_only: false,
heartbeat_interval: Duration::from_secs(5), heartbeat_interval: Duration::from_secs(5),
auto_sync_enabled: false, auto_sync_enabled: false,
max_sync_files: 8, max_sync_files: 8,
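
The Config hunk above combines #[serde(default)] with a deserialize_with duration helper, so fields missing from the TOML fall back to the Default impl and human-readable intervals such as "5s" parse into a Duration. A hedged sketch of that behavior; humantime_serde and the toml crate are stand-ins for the repo's own deserialize_duration (assumed Cargo deps: serde with the derive feature, toml, humantime-serde).

use serde::Deserialize;
use std::time::Duration;

#[derive(Debug, Deserialize)]
#[serde(default)]
struct Config {
    // Parses strings like "5s" or "30s" into a Duration.
    #[serde(with = "humantime_serde")]
    heartbeat_interval: Duration,
    auto_sync_enabled: bool,
    max_sync_files: usize,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            heartbeat_interval: Duration::from_secs(5),
            auto_sync_enabled: false,
            max_sync_files: 8,
        }
    }
}

fn main() {
    // Only one field provided; the rest come from Default.
    let cfg: Config = toml::from_str("auto_sync_enabled = true").unwrap();
    assert!(cfg.auto_sync_enabled);
    assert_eq!(cfg.heartbeat_interval, Duration::from_secs(5));
    assert_eq!(cfg.max_sync_files, 8);
}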

@ -8,11 +8,11 @@ use anyhow::{anyhow, bail, Result};
use file_location_cache::FileLocationCache; use file_location_cache::FileLocationCache;
use libp2p::swarm::DialError; use libp2p::swarm::DialError;
use log_entry_sync::LogSyncEvent; use log_entry_sync::LogSyncEvent;
use network::rpc::methods::FileAnnouncement; use network::types::{AnnounceChunks, FindFile};
use network::types::{AnnounceChunks, FindFile, NewFile}; use network::PubsubMessage;
use network::{ use network::{
rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, NetworkSender, rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, PeerId,
PeerId, PeerRequestId, PubsubMessage, SyncId as RequestId, PeerRequestId, SyncId as RequestId,
}; };
use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, Transaction, TxID}; use shared_types::{bytes_to_chunks, timestamp_now, ChunkArrayWithProof, Transaction, TxID};
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
@ -26,7 +26,7 @@ use storage::error::Result as StorageResult;
use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE}; use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};
use storage::log_store::Store as LogStore; use storage::log_store::Store as LogStore;
use storage_async::Store; use storage_async::Store;
use tokio::sync::{broadcast, oneshot}; use tokio::sync::{broadcast, mpsc, oneshot};
pub type SyncSender = channel::Sender<SyncMessage, SyncRequest, SyncResponse>; pub type SyncSender = channel::Sender<SyncMessage, SyncRequest, SyncResponse>;
pub type SyncReceiver = channel::Receiver<SyncMessage, SyncRequest, SyncResponse>; pub type SyncReceiver = channel::Receiver<SyncMessage, SyncRequest, SyncResponse>;
@ -70,15 +70,6 @@ pub enum SyncMessage {
AnnounceChunksGossip { AnnounceChunksGossip {
msg: AnnounceChunks, msg: AnnounceChunks,
}, },
NewFile {
from: PeerId,
msg: NewFile,
},
AnnounceFile {
peer_id: PeerId,
request_id: PeerRequestId,
announcement: FileAnnouncement,
},
} }
#[derive(Debug)] #[derive(Debug)]
@ -141,7 +132,7 @@ pub struct SyncService {
impl SyncService { impl SyncService {
pub async fn spawn( pub async fn spawn(
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
store: Arc<dyn LogStore>, store: Arc<dyn LogStore>,
file_location_cache: Arc<FileLocationCache>, file_location_cache: Arc<FileLocationCache>,
event_recv: broadcast::Receiver<LogSyncEvent>, event_recv: broadcast::Receiver<LogSyncEvent>,
@ -162,7 +153,7 @@ impl SyncService {
pub async fn spawn_with_config( pub async fn spawn_with_config(
config: Config, config: Config,
executor: task_executor::TaskExecutor, executor: task_executor::TaskExecutor,
network_send: NetworkSender, network_send: mpsc::UnboundedSender<NetworkMessage>,
store: Arc<dyn LogStore>, store: Arc<dyn LogStore>,
file_location_cache: Arc<FileLocationCache>, file_location_cache: Arc<FileLocationCache>,
event_recv: broadcast::Receiver<LogSyncEvent>, event_recv: broadcast::Receiver<LogSyncEvent>,
@ -274,12 +265,6 @@ impl SyncService {
SyncMessage::AnnounceShardConfig { .. } => { SyncMessage::AnnounceShardConfig { .. } => {
// FIXME: Check if controllers need to be reset? // FIXME: Check if controllers need to be reset?
} }
SyncMessage::NewFile { from, msg } => self.on_new_file_gossip(from, msg).await,
SyncMessage::AnnounceFile {
peer_id,
announcement,
..
} => self.on_announce_file(peer_id, announcement).await,
} }
} }
@ -294,10 +279,7 @@ impl SyncService {
Some(manager) => SyncServiceState { Some(manager) => SyncServiceState {
num_syncing: self.controllers.len(), num_syncing: self.controllers.len(),
catched_up: Some(manager.catched_up.load(Ordering::Relaxed)), catched_up: Some(manager.catched_up.load(Ordering::Relaxed)),
auto_sync_serial: match &manager.serial { auto_sync_serial: Some(manager.serial.get_state().await),
Some(v) => Some(v.get_state().await),
None => None,
},
auto_sync_random: manager.random.get_state().await.ok(), auto_sync_random: manager.random.get_state().await.ok(),
}, },
None => SyncServiceState { None => SyncServiceState {
@ -595,12 +577,8 @@ impl SyncService {
Some(tx) => tx, Some(tx) => tx,
None => bail!("Transaction not found"), None => bail!("Transaction not found"),
}; };
let shard_config = self.store.get_store().get_shard_config();
self.ctx.publish(PubsubMessage::FindFile(FindFile { self.ctx.publish(PubsubMessage::FindFile(FindFile {
tx_id: tx.id(), tx_id: tx.id(),
num_shard: shard_config.num_shard,
shard_id: shard_config.shard_id,
neighbors_only: false,
timestamp: timestamp_now(), timestamp: timestamp_now(),
})); }));
Ok(()) Ok(())
@ -664,10 +642,7 @@ impl SyncService {
Some(s) => s, Some(s) => s,
None => { None => {
debug!(%tx.seq, "No more data needed"); debug!(%tx.seq, "No more data needed");
if self.store.finalize_tx_with_hash(tx.seq, tx.hash()).await? { self.store.finalize_tx_with_hash(tx.seq, tx.hash()).await?;
self.ctx
.send(NetworkMessage::AnnounceLocalFile { tx_id: tx.id() });
}
return Ok(()); return Ok(());
} }
}; };
@ -773,34 +748,6 @@ impl SyncService {
} }
} }
/// Handle on `NewFile` gossip message received.
async fn on_new_file_gossip(&mut self, from: PeerId, msg: NewFile) {
debug!(%from, ?msg, "Received NewFile gossip");
if let Some(controller) = self.controllers.get_mut(&msg.tx_id.seq) {
// Notify new peer announced if file already in sync
if let Ok(shard_config) = ShardConfig::new(msg.shard_id, msg.num_shard) {
controller.on_peer_announced(from, shard_config);
controller.transition();
}
} else if let Some(manager) = &self.auto_sync_manager {
let _ = manager.new_file_send.send(msg.tx_id.seq);
}
}
/// Handle on `AnnounceFile` RPC message received.
async fn on_announce_file(&mut self, from: PeerId, announcement: FileAnnouncement) {
// Notify new peer announced if file already in sync
if let Some(controller) = self.controllers.get_mut(&announcement.tx_id.seq) {
if let Ok(shard_config) =
ShardConfig::new(announcement.shard_id, announcement.num_shard)
{
controller.on_peer_announced(from, shard_config);
controller.transition();
}
}
}
/// Terminate file sync of `min_tx_seq`. /// Terminate file sync of `min_tx_seq`.
/// If `is_reverted` is `true` (means confirmed transactions reverted), /// If `is_reverted` is `true` (means confirmed transactions reverted),
/// also terminate `tx_seq` greater than `min_tx_seq` /// also terminate `tx_seq` greater than `min_tx_seq`
@ -896,9 +843,7 @@ mod tests {
use crate::test_util::tests::create_file_location_cache; use crate::test_util::tests::create_file_location_cache;
use libp2p::identity; use libp2p::identity;
use network::discovery::ConnectionId; use network::discovery::ConnectionId;
use network::new_network_channel;
use network::rpc::SubstreamId; use network::rpc::SubstreamId;
use network::NetworkReceiver;
use network::ReportSource; use network::ReportSource;
use shared_types::ChunkArray; use shared_types::ChunkArray;
use shared_types::Transaction; use shared_types::Transaction;
@ -910,6 +855,8 @@ mod tests {
use storage::log_store::LogStoreRead; use storage::log_store::LogStoreRead;
use storage::H256; use storage::H256;
use task_executor::test_utils::TestRuntime; use task_executor::test_utils::TestRuntime;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
struct TestSyncRuntime { struct TestSyncRuntime {
runtime: TestRuntime, runtime: TestRuntime,
@ -923,8 +870,8 @@ mod tests {
init_peer_id: PeerId, init_peer_id: PeerId,
file_location_cache: Arc<FileLocationCache>, file_location_cache: Arc<FileLocationCache>,
network_send: NetworkSender, network_send: UnboundedSender<NetworkMessage>,
network_recv: NetworkReceiver, network_recv: UnboundedReceiver<NetworkMessage>,
event_send: broadcast::Sender<LogSyncEvent>, event_send: broadcast::Sender<LogSyncEvent>,
catch_up_end_recv: Option<oneshot::Receiver<()>>, catch_up_end_recv: Option<oneshot::Receiver<()>>,
} }
@ -941,7 +888,7 @@ mod tests {
let (store, peer_store, txs, data) = create_2_store(chunk_counts); let (store, peer_store, txs, data) = create_2_store(chunk_counts);
let init_data = data[0].clone(); let init_data = data[0].clone();
let init_peer_id = identity::Keypair::generate_ed25519().public().to_peer_id(); let init_peer_id = identity::Keypair::generate_ed25519().public().to_peer_id();
let (network_send, network_recv) = new_network_channel(); let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
let (event_send, _) = broadcast::channel(16); let (event_send, _) = broadcast::channel(16);
let (_, catch_up_end_recv) = oneshot::channel(); let (_, catch_up_end_recv) = oneshot::channel();
@ -1005,7 +952,7 @@ mod tests {
let file_location_cache: Arc<FileLocationCache> = let file_location_cache: Arc<FileLocationCache> =
create_file_location_cache(init_peer_id, vec![txs[0].id()]); create_file_location_cache(init_peer_id, vec![txs[0].id()]);
let (network_send, mut network_recv) = new_network_channel(); let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
let (_, sync_recv) = channel::Channel::unbounded("test"); let (_, sync_recv) = channel::Channel::unbounded("test");
let mut sync = SyncService { let mut sync = SyncService {
@ -1034,7 +981,7 @@ mod tests {
let file_location_cache: Arc<FileLocationCache> = let file_location_cache: Arc<FileLocationCache> =
create_file_location_cache(init_peer_id, vec![txs[0].id()]); create_file_location_cache(init_peer_id, vec![txs[0].id()]);
let (network_send, mut network_recv) = new_network_channel(); let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
let (_, sync_recv) = channel::Channel::unbounded("test"); let (_, sync_recv) = channel::Channel::unbounded("test");
let mut sync = SyncService { let mut sync = SyncService {
@ -1347,13 +1294,15 @@ mod tests {
let config = LogConfig::default(); let config = LogConfig::default();
let store = Arc::new(LogManager::memorydb(config.clone()).unwrap()); let executor = runtime.task_executor.clone();
let store = Arc::new(LogManager::memorydb(config.clone(), executor).unwrap());
let init_peer_id = identity::Keypair::generate_ed25519().public().to_peer_id(); let init_peer_id = identity::Keypair::generate_ed25519().public().to_peer_id();
let file_location_cache: Arc<FileLocationCache> = let file_location_cache: Arc<FileLocationCache> =
create_file_location_cache(init_peer_id, vec![]); create_file_location_cache(init_peer_id, vec![]);
let (network_send, mut network_recv) = new_network_channel(); let (network_send, mut network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
let (_event_send, event_recv) = broadcast::channel(16); let (_event_send, event_recv) = broadcast::channel(16);
let (_, catch_up_end_recv) = oneshot::channel(); let (_, catch_up_end_recv) = oneshot::channel();
let sync_send = SyncService::spawn_with_config( let sync_send = SyncService::spawn_with_config(
@ -1555,10 +1504,6 @@ mod tests {
.await; .await;
wait_for_tx_finalized(runtime.store.clone(), tx_seq).await; wait_for_tx_finalized(runtime.store.clone(), tx_seq).await;
assert!(matches!(
runtime.network_recv.try_recv().unwrap(),
NetworkMessage::AnnounceLocalFile { .. }
));
assert!(!runtime.store.check_tx_completed(0).unwrap()); assert!(!runtime.store.check_tx_completed(0).unwrap());
@ -1583,10 +1528,6 @@ mod tests {
.await; .await;
wait_for_tx_finalized(runtime.store, tx_seq).await; wait_for_tx_finalized(runtime.store, tx_seq).await;
assert!(matches!(
runtime.network_recv.try_recv().unwrap(),
NetworkMessage::AnnounceLocalFile { .. }
));
sync_send sync_send
.notify(SyncMessage::PeerDisconnected { .notify(SyncMessage::PeerDisconnected {
@ -1780,7 +1721,7 @@ mod tests {
} }
async fn receive_chunk_request( async fn receive_chunk_request(
network_recv: &mut NetworkReceiver, network_recv: &mut UnboundedReceiver<NetworkMessage>,
sync_send: &SyncSender, sync_send: &SyncSender,
peer_store: Arc<LogManager>, peer_store: Arc<LogManager>,
init_peer_id: PeerId, init_peer_id: PeerId,

@ -9,6 +9,8 @@ use storage::{
LogManager, LogManager,
}; };
use task_executor::test_utils::TestRuntime;
/// Creates stores for local node and peers with initialized transaction of specified chunk count. /// Creates stores for local node and peers with initialized transaction of specified chunk count.
/// The first store is for local node, and data not stored. The second store is for peers, and all /// The first store is for local node, and data not stored. The second store is for peers, and all
/// transactions are finalized for file sync. /// transactions are finalized for file sync.
@ -22,8 +24,11 @@ pub fn create_2_store(
Vec<Vec<u8>>, Vec<Vec<u8>>,
) { ) {
let config = LogConfig::default(); let config = LogConfig::default();
let mut store = LogManager::memorydb(config.clone()).unwrap(); let runtime = TestRuntime::default();
let mut peer_store = LogManager::memorydb(config).unwrap();
let executor = runtime.task_executor.clone();
let mut store = LogManager::memorydb(config.clone(), executor.clone()).unwrap();
let mut peer_store = LogManager::memorydb(config, executor).unwrap();
let mut offset = 1; let mut offset = 1;
let mut txs = vec![]; let mut txs = vec![];
@ -115,7 +120,10 @@ pub mod tests {
impl TestStoreRuntime { impl TestStoreRuntime {
pub fn new_store() -> impl LogStore { pub fn new_store() -> impl LogStore {
LogManager::memorydb(LogConfig::default()).unwrap() let runtime = TestRuntime::default();
let executor = runtime.task_executor.clone();
LogManager::memorydb(LogConfig::default(), executor).unwrap()
} }
pub fn new(store: Arc<dyn LogStore>) -> TestStoreRuntime { pub fn new(store: Arc<dyn LogStore>) -> TestStoreRuntime {

@ -1,10 +1,10 @@
jsonrpcclient==4.0.3 jsonrpcclient==4.0.3
pyyaml==6.0.1 pyyaml==6.0.1
safe-pysha3==1.0.4 pysha3==1.0.2
coincurve==20.0.0 coincurve==18.0.0
eth-utils==5.1.0 eth-utils==3.0.0
py-ecc==7.0.0 py-ecc==7.0.0
web3==7.5.0 web3==6.14.0
eth_tester eth_tester
cffi==1.16.0 cffi==1.16.0
rtoml==0.11.0 rtoml==0.10.0

@ -176,7 +176,7 @@ mine_contract_address = "0x1785c8683b3c527618eFfF78d876d9dCB4b70285"
# If this limit is reached, the node will update its `shard_position` # If this limit is reached, the node will update its `shard_position`
# and store only half data. # and store only half data.
# #
db_max_num_sectors = 4000000000 db_max_num_sectors = 1000000000
# The format is <shard_id>/<shard_number>, where the shard number is 2^n. # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
# This only applies if there is no stored shard config in db. # This only applies if there is no stored shard config in db.
@ -232,10 +232,6 @@ batcher_announcement_capacity = 100
# all files, and sufficient disk space is required. # all files, and sufficient disk space is required.
auto_sync_enabled = true auto_sync_enabled = true
# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeout to sync files.
neighbors_only = true
# Maximum number of files in sync from other peers simultaneously. # Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8 # max_sync_files = 8
@ -328,7 +324,7 @@ neighbors_only = true
# enabled = false # enabled = false
# Interval to output metrics periodically, e.g. "10s", "30s" or "60s". # Interval to output metrics periodically, e.g. "10s", "30s" or "60s".
# report_interval = "10s" # report_interval = ""
# File name to output metrics periodically. # File name to output metrics periodically.
# file_report_output = "" # file_report_output = ""

@ -188,7 +188,7 @@ mine_contract_address = "0x6815F41019255e00D6F34aAB8397a6Af5b6D806f"
# If this limit is reached, the node will update its `shard_position` # If this limit is reached, the node will update its `shard_position`
# and store only half data. # and store only half data.
# #
db_max_num_sectors = 4000000000 db_max_num_sectors = 1000000000
# The format is <shard_id>/<shard_number>, where the shard number is 2^n. # The format is <shard_id>/<shard_number>, where the shard number is 2^n.
# This only applies if there is no stored shard config in db. # This only applies if there is no stored shard config in db.
@ -244,10 +244,6 @@ batcher_announcement_capacity = 100
# all files, and sufficient disk space is required. # all files, and sufficient disk space is required.
auto_sync_enabled = true auto_sync_enabled = true
# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeout to sync files.
neighbors_only = true
# Maximum number of files in sync from other peers simultaneously. # Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8 # max_sync_files = 8
@ -340,7 +336,7 @@ neighbors_only = true
# enabled = false # enabled = false
# Interval to output metrics periodically, e.g. "10s", "30s" or "60s". # Interval to output metrics periodically, e.g. "10s", "30s" or "60s".
# report_interval = "10s" # report_interval = ""
# File name to output metrics periodically. # File name to output metrics periodically.
# file_report_output = "" # file_report_output = ""

@ -246,10 +246,6 @@
# all files, and sufficient disk space is required. # all files, and sufficient disk space is required.
# auto_sync_enabled = false # auto_sync_enabled = false
# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeout to sync files.
neighbors_only = true
# Maximum number of files in sync from other peers simultaneously. # Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8 # max_sync_files = 8
@ -342,7 +338,7 @@ neighbors_only = true
# enabled = false # enabled = false
# Interval to output metrics periodically, e.g. "10s", "30s" or "60s". # Interval to output metrics periodically, e.g. "10s", "30s" or "60s".
# report_interval = "10s" # report_interval = ""
# File name to output metrics periodically. # File name to output metrics periodically.
# file_report_output = "" # file_report_output = ""

@ -1 +1 @@
bea58429e436e4952ae69235d9079cfc4ac5f3b3 75c251804a29ab22adced50d92478cf0baf834bc

@ -40,8 +40,8 @@
"type": "function" "type": "function"
} }
], ],
"bytecode": "0x608060405234801561001057600080fd5b5060be8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122080db0b00f4b93cc320a2df449a74e503451a2675da518eff0fc5b7cf0ae8c90c64736f6c63430008100033", "bytecode": "0x608060405234801561001057600080fd5b5060be8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
"deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122080db0b00f4b93cc320a2df449a74e503451a2675da518eff0fc5b7cf0ae8c90c64736f6c63430008100033", "deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
"linkReferences": {}, "linkReferences": {},
"deployedLinkReferences": {} "deployedLinkReferences": {}
} }

@ -70,8 +70,8 @@
"type": "function" "type": "function"
} }
], ],
"bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220d2f22ec6a41724281bad8a768c241562927a5fcc8ba600f3b3784f584a68c65864736f6c63430008100033", "bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
"deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220d2f22ec6a41724281bad8a768c241562927a5fcc8ba600f3b3784f584a68c65864736f6c63430008100033", "deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
"linkReferences": {}, "linkReferences": {},
"deployedLinkReferences": {} "deployedLinkReferences": {}
} }

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff