Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2025-11-03 08:07:27 +00:00

Compare commits (8 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b857728660 | |
| | cf11e1b68a | |
| | 46de15a345 | |
| | 88287333b5 | |
| | df570e34d2 | |
| | a3717d6bc1 | |
| | 55087eac7f | |
| | 9a1edae9a2 | |
.github/workflows/abi.yml (vendored): 6 lines changed
@@ -12,14 +12,14 @@ jobs:

    steps:
      - name: Clone current repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5

      - name: Get the Git revision from the current repository
        id: get-rev
        run: echo "rev=$(cat ./storage-contracts-abis/0g-storage-contracts-rev)" >> $GITHUB_OUTPUT

      - name: Clone another repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          repository: '0glabs/0g-storage-contracts'
          path: '0g-storage-contracts'
@@ -45,4 +45,4 @@ jobs:

      - name: Compare files
        run: |
-         ./scripts/check_abis.sh ./0g-storage-contracts/artifacts/
+         ./scripts/check_abis.sh ./0g-storage-contracts/artifacts/
.github/workflows/cc.yml (vendored): 4 lines changed
@@ -29,7 +29,7 @@ jobs:
          swap-storage: true

      - name: Checkout sources
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          submodules: recursive

@@ -57,4 +57,4 @@ jobs:
          # Disable to avoid CI failure as following:
          # ['error'] There was an error running the uploader: Error uploading to https://codecov.io: Error: There was an error fetching the storage
          # URL during POST: 404 - {'detail': ErrorDetail(string='Could not find a repository, try using repo upload token', code='not_found')}
-         # fail_ci_if_error: true
+         # fail_ci_if_error: true
.github/workflows/rust.yml (vendored): 8 lines changed
@@ -21,7 +21,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)
@@ -37,7 +37,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)
@@ -53,7 +53,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)
@@ -69,4 +69,4 @@ jobs:
          command: clippy
          # blocks_in_conditions is triggered for tracing::instrument.
          # This can be removed after the fix is released.
-         args: -- -D warnings
+         args: -- -D warnings
.github/workflows/tests.yml (vendored): 5 lines changed
@@ -29,8 +29,9 @@ jobs:
          swap-storage: true

      - name: Checkout sources
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
+         persist-credentials: false # prevents writing the extraheader
          submodules: recursive

      - name: Setup Rust (cache & toolchain)
@@ -66,4 +67,4 @@ jobs:
        uses: actions/upload-artifact@v4
        with:
          name: test_logs
-         path: /tmp/zgs_test_*
+         path: /tmp/zgs_test_*
@@ -16,12 +16,84 @@ use tracing::{trace, warn};

use crate::merkle_tree::MerkleTreeWrite;
pub use crate::merkle_tree::{
-    Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, ZERO_HASHES,
+    Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, OptionalHash, ZERO_HASHES,
};
pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction};
pub use proof::{Proof, RangeProof};
pub use sha3::Sha3Algorithm;

// Helper functions for converting between H256 and OptionalHash types
use ethereum_types::H256;

impl AppendMerkleTree<OptionalHash, Sha3Algorithm> {
    /// Convert a proof of OptionalHash to a proof of H256
    pub fn convert_proof_to_h256(proof: Proof<OptionalHash>) -> Result<Proof<H256>, anyhow::Error> {
        let lemma: Result<Vec<H256>, anyhow::Error> = proof
            .lemma()
            .iter()
            .map(|oh| {
                oh.0.ok_or_else(|| anyhow::anyhow!("Cannot convert null OptionalHash to H256"))
            })
            .collect();

        Proof::new(lemma?, proof.path().to_vec())
    }

    /// Convert a range proof of OptionalHash to a range proof of H256
    pub fn convert_range_proof_to_h256(
        proof: RangeProof<OptionalHash>,
    ) -> Result<RangeProof<H256>, anyhow::Error> {
        Ok(RangeProof {
            left_proof: Self::convert_proof_to_h256(proof.left_proof)?,
            right_proof: Self::convert_proof_to_h256(proof.right_proof)?,
        })
    }

    /// Convert a Proof<H256> to Proof<OptionalHash>
    pub fn convert_proof_from_h256(
        proof: Proof<H256>,
    ) -> Result<Proof<OptionalHash>, anyhow::Error> {
        let lemma = proof
            .lemma()
            .iter()
            .map(|h| OptionalHash::some(*h))
            .collect();
        let path = proof.path().to_vec();
        Proof::new(lemma, path)
    }

    /// Convert a RangeProof<H256> to RangeProof<OptionalHash>
    pub fn convert_range_proof_from_h256(
        range_proof: RangeProof<H256>,
    ) -> Result<RangeProof<OptionalHash>, anyhow::Error> {
        Ok(RangeProof {
            left_proof: Self::convert_proof_from_h256(range_proof.left_proof)?,
            right_proof: Self::convert_proof_from_h256(range_proof.right_proof)?,
        })
    }

    /// Generate a proof and convert it to H256
    pub fn gen_proof_h256(&self, leaf_index: usize) -> Result<Proof<H256>, anyhow::Error> {
        let proof = self.gen_proof(leaf_index)?;
        Self::convert_proof_to_h256(proof)
    }

    /// Generate a range proof and convert it to H256
    pub fn gen_range_proof_h256(
        &self,
        start_index: usize,
        end_index: usize,
    ) -> Result<RangeProof<H256>, anyhow::Error> {
        let proof = self.gen_range_proof(start_index, end_index)?;
        Self::convert_range_proof_to_h256(proof)
    }

    /// Get the root as H256 (unwraps the OptionalHash)
    pub fn root_h256(&self) -> H256 {
        self.root().unwrap()
    }
}

pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
    /// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves.
    node_manager: NodeManager<E>,
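The block above keeps the tree's element type as OptionalHash internally while still handing H256 proofs to callers. A minimal usage sketch, assuming the crate-root re-exports shown in this hunk and the constructor/commit pattern used in the tests later in this diff (the function name is illustrative, not part of the change):

```rust
use append_merkle::{AppendMerkleTree, OptionalHash, Sha3Algorithm};
use ethereum_types::H256;

fn proof_conversion_example() -> anyhow::Result<()> {
    // Start from a tree whose first leaf is the zero hash, as the tests in this diff do.
    let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
        vec![OptionalHash::some(H256::zero())],
        0,
        None,
    );

    // Leaves are appended as OptionalHash::some(..); appending a null leaf is rejected.
    let leaves: Vec<OptionalHash> = (0..4).map(|_| OptionalHash::some(H256::random())).collect();
    merkle.append_list(leaves);
    merkle.commit(Some(0));

    // Internally the proof is over OptionalHash; convert it at the API boundary.
    let proof_h256 = merkle.gen_proof_h256(1)?;
    assert_eq!(proof_h256.root(), merkle.root_h256());
    Ok(())
}
```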
@ -148,7 +220,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
|
||||
pub fn append(&mut self, new_leaf: E) {
|
||||
let start_time = Instant::now();
|
||||
if new_leaf == E::null() {
|
||||
if new_leaf.is_null() {
|
||||
// appending null is not allowed.
|
||||
return;
|
||||
}
|
||||
@ -162,7 +234,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
|
||||
pub fn append_list(&mut self, leaf_list: Vec<E>) {
|
||||
let start_time = Instant::now();
|
||||
if leaf_list.contains(&E::null()) {
|
||||
if leaf_list.iter().any(|leaf| leaf.is_null()) {
|
||||
// appending null is not allowed.
|
||||
return;
|
||||
}
|
||||
@ -181,7 +253,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
/// TODO: Optimize to avoid storing the `null` nodes?
|
||||
pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
|
||||
let start_time = Instant::now();
|
||||
if subtree_root == E::null() {
|
||||
if subtree_root.is_null() {
|
||||
// appending null is not allowed.
|
||||
bail!("subtree_root is null");
|
||||
}
|
||||
@ -197,7 +269,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
|
||||
pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
|
||||
let start_time = Instant::now();
|
||||
if subtree_list.iter().any(|(_, root)| root == &E::null()) {
|
||||
if subtree_list.iter().any(|(_, root)| root.is_null()) {
|
||||
// appending null is not allowed.
|
||||
bail!("subtree_list contains null");
|
||||
}
|
||||
@ -217,7 +289,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
/// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
|
||||
pub fn update_last(&mut self, updated_leaf: E) {
|
||||
let start_time = Instant::now();
|
||||
if updated_leaf == E::null() {
|
||||
if updated_leaf.is_null() {
|
||||
// updating to null is not allowed.
|
||||
return;
|
||||
}
|
||||
@ -237,9 +309,9 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
/// Panics if the leaf is already set and different or the index is out of range.
|
||||
/// TODO: Batch computing intermediate nodes.
|
||||
pub fn fill_leaf(&mut self, index: usize, leaf: E) {
|
||||
if leaf == E::null() {
|
||||
if leaf.is_null() {
|
||||
// fill leaf with null is not allowed.
|
||||
} else if self.node(0, index) == E::null() {
|
||||
} else if self.node(0, index).is_null() {
|
||||
self.node_manager.start_transaction();
|
||||
self.update_node(0, index, leaf);
|
||||
self.recompute_after_fill_leaves(index, index + 1);
|
||||
@ -332,7 +404,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
// skip padding node.
|
||||
continue;
|
||||
}
|
||||
if self.node(i, position) == E::null() {
|
||||
if self.node(i, position).is_null() {
|
||||
self.update_node(i, position, data.clone());
|
||||
updated_nodes.push((i, position, data))
|
||||
} else if self.node(i, position) != data {
|
||||
@ -357,7 +429,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
if position >= self.leaves() {
|
||||
bail!("Out of bound: position={} end={}", position, self.leaves());
|
||||
}
|
||||
if self.node(0, position) != E::null() {
|
||||
if !self.node(0, position).is_null() {
|
||||
Ok(Some(self.node(0, position)))
|
||||
} else {
|
||||
// The leaf hash is unknown.
|
||||
@ -472,7 +544,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
// Note that if we are recompute a range of an existing tree,
|
||||
// we do not need to keep these possibly null parent. This is only saved
|
||||
// for the case of constructing a new tree from the leaves.
|
||||
let parent = if *left == E::null() || *right == E::null() {
|
||||
let parent = if left.is_null() || right.is_null() {
|
||||
E::null()
|
||||
} else {
|
||||
A::parent(left, right)
|
||||
@ -483,7 +555,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
assert_eq!(chunk.len(), 1);
|
||||
let r = &chunk[0];
|
||||
// Same as above.
|
||||
let parent = if *r == E::null() {
|
||||
let parent = if r.is_null() {
|
||||
E::null()
|
||||
} else {
|
||||
A::parent_single(r, height + self.leaf_height)
|
||||
@ -501,8 +573,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
|
||||
match parent_index.cmp(&self.layer_len(height + 1)) {
|
||||
Ordering::Less => {
|
||||
// We do not overwrite with null.
|
||||
if parent != E::null() {
|
||||
if self.node(height + 1, parent_index) == E::null()
|
||||
if !parent.is_null() {
|
||||
if self.node(height + 1, parent_index).is_null()
|
||||
// The last node in a layer can be updated.
|
||||
|| (self.node(height + 1, parent_index) != parent
|
||||
&& parent_index == self.layer_len(height + 1) - 1)
|
||||
@ -741,7 +813,7 @@ impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
|
||||
type E = E;
|
||||
fn node(&self, layer: usize, index: usize) -> Self::E {
|
||||
match self.delta_nodes.get(layer, index).expect("range checked") {
|
||||
Some(node) if *node != E::null() => node.clone(),
|
||||
Some(node) if !node.is_null() => node.clone(),
|
||||
_ => self
|
||||
.node_manager
|
||||
.get_node(layer, index)
|
||||
@ -798,7 +870,7 @@ macro_rules! ensure_eq {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::merkle_tree::MerkleTreeRead;
|
||||
use crate::merkle_tree::{MerkleTreeRead, OptionalHash};
|
||||
|
||||
use crate::sha3::Sha3Algorithm;
|
||||
use crate::AppendMerkleTree;
|
||||
@ -812,21 +884,30 @@ mod tests {
|
||||
for _ in 0..entry_len {
|
||||
data.push(H256::random());
|
||||
}
|
||||
let mut merkle =
|
||||
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None);
|
||||
merkle.append_list(data.clone());
|
||||
let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
|
||||
vec![OptionalHash::some(H256::zero())],
|
||||
0,
|
||||
None,
|
||||
);
|
||||
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
|
||||
merkle.commit(Some(0));
|
||||
verify(&data, &mut merkle);
|
||||
|
||||
data.push(H256::random());
|
||||
merkle.append(*data.last().unwrap());
|
||||
merkle.append(OptionalHash::some(*data.last().unwrap()));
|
||||
merkle.commit(Some(1));
|
||||
verify(&data, &mut merkle);
|
||||
|
||||
for _ in 0..6 {
|
||||
data.push(H256::random());
|
||||
}
|
||||
merkle.append_list(data[data.len() - 6..].to_vec());
|
||||
merkle.append_list(
|
||||
data[data.len() - 6..]
|
||||
.iter()
|
||||
.copied()
|
||||
.map(OptionalHash::some)
|
||||
.collect(),
|
||||
);
|
||||
merkle.commit(Some(2));
|
||||
verify(&data, &mut merkle);
|
||||
}
|
||||
@ -840,9 +921,12 @@ mod tests {
|
||||
for _ in 0..entry_len {
|
||||
data.push(H256::random());
|
||||
}
|
||||
let mut merkle =
|
||||
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None);
|
||||
merkle.append_list(data.clone());
|
||||
let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
|
||||
vec![OptionalHash::some(H256::zero())],
|
||||
0,
|
||||
None,
|
||||
);
|
||||
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
|
||||
merkle.commit(Some(0));
|
||||
|
||||
for i in (0..data.len()).step_by(6) {
|
||||
@ -850,12 +934,17 @@ mod tests {
|
||||
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
|
||||
let mut new_data = Vec::new();
|
||||
for _ in 0..3 {
|
||||
new_data.push(H256::random());
|
||||
new_data.push(OptionalHash::some(H256::random()));
|
||||
}
|
||||
merkle.append_list(new_data);
|
||||
let seq = i as u64 / 6 + 1;
|
||||
merkle.commit(Some(seq));
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1);
|
||||
let optional_data: Vec<OptionalHash> = data[i..end]
|
||||
.iter()
|
||||
.copied()
|
||||
.map(OptionalHash::some)
|
||||
.collect();
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
|
||||
assert!(r.is_ok(), "{:?}", r);
|
||||
merkle.fill_with_range_proof(range_proof).unwrap();
|
||||
}
|
||||
@ -865,7 +954,11 @@ mod tests {
|
||||
#[test]
|
||||
fn test_proof_at_version() {
|
||||
let n = [2, 255, 256, 257];
|
||||
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None);
|
||||
let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
|
||||
vec![OptionalHash::some(H256::zero())],
|
||||
0,
|
||||
None,
|
||||
);
|
||||
let mut start_pos = 0;
|
||||
|
||||
for (tx_seq, &entry_len) in n.iter().enumerate() {
|
||||
@ -873,7 +966,7 @@ mod tests {
|
||||
for _ in 0..entry_len {
|
||||
data.push(H256::random());
|
||||
}
|
||||
merkle.append_list(data.clone());
|
||||
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
|
||||
merkle.commit(Some(tx_seq as u64));
|
||||
for i in (0..data.len()).step_by(6) {
|
||||
let end = std::cmp::min(start_pos + i + 3, data.len());
|
||||
@ -882,7 +975,12 @@ mod tests {
|
||||
.unwrap()
|
||||
.gen_range_proof(start_pos + i + 1, start_pos + end + 1)
|
||||
.unwrap();
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], start_pos + i + 1);
|
||||
let optional_data: Vec<OptionalHash> = data[i..end]
|
||||
.iter()
|
||||
.copied()
|
||||
.map(OptionalHash::some)
|
||||
.collect();
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, start_pos + i + 1);
|
||||
assert!(r.is_ok(), "{:?}", r);
|
||||
merkle.fill_with_range_proof(range_proof).unwrap();
|
||||
}
|
||||
@ -891,16 +989,21 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn verify(data: &[H256], merkle: &mut AppendMerkleTree<H256, Sha3Algorithm>) {
|
||||
fn verify(data: &[H256], merkle: &mut AppendMerkleTree<OptionalHash, Sha3Algorithm>) {
|
||||
for (i, item) in data.iter().enumerate() {
|
||||
let proof = merkle.gen_proof(i + 1).unwrap();
|
||||
let r = merkle.validate(&proof, item, i + 1);
|
||||
let r = merkle.validate(&proof, &OptionalHash::some(*item), i + 1);
|
||||
assert!(matches!(r, Ok(true)), "{:?}", r);
|
||||
}
|
||||
for i in (0..data.len()).step_by(6) {
|
||||
let end = std::cmp::min(i + 3, data.len());
|
||||
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1);
|
||||
let optional_data: Vec<OptionalHash> = data[i..end]
|
||||
.iter()
|
||||
.copied()
|
||||
.map(OptionalHash::some)
|
||||
.collect();
|
||||
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
|
||||
assert!(r.is_ok(), "{:?}", r);
|
||||
merkle.fill_with_range_proof(range_proof).unwrap();
|
||||
}
|
||||
|
||||
@@ -8,6 +8,173 @@ use std::fmt::Debug;
use std::hash::Hash;
use tracing::trace;

/// A wrapper around Option<H256> that properly handles null hashes
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct OptionalHash(pub Option<H256>);

impl OptionalHash {
    pub fn some(hash: H256) -> Self {
        OptionalHash(Some(hash))
    }

    pub fn none() -> Self {
        OptionalHash(None)
    }

    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }

    pub fn unwrap(&self) -> H256 {
        self.0.unwrap()
    }

    pub fn unwrap_or(&self, default: H256) -> H256 {
        self.0.unwrap_or(default)
    }

    pub fn as_ref(&self) -> Option<&H256> {
        self.0.as_ref()
    }

    /// Create OptionalHash from a byte slice
    pub fn from_slice(bytes: &[u8]) -> Result<Self, &'static str> {
        if bytes.len() != 32 {
            return Err("Invalid byte length for H256");
        }
        let mut hash_bytes = [0u8; 32];
        hash_bytes.copy_from_slice(bytes);
        Ok(OptionalHash::some(H256(hash_bytes)))
    }

    /// Convert to bytes for storage (33 bytes: 1 flag + 32 hash)
    pub fn as_bytes(&self) -> [u8; 33] {
        let mut bytes = [0u8; 33];
        match &self.0 {
            Some(hash) => {
                bytes[0] = 1; // Some flag
                bytes[1..].copy_from_slice(hash.as_ref());
            }
            None => {
                bytes[0] = 0; // None flag
                // bytes[1..] remain zeros
            }
        }
        bytes
    }

    /// Create OptionalHash from storage bytes (33 bytes)
    pub fn from_bytes(bytes: &[u8; 33]) -> Result<Self, &'static str> {
        match bytes[0] {
            0 => Ok(OptionalHash::none()),
            1 => {
                let mut hash_bytes = [0u8; 32];
                hash_bytes.copy_from_slice(&bytes[1..]);
                Ok(OptionalHash::some(H256(hash_bytes)))
            }
            _ => Err("Invalid flag byte for OptionalHash"),
        }
    }
}

// Add From conversions for easier usage
impl From<H256> for OptionalHash {
    fn from(hash: H256) -> Self {
        OptionalHash::some(hash)
    }
}

impl From<Option<H256>> for OptionalHash {
    fn from(opt: Option<H256>) -> Self {
        OptionalHash(opt)
    }
}

impl From<OptionalHash> for Option<H256> {
    fn from(opt_hash: OptionalHash) -> Self {
        opt_hash.0
    }
}

impl AsRef<[u8]> for OptionalHash {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref().unwrap().as_ref()
    }
}

impl AsMut<[u8]> for OptionalHash {
    fn as_mut(&mut self) -> &mut [u8] {
        if self.0.is_none() {
            self.0 = Some(H256::zero());
        }
        self.0.as_mut().unwrap().as_mut()
    }
}
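The 33-byte layout above (one flag byte followed by 32 hash bytes) is the same encoding that the flow database later in this diff stores for each tree node. A small sketch of the round trip, using only the methods defined above (the import path assumes the crate-root re-export of OptionalHash):

```rust
use append_merkle::OptionalHash;
use ethereum_types::H256;

fn byte_layout_example() {
    // A present hash: flag byte 1, followed by the 32 hash bytes.
    let some = OptionalHash::some(H256::repeat_byte(0xab));
    let encoded = some.as_bytes();
    assert_eq!(encoded[0], 1);
    assert_eq!(&encoded[1..], H256::repeat_byte(0xab).as_bytes());
    assert_eq!(OptionalHash::from_bytes(&encoded).unwrap(), some);

    // An absent hash: flag byte 0, payload left as zeros.
    let none = OptionalHash::none();
    let encoded = none.as_bytes();
    assert_eq!(encoded[0], 0);
    assert!(OptionalHash::from_bytes(&encoded).unwrap().is_none());

    // From/Into conversions mirror plain Option<H256>.
    let via_from: OptionalHash = H256::zero().into();
    assert!(via_from.is_some());
}
```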
impl Encode for OptionalHash {
    fn is_ssz_fixed_len() -> bool {
        true
    }

    fn ssz_fixed_len() -> usize {
        33 // 1 byte for Some/None flag + 32 bytes for hash
    }

    fn ssz_bytes_len(&self) -> usize {
        33
    }

    fn ssz_append(&self, buf: &mut Vec<u8>) {
        match &self.0 {
            Some(hash) => {
                buf.push(1); // Some flag
                hash.ssz_append(buf);
            }
            None => {
                buf.push(0); // None flag
                buf.extend_from_slice(&[0u8; 32]); // Padding zeros
            }
        }
    }
}

impl Decode for OptionalHash {
    fn is_ssz_fixed_len() -> bool {
        true
    }

    fn ssz_fixed_len() -> usize {
        33 // 1 byte for Some/None flag + 32 bytes for hash
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        if bytes.len() != 33 {
            return Err(ssz::DecodeError::InvalidByteLength {
                len: bytes.len(),
                expected: 33,
            });
        }

        match bytes[0] {
            0 => Ok(OptionalHash::none()),
            1 => {
                let hash = H256::from_ssz_bytes(&bytes[1..])?;
                Ok(OptionalHash::some(hash))
            }
            _ => Err(ssz::DecodeError::BytesInvalid(
                "Invalid flag byte for OptionalHash".to_string(),
            )),
        }
    }
}

unsafe impl Send for OptionalHash {}
unsafe impl Sync for OptionalHash {}
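Because ssz_fixed_len() is 33, an OptionalHash occupies exactly 33 bytes in SSZ form whether or not a hash is present. A hedged sketch of the encode/decode pair, calling only the trait methods implemented above:

```rust
use append_merkle::OptionalHash;
use ethereum_types::H256;
use ssz::{Decode, Encode};

fn ssz_roundtrip_example() -> Result<(), ssz::DecodeError> {
    let value = OptionalHash::some(H256::repeat_byte(0x42));

    // Encode: ssz_append writes the flag byte and then the 32 hash bytes.
    let mut buf = Vec::new();
    value.ssz_append(&mut buf);
    assert_eq!(buf.len(), 33);
    assert_eq!(buf[0], 1);

    // Decode rejects any length other than 33 and any flag other than 0 or 1.
    let decoded = OptionalHash::from_ssz_bytes(&buf)?;
    assert_eq!(decoded, value);

    // A None value still produces 33 bytes: flag 0 plus zero padding.
    let mut none_buf = Vec::new();
    OptionalHash::none().ssz_append(&mut none_buf);
    assert_eq!(none_buf, vec![0u8; 33]);
    Ok(())
}
```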
pub trait HashElement:
    Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync
{
@@ -18,13 +185,28 @@ pub trait HashElement:
    }
}

impl HashElement for OptionalHash {
    fn end_pad(height: usize) -> Self {
        OptionalHash::some(ZERO_HASHES[height])
    }

    fn null() -> Self {
        OptionalHash::none()
    }

    fn is_null(&self) -> bool {
        self.is_none()
    }
}

// Keep the H256 implementation for backward compatibility
impl HashElement for H256 {
    fn end_pad(height: usize) -> Self {
        ZERO_HASHES[height]
    }

    fn null() -> Self {
-       H256::repeat_byte(1)
+       H256::repeat_byte(0x01)
    }
}
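The contrast between the two impls is the point of this change: the legacy H256 element reserves the magic value 0x0101...01 as its null marker, which is still a real 32-byte hash, while OptionalHash represents absence explicitly. A short illustration (it assumes is_null() for H256 falls back to the trait's default comparison against null(), which the rest of this diff relies on):

```rust
use append_merkle::{HashElement, OptionalHash};
use ethereum_types::H256;

fn null_semantics_example() {
    // Legacy sentinel: a concrete 32-byte value doubles as "null".
    let legacy_null = <H256 as HashElement>::null();
    assert_eq!(legacy_null, H256::repeat_byte(0x01));
    assert!(legacy_null.is_null());

    // The wrapper makes absence explicit instead of reserving a magic hash value.
    let absent = <OptionalHash as HashElement>::null();
    assert!(absent.is_null());
    assert!(absent.as_ref().is_none());
}
```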
@ -70,7 +252,7 @@ pub trait MerkleTreeRead {
|
||||
self.leaves()
|
||||
);
|
||||
}
|
||||
if self.node(0, leaf_index) == Self::E::null() {
|
||||
if self.node(0, leaf_index).is_null() {
|
||||
bail!("Not ready to generate proof for leaf_index={}", leaf_index);
|
||||
}
|
||||
if self.height() == 1 {
|
||||
@ -102,7 +284,7 @@ pub trait MerkleTreeRead {
|
||||
index_in_layer >>= 1;
|
||||
}
|
||||
lemma.push(self.root());
|
||||
if lemma.contains(&Self::E::null()) {
|
||||
if lemma.iter().any(|e| e.is_null()) {
|
||||
bail!(
|
||||
"Not enough data to generate proof, lemma={:?} path={:?}",
|
||||
lemma,
|
||||
|
||||
@ -205,7 +205,7 @@ impl<E: HashElement> RangeProof<E> {
|
||||
// Avoid copying the first layer by working directly with the slice
|
||||
let mut children_layer = Vec::new();
|
||||
let mut current_layer = range_leaves;
|
||||
|
||||
|
||||
for height in 0..(tree_depth - 1) {
|
||||
let mut parent_layer = Vec::new();
|
||||
let start_index = if !self.left_proof.path()[height] {
|
||||
@ -231,7 +231,7 @@ impl<E: HashElement> RangeProof<E> {
|
||||
children_layer = parent_layer;
|
||||
current_layer = &children_layer;
|
||||
}
|
||||
|
||||
|
||||
// If no iterations occurred, the root should be computed from the original range_leaves
|
||||
if children_layer.is_empty() {
|
||||
ensure_eq!(range_leaves.len(), 1);
|
||||
|
||||
@@ -1,4 +1,4 @@
-use crate::merkle_tree::ZERO_HASHES;
+use crate::merkle_tree::{OptionalHash, ZERO_HASHES};
use crate::{Algorithm, HashElement};
use ethereum_types::H256;
use once_cell::sync::Lazy;
@@ -50,3 +50,22 @@ impl Algorithm<H256> for Sha3Algorithm {
        Self::leaf_raw(data)
    }
}

impl Algorithm<OptionalHash> for Sha3Algorithm {
    fn parent(left: &OptionalHash, right: &OptionalHash) -> OptionalHash {
        match (&left.0, &right.0) {
            (Some(l), Some(r)) => {
                // Use the H256 implementation directly to ensure identical logic
                let result = <Self as Algorithm<H256>>::parent(l, r);
                OptionalHash::some(result)
            }
            _ => OptionalHash::none(),
        }
    }

    fn leaf(data: &[u8]) -> OptionalHash {
        // Use the H256 implementation directly to ensure identical logic
        let result = <Self as Algorithm<H256>>::leaf(data);
        OptionalHash::some(result)
    }
}
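A consequence of the match above is that unknown children poison the parent: if either input is null, the parent stays null, which is how the in-memory tree marks intermediate nodes it cannot compute yet. A small sketch using the crate-root re-exports (the function name is illustrative):

```rust
use append_merkle::{Algorithm, OptionalHash, Sha3Algorithm};
use ethereum_types::H256;

fn null_propagation_example() {
    let known = OptionalHash::some(H256::repeat_byte(0x11));
    let unknown = OptionalHash::none();

    // Two known children hash to a known parent, reusing the H256 implementation.
    let parent = <Sha3Algorithm as Algorithm<OptionalHash>>::parent(&known, &known);
    assert!(parent.is_some());

    // If either child is unknown, the parent is left unknown as well.
    let poisoned = <Sha3Algorithm as Algorithm<OptionalHash>>::parent(&known, &unknown);
    assert!(poisoned.is_none());
}
```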
@ -26,6 +26,10 @@ pub type MineContextMessage = Option<PoraPuzzle>;
|
||||
lazy_static! {
|
||||
pub static ref EMPTY_HASH: H256 =
|
||||
H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
|
||||
pub static ref COMPUTE_WORKER_CONTEXT_CALLER: Address =
|
||||
"0x000000000000000000000000000000000000000A"
|
||||
.parse()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
const PORA_VERSION: u64 = 1;
|
||||
@ -139,6 +143,8 @@ impl MineContextWatcher {
|
||||
}
|
||||
|
||||
let miner_id = self.miner_id.0;
|
||||
|
||||
// Use eth_call with specific caller address for read-only access
|
||||
let WorkerContext {
|
||||
context,
|
||||
pora_target,
|
||||
@ -147,6 +153,7 @@ impl MineContextWatcher {
|
||||
} = self
|
||||
.mine_contract
|
||||
.compute_worker_context(miner_id)
|
||||
.from(*COMPUTE_WORKER_CONTEXT_CALLER)
|
||||
.call()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to query mining context: {:?}", e))?;
|
||||
|
||||
@@ -1017,8 +1017,8 @@ impl std::convert::From<Request> for OutboundRequest {
/// The type of RPC responses the Behaviour informs it has received, and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level responses that can be
-// sent. The main difference is the absense of Pong and Metadata, which don't leave the
-// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and
+// sent. The main difference is the absence of Pong and Metadata, which don't leave the
+// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and
// `RPCCodedResponse`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Response {
@@ -299,7 +299,7 @@ impl RouterService {
            }
            NetworkMessage::Publish { messages } => {
                if self.libp2p.swarm.connected_peers().next().is_none() {
-                   // this is a boardcast message, when current node doesn't have any peers connected, try to connect any peer in config
+                   // this is a broadcast message, when current node doesn't have any peers connected, try to connect any peer in config
                    for multiaddr in &self.config.libp2p_nodes {
                        match Swarm::dial(&mut self.libp2p.swarm, multiaddr.clone()) {
                            Ok(()) => {
@@ -2,7 +2,7 @@ mod proof;

use anyhow::{anyhow, bail, Error};
use append_merkle::{
-    AppendMerkleTree, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm,
+    AppendMerkleTree, OptionalHash, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm,
};
use ethereum_types::{Address, H256, U256};
use merkle_light::merkle::MerkleTree;
@@ -32,7 +32,7 @@ pub type DataRoot = H256;

pub type FlowProof = RawProof<H256>;
pub type FlowRangeProof = RawRangeProof<H256>;
-pub type Merkle = AppendMerkleTree<H256, Sha3Algorithm>;
+pub type Merkle = AppendMerkleTree<OptionalHash, Sha3Algorithm>;

// Each chunk is 32 bytes.
pub const CHUNK_SIZE: usize = 256;
@@ -12,7 +12,9 @@ use crate::log_store::{
use crate::{try_option, ZgsKeyValueDB};
use any::Any;
use anyhow::{anyhow, bail, Result};
-use append_merkle::{MerkleTreeRead, NodeDatabase, NodeTransaction};
+use append_merkle::{
+    AppendMerkleTree, MerkleTreeRead, NodeDatabase, NodeTransaction, OptionalHash,
+};
use itertools::Itertools;
use kvdb::DBTransaction;
use parking_lot::RwLock;
@@ -72,7 +74,8 @@ impl FlowStore {
                batch_index
            )
        })?;
-       merkle.gen_proof(sector_index)
+       let optional_proof = merkle.gen_proof(sector_index)?;
+       AppendMerkleTree::convert_proof_to_h256(optional_proof)
    }

    pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
@@ -577,12 +580,12 @@ fn layer_size_key(layer: usize) -> Vec<u8> {

pub struct NodeDBTransaction(DBTransaction);

-impl NodeDatabase<DataRoot> for FlowDBStore {
-   fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> {
+impl NodeDatabase<OptionalHash> for FlowDBStore {
+   fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
        Ok(self
            .kvdb
            .get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
-           .map(|v| DataRoot::from_slice(&v)))
+           .map(|v| OptionalHash::from_bytes(v.as_slice().try_into().unwrap()).unwrap()))
    }

    fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
@@ -592,11 +595,11 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
        }
    }

-   fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> {
+   fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
        Box::new(NodeDBTransaction(self.kvdb.transaction()))
    }

-   fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> {
+   fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
        let db_tx: Box<NodeDBTransaction> = tx
            .into_any()
            .downcast()
@@ -605,21 +608,21 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
    }
}

-impl NodeTransaction<DataRoot> for NodeDBTransaction {
-   fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) {
+impl NodeTransaction<OptionalHash> for NodeDBTransaction {
+   fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
        self.0.put(
            COL_FLOW_MPT_NODES,
            &encode_mpt_node_key(layer, pos),
-           node.as_bytes(),
+           &node.as_bytes(),
        );
    }

-   fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) {
+   fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
        for (layer_index, position, data) in nodes {
            self.0.put(
                COL_FLOW_MPT_NODES,
                &encode_mpt_node_key(*layer_index, *position),
-               data.as_bytes(),
+               &data.as_bytes(),
            );
        }
    }
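The save_node/get_node pair above persists each tree node as the 33-byte OptionalHash encoding keyed by (layer, pos). A toy in-memory sketch of that contract, with a HashMap standing in for the COL_FLOW_MPT_NODES column (ToyNodeDb and its key layout are illustrative, not the real encode_mpt_node_key):

```rust
use append_merkle::OptionalHash;
use ethereum_types::H256;
use std::collections::HashMap;

/// Toy stand-in for the node column: (layer, pos) -> 33-byte node encoding.
struct ToyNodeDb {
    nodes: HashMap<(usize, usize), [u8; 33]>,
}

impl ToyNodeDb {
    fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
        self.nodes.insert((layer, pos), node.as_bytes());
    }

    fn get_node(&self, layer: usize, pos: usize) -> Option<OptionalHash> {
        self.nodes
            .get(&(layer, pos))
            .map(|bytes| OptionalHash::from_bytes(bytes).unwrap())
    }
}

fn toy_db_example() {
    let mut db = ToyNodeDb { nodes: HashMap::new() };
    db.save_node(2, 7, &OptionalHash::some(H256::repeat_byte(0x5a)));
    assert_eq!(
        db.get_node(2, 7),
        Some(OptionalHash::some(H256::repeat_byte(0x5a)))
    );
    assert!(db.get_node(0, 0).is_none());
}
```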
@ -204,9 +204,9 @@ impl EntryBatch {
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(
|
||||
try_option!(self.to_merkle_tree(is_first_chunk)?).root(),
|
||||
))
|
||||
Ok(try_option!(self.to_merkle_tree(is_first_chunk)?)
|
||||
.root()
|
||||
.into())
|
||||
}
|
||||
|
||||
pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> {
|
||||
@ -243,7 +243,7 @@ impl EntryBatch {
|
||||
|
||||
pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result<Option<Merkle>> {
|
||||
let initial_leaves = if is_first_chunk {
|
||||
vec![H256::zero()]
|
||||
vec![H256::zero().into()]
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
@ -256,7 +256,7 @@ impl EntryBatch {
|
||||
);
|
||||
merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned"));
|
||||
}
|
||||
merkle.append_subtree(subtree.subtree_height, subtree.root)?;
|
||||
merkle.append_subtree(subtree.subtree_height, subtree.root.into())?;
|
||||
}
|
||||
if merkle.leaves() != SECTORS_PER_LOAD {
|
||||
let leaf_data = try_option!(
|
||||
|
||||
@ -9,7 +9,7 @@ use crate::log_store::{
|
||||
};
|
||||
use crate::{try_option, ZgsKeyValueDB};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use append_merkle::{Algorithm, MerkleTreeRead, Sha3Algorithm};
|
||||
use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
|
||||
use ethereum_types::H256;
|
||||
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||
use merkle_light::merkle::{log2_pow2, MerkleTree};
|
||||
@ -55,13 +55,10 @@ const PAD_DELAY: Duration = Duration::from_secs(2);
|
||||
// Process at most 1M entries (256MB) pad data at a time.
|
||||
const PAD_MAX_SIZE: usize = 1 << 20;
|
||||
|
||||
static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| {
|
||||
Merkle::new(
|
||||
data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
|
||||
0,
|
||||
None,
|
||||
)
|
||||
.root()
|
||||
static PAD_SEGMENT_ROOT: Lazy<OptionalHash> = Lazy::new(|| {
|
||||
let h256_leaves = data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap();
|
||||
|
||||
Merkle::new(h256_leaves, 0, None).root()
|
||||
});
|
||||
pub struct UpdateFlowMessage {
|
||||
pub pad_data: usize,
|
||||
@ -130,7 +127,8 @@ impl MerkleManager {
|
||||
|
||||
fn try_initialize(&mut self, flow_store: &FlowStore) -> Result<()> {
|
||||
if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 {
|
||||
self.last_chunk_merkle.append(H256::zero());
|
||||
self.last_chunk_merkle
|
||||
.append(OptionalHash::some(H256::zero()));
|
||||
self.pora_chunks_merkle
|
||||
.update_last(self.last_chunk_merkle.root());
|
||||
} else if self.last_chunk_merkle.leaves() != 0 {
|
||||
@ -222,9 +220,17 @@ impl LogStoreChunkWrite for LogManager {
|
||||
self.append_entries(flow_entry_array, &mut merkle)?;
|
||||
|
||||
if let Some(file_proof) = maybe_file_proof {
|
||||
// Convert H256 proof to OptionalHash proof
|
||||
let optional_proof = AppendMerkleTree::convert_proof_from_h256(file_proof)?;
|
||||
// Convert H256 merkle nodes to OptionalHash merkle nodes
|
||||
let optional_nodes: Vec<(usize, OptionalHash)> = tx
|
||||
.merkle_nodes
|
||||
.into_iter()
|
||||
.map(|(depth, hash)| (depth, OptionalHash::some(hash)))
|
||||
.collect();
|
||||
merkle.pora_chunks_merkle.fill_with_file_proof(
|
||||
file_proof,
|
||||
tx.merkle_nodes,
|
||||
optional_proof,
|
||||
optional_nodes,
|
||||
tx.start_entry_index,
|
||||
)?;
|
||||
}
|
||||
@ -424,9 +430,9 @@ impl LogStoreWrite for LogManager {
|
||||
// `merkle` is used in `validate_range_proof`.
|
||||
let mut merkle = self.merkle.write();
|
||||
if valid {
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.fill_with_range_proof(data.proof.clone())?;
|
||||
merkle.pora_chunks_merkle.fill_with_range_proof(
|
||||
AppendMerkleTree::convert_range_proof_from_h256(data.proof.clone())?,
|
||||
)?;
|
||||
}
|
||||
Ok(valid)
|
||||
}
|
||||
@ -637,7 +643,7 @@ impl LogStoreRead for LogManager {
|
||||
let tx = self
|
||||
.get_tx_by_seq_number(tx_seq)?
|
||||
.ok_or_else(|| anyhow!("tx missing"))?;
|
||||
let leaves = data_to_merkle_leaves(&data.chunks.data)?;
|
||||
let leaves = data_to_merkle_leaves_h256(&data.chunks.data)?;
|
||||
data.proof.validate::<Sha3Algorithm>(
|
||||
&leaves,
|
||||
(data.chunks.start_index + tx.start_entry_index) as usize,
|
||||
@ -646,7 +652,7 @@ impl LogStoreRead for LogManager {
|
||||
.merkle
|
||||
.read_recursive()
|
||||
.pora_chunks_merkle
|
||||
.check_root(&data.proof.root()))
|
||||
.check_root(&data.proof.root().into()))
|
||||
}
|
||||
|
||||
fn get_sync_progress(&self) -> Result<Option<(u64, H256)>> {
|
||||
@ -686,7 +692,7 @@ impl LogStoreRead for LogManager {
|
||||
fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> {
|
||||
let merkle = self.merkle.read_recursive();
|
||||
Ok((
|
||||
merkle.pora_chunks_merkle.root(),
|
||||
merkle.pora_chunks_merkle.root().unwrap(),
|
||||
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64,
|
||||
))
|
||||
}
|
||||
@ -871,7 +877,9 @@ impl LogManager {
|
||||
None => self.gen_proof_at_version(flow_index, None),
|
||||
Some(root) => {
|
||||
let merkle = self.merkle.read_recursive();
|
||||
let tx_seq = merkle.pora_chunks_merkle.tx_seq_at_root(&root)?;
|
||||
let tx_seq = merkle
|
||||
.pora_chunks_merkle
|
||||
.tx_seq_at_root(&OptionalHash::from(root))?;
|
||||
self.gen_proof_at_version(flow_index, Some(tx_seq))
|
||||
}
|
||||
}
|
||||
@ -885,11 +893,15 @@ impl LogManager {
|
||||
let merkle = self.merkle.read_recursive();
|
||||
let seg_index = sector_to_segment(flow_index);
|
||||
let top_proof = match maybe_tx_seq {
|
||||
None => merkle.pora_chunks_merkle.gen_proof(seg_index)?,
|
||||
Some(tx_seq) => merkle
|
||||
.pora_chunks_merkle
|
||||
.at_version(tx_seq)?
|
||||
.gen_proof(seg_index)?,
|
||||
None => AppendMerkleTree::convert_proof_to_h256(
|
||||
merkle.pora_chunks_merkle.gen_proof(seg_index)?,
|
||||
)?,
|
||||
Some(tx_seq) => AppendMerkleTree::convert_proof_to_h256(
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.at_version(tx_seq)?
|
||||
.gen_proof(seg_index)?,
|
||||
)?,
|
||||
};
|
||||
|
||||
// TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so
|
||||
@ -906,13 +918,17 @@ impl LogManager {
|
||||
.gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)?
|
||||
} else {
|
||||
match maybe_tx_seq {
|
||||
None => merkle
|
||||
.last_chunk_merkle
|
||||
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
|
||||
Some(tx_version) => merkle
|
||||
.last_chunk_merkle
|
||||
.at_version(tx_version)?
|
||||
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
|
||||
None => AppendMerkleTree::convert_proof_to_h256(
|
||||
merkle
|
||||
.last_chunk_merkle
|
||||
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
|
||||
)?,
|
||||
Some(tx_version) => AppendMerkleTree::convert_proof_to_h256(
|
||||
merkle
|
||||
.last_chunk_merkle
|
||||
.at_version(tx_version)?
|
||||
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
|
||||
)?,
|
||||
}
|
||||
};
|
||||
entry_proof(&top_proof, &sub_proof)
|
||||
@ -938,9 +954,10 @@ impl LogManager {
|
||||
if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
|
||||
merkle
|
||||
.last_chunk_merkle
|
||||
.append_subtree(subtree_depth, subtree_root)?;
|
||||
.append_subtree(subtree_depth, OptionalHash::some(subtree_root))?;
|
||||
if merkle.last_chunk_merkle.leaves() == subtree_size {
|
||||
// `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
|
||||
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.append_subtree(1, merkle.last_chunk_merkle.root())?;
|
||||
@ -960,9 +977,10 @@ impl LogManager {
|
||||
// the chunks boundary.
|
||||
assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
|
||||
assert!(subtree_size >= PORA_CHUNK_SIZE);
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?;
|
||||
merkle.pora_chunks_merkle.append_subtree(
|
||||
subtree_depth - log2_pow2(PORA_CHUNK_SIZE),
|
||||
OptionalHash::some(subtree_root),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
@ -997,9 +1015,8 @@ impl LogManager {
|
||||
let mut completed_chunk_index = None;
|
||||
if pad_data.len() < last_chunk_pad {
|
||||
is_full_empty = false;
|
||||
merkle
|
||||
.last_chunk_merkle
|
||||
.append_list(data_to_merkle_leaves(&pad_data)?);
|
||||
let pad_leaves = data_to_merkle_leaves(&pad_data)?;
|
||||
merkle.last_chunk_merkle.append_list(pad_leaves);
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.update_last(merkle.last_chunk_merkle.root());
|
||||
@ -1007,9 +1024,8 @@ impl LogManager {
|
||||
if last_chunk_pad != 0 {
|
||||
is_full_empty = false;
|
||||
// Pad the last chunk.
|
||||
merkle
|
||||
.last_chunk_merkle
|
||||
.append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?);
|
||||
let last_chunk_leaves = data_to_merkle_leaves(&pad_data[..last_chunk_pad])?;
|
||||
merkle.last_chunk_merkle.append_list(last_chunk_leaves);
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.update_last(merkle.last_chunk_merkle.root());
|
||||
@ -1019,7 +1035,7 @@ impl LogManager {
|
||||
// Pad with more complete chunks.
|
||||
let mut start_index = last_chunk_pad / ENTRY_SIZE;
|
||||
while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
|
||||
merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT);
|
||||
merkle.pora_chunks_merkle.append(PAD_SEGMENT_ROOT.clone());
|
||||
start_index += PORA_CHUNK_SIZE;
|
||||
}
|
||||
assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
|
||||
@ -1104,7 +1120,7 @@ impl LogManager {
|
||||
if chunk_index < merkle.pora_chunks_merkle.leaves() as u64 {
|
||||
merkle
|
||||
.pora_chunks_merkle
|
||||
.fill_leaf(chunk_index as usize, chunk_root);
|
||||
.fill_leaf(chunk_index as usize, OptionalHash::some(chunk_root));
|
||||
} else {
|
||||
// TODO(zz): This assumption may be false in the future.
|
||||
unreachable!("We always insert tx nodes before put_chunks");
|
||||
@ -1253,7 +1269,7 @@ impl LogManager {
|
||||
let mut to_insert_subtrees = Vec::new();
|
||||
let mut start_index = 0;
|
||||
for (subtree_height, root) in subtree_list {
|
||||
to_insert_subtrees.push((start_index, subtree_height, root));
|
||||
to_insert_subtrees.push((start_index, subtree_height, root.unwrap()));
|
||||
start_index += 1 << (subtree_height - 1);
|
||||
}
|
||||
self.flow_store
|
||||
@ -1301,14 +1317,14 @@ macro_rules! try_option {
|
||||
/// This should be called with input checked.
|
||||
pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
|
||||
Ok(FileMerkleTree::new(
|
||||
data_to_merkle_leaves(leaf_data)?
|
||||
data_to_merkle_leaves_h256(leaf_data)?
|
||||
.into_iter()
|
||||
.map(|h| h.0)
|
||||
.collect::<Vec<[u8; 32]>>(),
|
||||
))
|
||||
}
|
||||
|
||||
pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
|
||||
pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<OptionalHash>> {
|
||||
let start_time = Instant::now();
|
||||
if leaf_data.len() % ENTRY_SIZE != 0 {
|
||||
bail!("merkle_tree: mismatched data size");
|
||||
@ -1331,6 +1347,12 @@ pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
/// Convenience function that combines data_to_merkle_leaves and conversion to H256
|
||||
pub fn data_to_merkle_leaves_h256(leaf_data: &[u8]) -> Result<Vec<H256>> {
|
||||
let optional_hashes = data_to_merkle_leaves(leaf_data)?;
|
||||
Ok(optional_hashes.into_iter().map(|oh| oh.unwrap()).collect())
|
||||
}
|
||||
|
||||
pub fn bytes_to_entries(size_bytes: u64) -> u64 {
|
||||
if size_bytes % ENTRY_SIZE as u64 == 0 {
|
||||
size_bytes / ENTRY_SIZE as u64
|
||||
|
||||
@ -1,9 +1,9 @@
|
||||
use crate::log_store::log_manager::{
|
||||
data_to_merkle_leaves, sub_merkle_tree, tx_subtree_root_list_padded, LogConfig, LogManager,
|
||||
PORA_CHUNK_SIZE,
|
||||
data_to_merkle_leaves, data_to_merkle_leaves_h256, sub_merkle_tree,
|
||||
tx_subtree_root_list_padded, LogConfig, LogManager, PORA_CHUNK_SIZE,
|
||||
};
|
||||
use crate::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite};
|
||||
use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, Sha3Algorithm};
|
||||
use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
|
||||
use ethereum_types::H256;
|
||||
use rand::random;
|
||||
use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE};
|
||||
@ -22,11 +22,17 @@ fn test_put_get() {
|
||||
data[i * CHUNK_SIZE] = random();
|
||||
}
|
||||
let (padded_chunks, _) = compute_padded_chunk_size(data_size);
|
||||
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None);
|
||||
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap());
|
||||
let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
|
||||
vec![OptionalHash::some(H256::zero())],
|
||||
0,
|
||||
None,
|
||||
);
|
||||
let padding_leaves = data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap();
|
||||
merkle.append_list(padding_leaves);
|
||||
let mut data_padded = data.clone();
|
||||
data_padded.append(&mut vec![0u8; CHUNK_SIZE]);
|
||||
merkle.append_list(data_to_merkle_leaves(&data_padded).unwrap());
|
||||
let data_leaves = data_to_merkle_leaves(&data_padded).unwrap();
|
||||
merkle.append_list(data_leaves);
|
||||
merkle.commit(Some(0));
|
||||
let tx_merkle = sub_merkle_tree(&data).unwrap();
|
||||
let tx = Transaction {
|
||||
@ -78,16 +84,17 @@ fn test_put_get() {
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(chunk_with_proof.chunk, chunk_array.chunk_at(i).unwrap());
|
||||
|
||||
assert_eq!(
|
||||
chunk_with_proof.proof,
|
||||
merkle.gen_proof(i + start_offset).unwrap()
|
||||
merkle.gen_proof_h256(i + start_offset).unwrap()
|
||||
);
|
||||
let r = chunk_with_proof.proof.validate::<Sha3Algorithm>(
|
||||
&Sha3Algorithm::leaf(&chunk_with_proof.chunk.0),
|
||||
i + start_offset,
|
||||
);
|
||||
assert!(r.is_ok(), "proof={:?} \n r={:?}", chunk_with_proof.proof, r);
|
||||
assert!(merkle.check_root(&chunk_with_proof.proof.root()));
|
||||
assert!(merkle.check_root(&chunk_with_proof.proof.root().into()));
|
||||
}
|
||||
for i in (0..chunk_count).step_by(PORA_CHUNK_SIZE / 3) {
|
||||
let end = std::cmp::min(i + PORA_CHUNK_SIZE, chunk_count);
|
||||
@ -102,7 +109,7 @@ fn test_put_get() {
|
||||
assert!(chunk_array_with_proof
|
||||
.proof
|
||||
.validate::<Sha3Algorithm>(
|
||||
&data_to_merkle_leaves(&chunk_array_with_proof.chunks.data).unwrap(),
|
||||
&data_to_merkle_leaves_h256(&chunk_array_with_proof.chunks.data).unwrap(),
|
||||
i + start_offset
|
||||
)
|
||||
.is_ok());
|
||||
@ -119,12 +126,12 @@ fn test_root() {
|
||||
}
|
||||
let mt = sub_merkle_tree(&data).unwrap();
|
||||
println!("{:?} {}", mt.root(), hex::encode(mt.root()));
|
||||
let append_mt = AppendMerkleTree::<H256, Sha3Algorithm>::new(
|
||||
let append_mt = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
|
||||
data_to_merkle_leaves(&data).unwrap(),
|
||||
0,
|
||||
None,
|
||||
);
|
||||
assert_eq!(mt.root(), append_mt.root().0);
|
||||
assert_eq!(mt.root(), append_mt.root().unwrap().0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ use crate::log_store::log_manager::{
|
||||
use crate::log_store::metrics;
|
||||
use crate::{try_option, LogManager, ZgsKeyValueDB};
|
||||
use anyhow::{anyhow, Result};
|
||||
use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm};
|
||||
use append_merkle::{AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
|
||||
use ethereum_types::H256;
|
||||
use merkle_light::merkle::log2_pow2;
|
||||
use shared_types::{DataRoot, Transaction};
|
||||
@ -329,7 +329,7 @@ impl TransactionStore {
|
||||
&self,
|
||||
pora_chunk_index: usize,
|
||||
mut tx_seq: u64,
|
||||
) -> Result<AppendMerkleTree<H256, Sha3Algorithm>> {
|
||||
) -> Result<AppendMerkleTree<OptionalHash, Sha3Algorithm>> {
|
||||
let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64;
|
||||
let mut tx_list = Vec::new();
|
||||
// Find the first tx within the last chunk.
|
||||
@ -384,9 +384,13 @@ impl TransactionStore {
|
||||
}
|
||||
let mut merkle = if last_chunk_start_index == 0 {
|
||||
// The first entry hash is initialized as zero.
|
||||
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(vec![H256::zero()], 1, None)
|
||||
AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
|
||||
vec![H256::zero().into()],
|
||||
1,
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(
|
||||
AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
|
||||
vec![],
|
||||
log2_pow2(PORA_CHUNK_SIZE) + 1,
|
||||
None,
|
||||
@ -400,9 +404,12 @@ impl TransactionStore {
|
||||
cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree);
|
||||
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(pad_len))?);
|
||||
}
|
||||
// Since we are building the last merkle with a given last tx_seq, it's ensured
|
||||
// that appending subtrees will not go beyond the max size.
|
||||
merkle.append_subtree_list(subtree_list)?;
|
||||
// Convert H256 to OptionalHash for append_subtree_list
|
||||
let subtree_list_optional_hash = subtree_list
|
||||
.into_iter()
|
||||
.map(|(depth, hash)| (depth, hash.into()))
|
||||
.collect();
|
||||
merkle.append_subtree_list(subtree_list_optional_hash)?;
|
||||
merkle.commit(Some(tx_seq));
|
||||
}
|
||||
Ok(merkle)
|
||||
|
||||
run/config-mainnet-turbo.toml (new file): 361 lines

@@ -0,0 +1,361 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
#######################################################################
|
||||
### Network Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# Data directory where node's keyfile is stored.
|
||||
# network_dir = "network"
|
||||
|
||||
# IP address to listen on.
|
||||
# network_listen_address = "0.0.0.0"
|
||||
|
||||
# The address to broadcast to peers about which address we are listening on. Generally,
|
||||
# configure public IP address for UDP discovery. If not specified, program will try to
|
||||
# detect public IP address automatically.
|
||||
# network_enr_address = ""
|
||||
|
||||
# The tcp port to broadcast to peers in order to reach back for libp2p services.
|
||||
# network_enr_tcp_port = 1234
|
||||
|
||||
# The udp port to broadcast to peers in order to reach back for discovery.
|
||||
# network_enr_udp_port = 1234
|
||||
|
||||
# The TCP port that libp2p listens on.
|
||||
# network_libp2p_port = 1234
|
||||
|
||||
# UDP port that discovery listens on.
|
||||
# network_discovery_port = 1234
|
||||
|
||||
# Target number of connected peers. can be 100
|
||||
# network_target_peers = 50
|
||||
|
||||
# List of nodes to bootstrap UDP discovery. Note, `network_enr_address` should be
|
||||
# configured as well to enable UDP discovery.
|
||||
network_boot_nodes = ["/ip4/34.66.131.173/udp/1234/p2p/16Uiu2HAmG81UgZ1JJLx9T2HqELgJNP36ChHzYkCdA9HdxvAbb5jQ","/ip4/34.60.163.4/udp/1234/p2p/16Uiu2HAmL3DoA7e7mbxs7CkeCPtNrAcfJFFtLpJDr2HWuR6QwJ8k","/ip4/34.169.236.186/udp/1234/p2p/16Uiu2HAm489RdhEgZUFmNTR4jdLEE4HjrvwaPCkEpSYSgvqi1CbR","/ip4/34.71.110.60/udp/1234/p2p/16Uiu2HAmBfGfbLNRegcqihiuXhgSXWNpgiGm6EwW2SYexfPUNUHQ"]
|
||||
|
||||
# List of libp2p nodes to initially connect to.
|
||||
# network_libp2p_nodes = []
|
||||
|
||||
# Indicates if the user has set the network to be in private mode. Currently this
|
||||
# prevents sending client identifying information over identify.
|
||||
# network_private = false
|
||||
|
||||
# Disables the discovery protocol from starting.
|
||||
# network_disable_discovery = false
|
||||
|
||||
#######################################################################
|
||||
### UDP Discovery Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# The request timeout for each UDP request.
|
||||
# discv5_request_timeout_secs = 5
|
||||
|
||||
# The timeout after which a `QueryPeer` in an ongoing query is marked unresponsive.
|
||||
# Unresponsive peers don't count towards the parallelism limits for a query.
|
||||
# Hence, we may potentially end up making more requests to good peers.
|
||||
# discv5_query_peer_timeout_secs = 2
|
||||
|
||||
# The number of retries for each UDP request.
|
||||
# discv5_request_retries = 1
|
||||
|
||||
# The number of peers to request in parallel in a single query.
|
||||
# discv5_query_parallelism = 5
|
||||
|
||||
# Reports all discovered ENR's when traversing the DHT to the event stream.
|
||||
# discv5_report_discovered_peers = false
|
||||
|
||||
# Disables the incoming packet filter.
|
||||
# discv5_disable_packet_filter = false
|
||||
|
||||
# Disable to limit the number of IP addresses from the same
|
||||
# /24 subnet in the kbuckets table. This is to mitigate eclipse attacks.
|
||||
# discv5_disable_ip_limit = false
|
||||
|
||||
#######################################################################
|
||||
### Log Sync Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# RPC endpoint to sync event logs on EVM compatible blockchain.
|
||||
# blockchain_rpc_endpoint = "http://127.0.0.1:8545"
|
||||
|
||||
# Flow contract address to sync event logs.
|
||||
log_contract_address = "0x62D4144dB0F0a6fBBaeb6296c785C71B3D57C526"
|
||||
|
||||
# Block number to sync event logs from blockchain. Generally, this is
|
||||
# the block number when flow contract deployed.
|
||||
log_sync_start_block_number = 2387557
|
||||
|
||||
# Number of blocks to confirm a transaction.
|
||||
confirmation_block_count = 1
|
||||
|
||||
# Maximum number of event logs to poll at a time.
|
||||
# log_page_size = 999
|
||||
|
||||
# Maximum data size to cache in memory (by default, 100MB).
|
||||
# max_cache_data_size = 104857600
|
||||
|
||||
# TTL to cache data in memory.
|
||||
# cache_tx_seq_ttl = 500
|
||||
|
||||
# The number of retries after a RPC request times out.
|
||||
# rate_limit_retries = 100
|
||||
|
||||
# The nubmer of retries for rate limited responses.
|
||||
# timeout_retries = 100
|
||||
|
||||
# The duration to wait before retry, in ms.
|
||||
# initial_backoff = 500
|
||||
|
||||
# The duration between each paginated getLogs RPC call, in ms.
|
||||
# This is set to avoid triggering the throttling mechanism in the RPC server.
|
||||
# recover_query_delay = 50
|
||||
|
||||
# The counter assumed the finalized block behind the latest block.
|
||||
# default_finalized_block_count = 100
|
||||
|
||||
# Remove finalized block trigger interval.
|
||||
# remove_finalized_block_interval_minutes = 30
|
||||
|
||||
# Watch_loop (eth_getLogs) trigger interval.
|
||||
# watch_loop_wait_time_ms = 500
|
||||
|
||||
#######################################################################
|
||||
### Chunk Pool Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# Maximum number of threads to upload segments of a single file simultaneously.
|
||||
# chunk_pool_write_window_size = 2
|
||||
|
||||
# Maximum data size of cached segment in pool (by default, 4MB).
|
||||
# chunk_pool_max_cached_chunks_all = 4194304
|
||||
|
||||
# Maximum number of threads to upload segments for all files simultaneously.
|
||||
# chunk_pool_max_writings = 64
|
||||
|
||||
# Expiration time to cache uploaded segments in memory.
|
||||
# chunk_pool_expiration_time_secs = 300
|
||||
|
||||
#######################################################################
|
||||
### DB Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# Directory to store data.
|
||||
# db_dir = "db"
|
||||
|
||||
#######################################################################
|
||||
### Misc Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# Log configuration file.
|
||||
# log_config_file = "log_config"
|
||||
|
||||
# Log directory.
|
||||
# log_directory = "log"
|
||||
|
||||
#######################################################################
|
||||
### Mine Config Options ###
|
||||
#######################################################################
|
||||
|
||||
# Mine contract address for incentive.
|
||||
mine_contract_address = "0xCd01c5Cd953971CE4C2c9bFb95610236a7F414fe"
|
||||
|
||||
# Miner key is used to sign blockchain transaction for incentive.
|
||||
# The value should be a hex string of length 64 without 0x prefix.
|
||||
#
|
||||
# Note, the corresponding address should have enough tokens to pay
|
||||
# transaction gas fee.
|
||||
# miner_key = ""
|
||||
|
||||
# Period for querying mine context on chain (in seconds)
|
||||
#
|
||||
# Note: During each query period, nodes will issue 3 `eth_call` requests.
|
||||
# If your blockchain RPC endpoint is a public or priced node, please be
|
||||
# cautious not to set the period too short.
|
||||
#
|
||||
# mine_context_query_seconds = 5
|
||||

# CPU usage percentage for PoRA mining. 100 means one CPU core is fully loaded.
#
# miner_cpu_percentage = 100

#######################################################################
### Sharding Config Options ###
#######################################################################

# The max number of chunk entries to store in db.
# Each entry is 256B, so the db size is roughly limited to
# `256 * db_max_num_sectors` Bytes.
# If this limit is reached, the node will update its `shard_position`
# and store only half of the data.
#
# db_max_num_sectors = 4000000000
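# For example, the default above corresponds to roughly 4000000000 * 256 B ≈ 1 TB of chunk data.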

# The format is <shard_id>/<shard_number>, where the shard number is 2^n.
# This only applies if there is no stored shard config in db.
# shard_position = "0/1"
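# For example (illustrative), "0/2" and "1/2" each cover one half of the data,
# while the default "0/1" covers the full range.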

reward_contract_address = "0x457aC76B58ffcDc118AABD6DbC63ff9072880870"

# The time interval to check if we should halve `shard_position` to prune data.
#
# prune_check_time_s = 60

# The number of chunk entries to delete in a batch when we prune data.
#
# prune_batch_size = 1024

# The time interval to wait between each prune batch deletion to avoid
# IO resource exhaustion.
#
# prune_batch_wait_time_ms = 1000

#######################################################################
### Network Peer DB Config Options ###
#######################################################################

[network_peer_db]

# The maximum number of disconnected nodes to remember.
max_disconnected_peers = 10000

# The maximum number of banned nodes to remember.
max_banned_peers = 10000

#######################################################################
### Router Config Options ###
#######################################################################

[router]

# Timeout to publish file announcements in batch.
# batcher_timeout = "1s"

# Number of files in an announcement to publish in batch.
batcher_file_capacity = 10

# Number of announcements in a pubsub message to publish in batch.
batcher_announcement_capacity = 100

#######################################################################
### File Sync Config Options ###
#######################################################################

[sync]

# Enable file sync among peers automatically. When enabled, each node will store
# all files, and sufficient disk space is required.
auto_sync_enabled = true

# Indicates whether to sync files from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeouts when syncing files.
neighbors_only = true

# Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 8

# Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
# sync_file_by_rpc_enabled = true

# Maximum number of continuous failures to terminate a file sync.
# max_request_failures = 5

# Timeout to dial peers.
# peer_connect_timeout = "15s"

# Timeout to disconnect peers.
# peer_disconnect_timeout = "15s"

# Timeout to find peers via FIND_FILE P2P pubsub message.
# peer_find_timeout = "120s"

# Timeout to download data from a remote peer.
# peer_chunks_download_timeout = "15s"

# Maximum network bandwidth (B/s) to sync files. Default value is 0,
# which indicates no limitation (the commented example below corresponds to 50 MB/s).
# max_bandwidth_bytes = 50 * 1024 * 1024
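# Note: if this file is parsed as TOML, arithmetic such as `50 * 1024 * 1024` is not
# evaluated; set the literal value instead, e.g. `max_bandwidth_bytes = 52428800` for 50 MB/s.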

# Maximum threads to sync files in sequence.
# max_sequential_workers = 0

# Maximum threads to sync files randomly.
# max_random_workers = 2

# Timeout to terminate a sequential file sync.
# sequential_find_peer_timeout = "60s"

# Timeout to terminate a random file sync.
# random_find_peer_timeout = "500s"

#######################################################################
### File Location Cache Options ###
#######################################################################

# [file_location_cache]

# The file location cache maintains storage positions of files.
# A storage location is represented by the IP address of the storage node and the
# timestamp indicating when the node declared that it stores the corresponding file.
# The cache has both a global capacity limit and a per-file limit on the amount of
# location information kept.
# When the cache is full, the storage location with the oldest timestamp is replaced.
# Global cache capacity.
# max_entries_total = 1000000

# Location information capacity for each file.
# max_entries_per_file = 4

# Validity period of location information.
# If the timestamp in the storage location information is older than this duration
# relative to the current time, the entry is removed from the cache.
# entry_expiration_time_secs = 86400
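# (86400 seconds = 24 hours.)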

#######################################################################
### RPC Config Options ###
#######################################################################

[rpc]

# Whether to provide RPC service.
# enabled = true

# HTTP server address to bind for public RPC.
# listen_address = "0.0.0.0:5678"

# HTTP server address to bind for admin and debug RPC.
# listen_address_admin = "0.0.0.0:5679"
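# A common hardening choice (optional) is to expose the admin endpoint on localhost only:
# listen_address_admin = "127.0.0.1:5679"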

# gRPC server address to bind.
# listen_address_grpc = "0.0.0.0:50051"

# Number of chunks for a single segment.
# chunks_per_segment = 1024

# Maximum data size of RPC request body (by default, 10MB).
# max_request_body_size = 10485760

# Maximum file size that is allowed to be cached in memory (by default, 10MB).
# max_cache_file_size = 10485760
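# (10 MB = 10 * 1024 * 1024 = 10485760 bytes for both limits above.)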

#######################################################################
### Metrics Options ###
#######################################################################

# [metrics]

# Whether to enable metrics.
# enabled = false

# Interval to output metrics periodically, e.g. "10s", "30s" or "60s".
# report_interval = "10s"

# File name to output metrics periodically.
# file_report_output = ""

# InfluxDB configuration to output metrics periodically.
# influxdb_report_host = ""
# influxdb_report_db = ""
# influxdb_report_username = ""
# influxdb_report_password = ""
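# A minimal sketch assuming a local InfluxDB 1.x instance on its default port
# (all values below are hypothetical placeholders):
# influxdb_report_host = "http://127.0.0.1:8086"
# influxdb_report_db = "zgs_metrics"
# influxdb_report_username = "metrics"
# influxdb_report_password = "change-me"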

# Storage node name as a tag.
# influxdb_report_node = ""
@ -33,7 +33,7 @@

# List of nodes to bootstrap UDP discovery. Note, `network_enr_address` should be
# configured as well to enable UDP discovery.
network_boot_nodes = ["/ip4/47.251.79.83/udp/1234/p2p/16Uiu2HAkvJYQABP1MdvfWfUZUzGLx1sBSDZ2AT92EFKcMCCPVawV", "/ip4/47.238.87.44/udp/1234/p2p/16Uiu2HAmFGsLoajQdEds6tJqsLX7Dg8bYd2HWR4SbpJUut4QXqCj", "/ip4/47.251.78.104/udp/1234/p2p/16Uiu2HAmSe9UWdHrqkn2mKh99b9DwYZZcea6krfidtU3e5tiHiwN", "/ip4/47.76.30.235/udp/1234/p2p/16Uiu2HAm5tCqwGtXJemZqBhJ9JoQxdDgkWYavfCziaqaAYkGDSfU"]
network_boot_nodes = ["/ip4/35.236.80.213/udp/1234/p2p/16Uiu2HAm1w2Lkr4vsnHUgHiyQBpVXmDuvuLP9SDUZaY5tkZudSME", "/ip4/34.102.76.235/udp/1234/p2p/16Uiu2HAmPQ9WTyYbstNPFX4Va8gH5cfkLJ5fJL9h7U4sgJyaHbcm"]

# List of libp2p nodes to initially connect to.
# network_libp2p_nodes = []
@ -80,14 +80,14 @@ network_boot_nodes = ["/ip4/47.251.79.83/udp/1234/p2p/16Uiu2HAkvJYQABP1MdvfWfUZU
# blockchain_rpc_endpoint = "http://127.0.0.1:8545"

# Flow contract address to sync event logs.
log_contract_address = "0x56A565685C9992BF5ACafb940ff68922980DBBC5"
log_contract_address = "0x22E03a6A89B950F1c82ec5e74F8eCa321a105296"

# Block number to sync event logs from blockchain. Generally, this is
# the block number when the flow contract was deployed.
log_sync_start_block_number = 1

# Number of blocks to confirm a transaction.
# confirmation_block_count = 3
confirmation_block_count = 1

# Maximum number of event logs to poll at a time.
# log_page_size = 999
@ -125,13 +125,13 @@ log_sync_start_block_number = 1
#######################################################################

# Maximum number of threads to upload segments of a single file simultaneously.
# chunk_pool_write_window_size = 4
chunk_pool_write_window_size = 2

# Maximum data size of cached segment in pool (by default, 4MB).
# chunk_pool_max_cached_chunks_all = 4194304

# Maximum number of threads to upload segments for all files simultaneously.
# chunk_pool_max_writings = 16
chunk_pool_max_writings = 128

# Expiration time to cache uploaded segments in memory.
# chunk_pool_expiration_time_secs = 300
@ -158,7 +158,7 @@ log_sync_start_block_number = 1
#######################################################################

# Mine contract address for incentive.
mine_contract_address = "0xB87E0e5657C25b4e132CB6c34134C0cB8A962AD6"
mine_contract_address = "0x00A9E9604b0538e06b268Fb297Df333337f9593b"

# Miner key is used to sign blockchain transactions for incentive.
# The value should be a hex string of length 64 without the 0x prefix.
@ -194,7 +194,7 @@ db_max_num_sectors = 4000000000
# This only applies if there is no stored shard config in db.
# shard_position = "0/2"

reward_contract_address = "0x233B2768332e4Bae542824c93cc5c8ad5d44517E"
reward_contract_address = "0xA97B57b4BdFEA2D0a25e535bd849ad4e6C440A69"
# The time interval to check if we should halve `shard_position` to prune data.
#
# prune_check_time_s = 60
@ -215,10 +215,10 @@ reward_contract_address = "0x233B2768332e4Bae542824c93cc5c8ad5d44517E"
# [network_peer_db]

# The maximum number of disconnected nodes to remember.
# max_disconnected_peers = 500
max_disconnected_peers = 10000

# The maximum number of banned nodes to remember.
# max_banned_peers = 1000
max_banned_peers = 10000

#######################################################################
### Router Config Options ###
@ -244,6 +244,10 @@ batcher_announcement_capacity = 100
# all files, and sufficient disk space is required.
auto_sync_enabled = true

# Indicates whether to sync files from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeouts when syncing files.
neighbors_only = true

# Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 16