Compare commits


8 Commits
v1.1.0 ... main

Author SHA1 Message Date
molla202
b857728660
grpc add update (#398)
* Update config-mainnet-turbo.toml

* Update grpc config-mainnet-turbo.toml
2025-10-24 10:05:10 +08:00
0g-peterzhb
cf11e1b68a
add mainnet turbo config (#397) 2025-10-11 16:16:21 +08:00
0g-peterzhb
46de15a345
fix context error (#396)
* fix context error

* fix context error
2025-10-02 10:39:32 +08:00
0g-peterzhb
88287333b5
update test turbo config (#394) 2025-09-27 22:06:26 +08:00
0g-peterzhb
df570e34d2
a complete fix on null hash (#391)
* a complete fix on null hash

* simplify

* simplify

* fix tests

* fix tests

* fix tests

* no zero

* fix error

* remove unnecessary code

* len is fixed

* remove unnecessary code

* fix cicd
2025-09-18 14:24:50 +08:00
Jennifer Zelo
a3717d6bc1
docs: fix typos (#386) 2025-09-04 20:51:34 +08:00
CrazyFrog
55087eac7f
Update GitHub Actions in CI Workflows (#384)
* Update abi.yml

* Update cc.yml

* Update rust.yml

* Update tests.yml
2025-09-04 20:48:48 +08:00
0g-peterzhb
9a1edae9a2
fix lint (#383) 2025-08-11 09:34:03 +08:00
19 changed files with 859 additions and 143 deletions

View File

@@ -12,14 +12,14 @@ jobs:
     steps:
       - name: Clone current repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Get the Git revision from the current repository
        id: get-rev
        run: echo "rev=$(cat ./storage-contracts-abis/0g-storage-contracts-rev)" >> $GITHUB_OUTPUT
      - name: Clone another repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          repository: '0glabs/0g-storage-contracts'
          path: '0g-storage-contracts'

View File

@@ -29,7 +29,7 @@ jobs:
          swap-storage: true
      - name: Checkout sources
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: recursive

View File

@@ -21,7 +21,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)
@@ -37,7 +37,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)
@@ -53,7 +53,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: recursive
      - name: Setup Rust (cache & toolchain)

View File

@@ -29,8 +29,9 @@ jobs:
          swap-storage: true
      - name: Checkout sources
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
+          persist-credentials: false # prevents writing the extraheader
          submodules: recursive
      - name: Setup Rust (cache & toolchain)

View File

@ -16,12 +16,84 @@ use tracing::{trace, warn};
use crate::merkle_tree::MerkleTreeWrite; use crate::merkle_tree::MerkleTreeWrite;
pub use crate::merkle_tree::{ pub use crate::merkle_tree::{
Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, ZERO_HASHES, Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, OptionalHash, ZERO_HASHES,
}; };
pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction}; pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction};
pub use proof::{Proof, RangeProof}; pub use proof::{Proof, RangeProof};
pub use sha3::Sha3Algorithm; pub use sha3::Sha3Algorithm;
// Helper functions for converting between H256 and OptionalHash types
use ethereum_types::H256;
impl AppendMerkleTree<OptionalHash, Sha3Algorithm> {
/// Convert a proof of OptionalHash to a proof of H256
pub fn convert_proof_to_h256(proof: Proof<OptionalHash>) -> Result<Proof<H256>, anyhow::Error> {
let lemma: Result<Vec<H256>, anyhow::Error> = proof
.lemma()
.iter()
.map(|oh| {
oh.0.ok_or_else(|| anyhow::anyhow!("Cannot convert null OptionalHash to H256"))
})
.collect();
Proof::new(lemma?, proof.path().to_vec())
}
/// Convert a range proof of OptionalHash to a range proof of H256
pub fn convert_range_proof_to_h256(
proof: RangeProof<OptionalHash>,
) -> Result<RangeProof<H256>, anyhow::Error> {
Ok(RangeProof {
left_proof: Self::convert_proof_to_h256(proof.left_proof)?,
right_proof: Self::convert_proof_to_h256(proof.right_proof)?,
})
}
/// Convert a Proof<H256> to Proof<OptionalHash>
pub fn convert_proof_from_h256(
proof: Proof<H256>,
) -> Result<Proof<OptionalHash>, anyhow::Error> {
let lemma = proof
.lemma()
.iter()
.map(|h| OptionalHash::some(*h))
.collect();
let path = proof.path().to_vec();
Proof::new(lemma, path)
}
/// Convert a RangeProof<H256> to RangeProof<OptionalHash>
pub fn convert_range_proof_from_h256(
range_proof: RangeProof<H256>,
) -> Result<RangeProof<OptionalHash>, anyhow::Error> {
Ok(RangeProof {
left_proof: Self::convert_proof_from_h256(range_proof.left_proof)?,
right_proof: Self::convert_proof_from_h256(range_proof.right_proof)?,
})
}
/// Generate a proof and convert it to H256
pub fn gen_proof_h256(&self, leaf_index: usize) -> Result<Proof<H256>, anyhow::Error> {
let proof = self.gen_proof(leaf_index)?;
Self::convert_proof_to_h256(proof)
}
/// Generate a range proof and convert it to H256
pub fn gen_range_proof_h256(
&self,
start_index: usize,
end_index: usize,
) -> Result<RangeProof<H256>, anyhow::Error> {
let proof = self.gen_range_proof(start_index, end_index)?;
Self::convert_range_proof_to_h256(proof)
}
/// Get the root as H256 (unwraps the OptionalHash)
pub fn root_h256(&self) -> H256 {
self.root().unwrap()
}
}
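Taken together, these helpers let callers stay on the H256-based proof API while the tree itself stores OptionalHash nodes. A minimal sketch of the intended round trip, mirroring the unit tests further down in this file (assumes the append_merkle items above are in scope; error handling elided):

use append_merkle::{AppendMerkleTree, OptionalHash, Proof, Sha3Algorithm};
use ethereum_types::H256;

// Build an OptionalHash-backed tree but hand back only H256 artifacts.
fn prove_first_data_leaf(data: Vec<H256>) -> anyhow::Result<(H256, Proof<H256>)> {
    // Leaf 0 is the zero sentinel, as in the tests; data leaves start at index 1.
    let mut tree = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
        vec![OptionalHash::some(H256::zero())],
        0,
        None,
    );
    tree.append_list(data.into_iter().map(OptionalHash::some).collect());
    tree.commit(Some(0));
    let proof = tree.gen_proof_h256(1)?; // OptionalHash proof converted to H256
    Ok((tree.root_h256(), proof))        // root unwrapped to H256 (panics if still null)
}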
pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> { pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
/// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves. /// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves.
node_manager: NodeManager<E>, node_manager: NodeManager<E>,
@ -148,7 +220,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append(&mut self, new_leaf: E) { pub fn append(&mut self, new_leaf: E) {
let start_time = Instant::now(); let start_time = Instant::now();
if new_leaf == E::null() { if new_leaf.is_null() {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
@ -162,7 +234,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append_list(&mut self, leaf_list: Vec<E>) { pub fn append_list(&mut self, leaf_list: Vec<E>) {
let start_time = Instant::now(); let start_time = Instant::now();
if leaf_list.contains(&E::null()) { if leaf_list.iter().any(|leaf| leaf.is_null()) {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
@ -181,7 +253,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// TODO: Optimize to avoid storing the `null` nodes? /// TODO: Optimize to avoid storing the `null` nodes?
pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> { pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
let start_time = Instant::now(); let start_time = Instant::now();
if subtree_root == E::null() { if subtree_root.is_null() {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_root is null"); bail!("subtree_root is null");
} }
@ -197,7 +269,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> { pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
let start_time = Instant::now(); let start_time = Instant::now();
if subtree_list.iter().any(|(_, root)| root == &E::null()) { if subtree_list.iter().any(|(_, root)| root.is_null()) {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_list contains null"); bail!("subtree_list contains null");
} }
@ -217,7 +289,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves. /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
pub fn update_last(&mut self, updated_leaf: E) { pub fn update_last(&mut self, updated_leaf: E) {
let start_time = Instant::now(); let start_time = Instant::now();
if updated_leaf == E::null() { if updated_leaf.is_null() {
// updating to null is not allowed. // updating to null is not allowed.
return; return;
} }
@ -237,9 +309,9 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// Panics if the leaf is already set and different or the index is out of range. /// Panics if the leaf is already set and different or the index is out of range.
/// TODO: Batch computing intermediate nodes. /// TODO: Batch computing intermediate nodes.
pub fn fill_leaf(&mut self, index: usize, leaf: E) { pub fn fill_leaf(&mut self, index: usize, leaf: E) {
if leaf == E::null() { if leaf.is_null() {
// fill leaf with null is not allowed. // fill leaf with null is not allowed.
} else if self.node(0, index) == E::null() { } else if self.node(0, index).is_null() {
self.node_manager.start_transaction(); self.node_manager.start_transaction();
self.update_node(0, index, leaf); self.update_node(0, index, leaf);
self.recompute_after_fill_leaves(index, index + 1); self.recompute_after_fill_leaves(index, index + 1);
@ -332,7 +404,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
// skip padding node. // skip padding node.
continue; continue;
} }
if self.node(i, position) == E::null() { if self.node(i, position).is_null() {
self.update_node(i, position, data.clone()); self.update_node(i, position, data.clone());
updated_nodes.push((i, position, data)) updated_nodes.push((i, position, data))
} else if self.node(i, position) != data { } else if self.node(i, position) != data {
@ -357,7 +429,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
if position >= self.leaves() { if position >= self.leaves() {
bail!("Out of bound: position={} end={}", position, self.leaves()); bail!("Out of bound: position={} end={}", position, self.leaves());
} }
if self.node(0, position) != E::null() { if !self.node(0, position).is_null() {
Ok(Some(self.node(0, position))) Ok(Some(self.node(0, position)))
} else { } else {
// The leaf hash is unknown. // The leaf hash is unknown.
@ -472,7 +544,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
// Note that if we are recompute a range of an existing tree, // Note that if we are recompute a range of an existing tree,
// we do not need to keep these possibly null parent. This is only saved // we do not need to keep these possibly null parent. This is only saved
// for the case of constructing a new tree from the leaves. // for the case of constructing a new tree from the leaves.
let parent = if *left == E::null() || *right == E::null() { let parent = if left.is_null() || right.is_null() {
E::null() E::null()
} else { } else {
A::parent(left, right) A::parent(left, right)
@ -483,7 +555,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
assert_eq!(chunk.len(), 1); assert_eq!(chunk.len(), 1);
let r = &chunk[0]; let r = &chunk[0];
// Same as above. // Same as above.
let parent = if *r == E::null() { let parent = if r.is_null() {
E::null() E::null()
} else { } else {
A::parent_single(r, height + self.leaf_height) A::parent_single(r, height + self.leaf_height)
@ -501,8 +573,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
match parent_index.cmp(&self.layer_len(height + 1)) { match parent_index.cmp(&self.layer_len(height + 1)) {
Ordering::Less => { Ordering::Less => {
// We do not overwrite with null. // We do not overwrite with null.
if parent != E::null() { if !parent.is_null() {
if self.node(height + 1, parent_index) == E::null() if self.node(height + 1, parent_index).is_null()
// The last node in a layer can be updated. // The last node in a layer can be updated.
|| (self.node(height + 1, parent_index) != parent || (self.node(height + 1, parent_index) != parent
&& parent_index == self.layer_len(height + 1) - 1) && parent_index == self.layer_len(height + 1) - 1)
@ -741,7 +813,7 @@ impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> Self::E { fn node(&self, layer: usize, index: usize) -> Self::E {
match self.delta_nodes.get(layer, index).expect("range checked") { match self.delta_nodes.get(layer, index).expect("range checked") {
Some(node) if *node != E::null() => node.clone(), Some(node) if !node.is_null() => node.clone(),
_ => self _ => self
.node_manager .node_manager
.get_node(layer, index) .get_node(layer, index)
@ -798,7 +870,7 @@ macro_rules! ensure_eq {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::merkle_tree::MerkleTreeRead; use crate::merkle_tree::{MerkleTreeRead, OptionalHash};
use crate::sha3::Sha3Algorithm; use crate::sha3::Sha3Algorithm;
use crate::AppendMerkleTree; use crate::AppendMerkleTree;
@ -812,21 +884,30 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
let mut merkle = let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); vec![OptionalHash::some(H256::zero())],
merkle.append_list(data.clone()); 0,
None,
);
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(0)); merkle.commit(Some(0));
verify(&data, &mut merkle); verify(&data, &mut merkle);
data.push(H256::random()); data.push(H256::random());
merkle.append(*data.last().unwrap()); merkle.append(OptionalHash::some(*data.last().unwrap()));
merkle.commit(Some(1)); merkle.commit(Some(1));
verify(&data, &mut merkle); verify(&data, &mut merkle);
for _ in 0..6 { for _ in 0..6 {
data.push(H256::random()); data.push(H256::random());
} }
merkle.append_list(data[data.len() - 6..].to_vec()); merkle.append_list(
data[data.len() - 6..]
.iter()
.copied()
.map(OptionalHash::some)
.collect(),
);
merkle.commit(Some(2)); merkle.commit(Some(2));
verify(&data, &mut merkle); verify(&data, &mut merkle);
} }
@ -840,9 +921,12 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
let mut merkle = let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); vec![OptionalHash::some(H256::zero())],
merkle.append_list(data.clone()); 0,
None,
);
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(0)); merkle.commit(Some(0));
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
@ -850,12 +934,17 @@ mod tests {
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap(); let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
let mut new_data = Vec::new(); let mut new_data = Vec::new();
for _ in 0..3 { for _ in 0..3 {
new_data.push(H256::random()); new_data.push(OptionalHash::some(H256::random()));
} }
merkle.append_list(new_data); merkle.append_list(new_data);
let seq = i as u64 / 6 + 1; let seq = i as u64 / 6 + 1;
merkle.commit(Some(seq)); merkle.commit(Some(seq));
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }
@ -865,7 +954,11 @@ mod tests {
#[test] #[test]
fn test_proof_at_version() { fn test_proof_at_version() {
let n = [2, 255, 256, 257]; let n = [2, 255, 256, 257];
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
vec![OptionalHash::some(H256::zero())],
0,
None,
);
let mut start_pos = 0; let mut start_pos = 0;
for (tx_seq, &entry_len) in n.iter().enumerate() { for (tx_seq, &entry_len) in n.iter().enumerate() {
@ -873,7 +966,7 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
merkle.append_list(data.clone()); merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(tx_seq as u64)); merkle.commit(Some(tx_seq as u64));
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
let end = std::cmp::min(start_pos + i + 3, data.len()); let end = std::cmp::min(start_pos + i + 3, data.len());
@ -882,7 +975,12 @@ mod tests {
.unwrap() .unwrap()
.gen_range_proof(start_pos + i + 1, start_pos + end + 1) .gen_range_proof(start_pos + i + 1, start_pos + end + 1)
.unwrap(); .unwrap();
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], start_pos + i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, start_pos + i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }
@ -891,16 +989,21 @@ mod tests {
} }
} }
fn verify(data: &[H256], merkle: &mut AppendMerkleTree<H256, Sha3Algorithm>) { fn verify(data: &[H256], merkle: &mut AppendMerkleTree<OptionalHash, Sha3Algorithm>) {
for (i, item) in data.iter().enumerate() { for (i, item) in data.iter().enumerate() {
let proof = merkle.gen_proof(i + 1).unwrap(); let proof = merkle.gen_proof(i + 1).unwrap();
let r = merkle.validate(&proof, item, i + 1); let r = merkle.validate(&proof, &OptionalHash::some(*item), i + 1);
assert!(matches!(r, Ok(true)), "{:?}", r); assert!(matches!(r, Ok(true)), "{:?}", r);
} }
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
let end = std::cmp::min(i + 3, data.len()); let end = std::cmp::min(i + 3, data.len());
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap(); let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }

View File

@ -8,6 +8,173 @@ use std::fmt::Debug;
use std::hash::Hash; use std::hash::Hash;
use tracing::trace; use tracing::trace;
/// A wrapper around Option<H256> that properly handles null hashes
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct OptionalHash(pub Option<H256>);
impl OptionalHash {
pub fn some(hash: H256) -> Self {
OptionalHash(Some(hash))
}
pub fn none() -> Self {
OptionalHash(None)
}
pub fn is_some(&self) -> bool {
self.0.is_some()
}
pub fn is_none(&self) -> bool {
self.0.is_none()
}
pub fn unwrap(&self) -> H256 {
self.0.unwrap()
}
pub fn unwrap_or(&self, default: H256) -> H256 {
self.0.unwrap_or(default)
}
pub fn as_ref(&self) -> Option<&H256> {
self.0.as_ref()
}
/// Create OptionalHash from a byte slice
pub fn from_slice(bytes: &[u8]) -> Result<Self, &'static str> {
if bytes.len() != 32 {
return Err("Invalid byte length for H256");
}
let mut hash_bytes = [0u8; 32];
hash_bytes.copy_from_slice(bytes);
Ok(OptionalHash::some(H256(hash_bytes)))
}
/// Convert to bytes for storage (33 bytes: 1 flag + 32 hash)
pub fn as_bytes(&self) -> [u8; 33] {
let mut bytes = [0u8; 33];
match &self.0 {
Some(hash) => {
bytes[0] = 1; // Some flag
bytes[1..].copy_from_slice(hash.as_ref());
}
None => {
bytes[0] = 0; // None flag
// bytes[1..] remain zeros
}
}
bytes
}
/// Create OptionalHash from storage bytes (33 bytes)
pub fn from_bytes(bytes: &[u8; 33]) -> Result<Self, &'static str> {
match bytes[0] {
0 => Ok(OptionalHash::none()),
1 => {
let mut hash_bytes = [0u8; 32];
hash_bytes.copy_from_slice(&bytes[1..]);
Ok(OptionalHash::some(H256(hash_bytes)))
}
_ => Err("Invalid flag byte for OptionalHash"),
}
}
}
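The fixed 33-byte layout (one flag byte followed by the 32 hash bytes) is what lets a null node be persisted and read back without being mistaken for a real hash. A small illustrative round trip, assuming OptionalHash is in scope:

use ethereum_types::H256;

fn storage_roundtrip() {
    let some = OptionalHash::some(H256::repeat_byte(0xab));
    let none = OptionalHash::none();

    let some_bytes = some.as_bytes();
    let none_bytes = none.as_bytes();
    assert_eq!(some_bytes[0], 1); // Some flag, hash bytes follow
    assert_eq!(none_bytes[0], 0); // None flag, remaining 32 bytes stay zero

    assert_eq!(OptionalHash::from_bytes(&some_bytes).unwrap(), some);
    assert_eq!(OptionalHash::from_bytes(&none_bytes).unwrap(), none);
}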
// Add From conversions for easier usage
impl From<H256> for OptionalHash {
fn from(hash: H256) -> Self {
OptionalHash::some(hash)
}
}
impl From<Option<H256>> for OptionalHash {
fn from(opt: Option<H256>) -> Self {
OptionalHash(opt)
}
}
impl From<OptionalHash> for Option<H256> {
fn from(opt_hash: OptionalHash) -> Self {
opt_hash.0
}
}
impl AsRef<[u8]> for OptionalHash {
fn as_ref(&self) -> &[u8] {
self.0.as_ref().unwrap().as_ref()
}
}
impl AsMut<[u8]> for OptionalHash {
fn as_mut(&mut self) -> &mut [u8] {
if self.0.is_none() {
self.0 = Some(H256::zero());
}
self.0.as_mut().unwrap().as_mut()
}
}
impl Encode for OptionalHash {
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_fixed_len() -> usize {
33 // 1 byte for Some/None flag + 32 bytes for hash
}
fn ssz_bytes_len(&self) -> usize {
33
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
match &self.0 {
Some(hash) => {
buf.push(1); // Some flag
hash.ssz_append(buf);
}
None => {
buf.push(0); // None flag
buf.extend_from_slice(&[0u8; 32]); // Padding zeros
}
}
}
}
impl Decode for OptionalHash {
fn is_ssz_fixed_len() -> bool {
true
}
fn ssz_fixed_len() -> usize {
33 // 1 byte for Some/None flag + 32 bytes for hash
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
if bytes.len() != 33 {
return Err(ssz::DecodeError::InvalidByteLength {
len: bytes.len(),
expected: 33,
});
}
match bytes[0] {
0 => Ok(OptionalHash::none()),
1 => {
let hash = H256::from_ssz_bytes(&bytes[1..])?;
Ok(OptionalHash::some(hash))
}
_ => Err(ssz::DecodeError::BytesInvalid(
"Invalid flag byte for OptionalHash".to_string(),
)),
}
}
}
unsafe impl Send for OptionalHash {}
unsafe impl Sync for OptionalHash {}
pub trait HashElement: pub trait HashElement:
Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync
{ {
@ -18,13 +185,28 @@ pub trait HashElement:
} }
} }
impl HashElement for OptionalHash {
fn end_pad(height: usize) -> Self {
OptionalHash::some(ZERO_HASHES[height])
}
fn null() -> Self {
OptionalHash::none()
}
fn is_null(&self) -> bool {
self.is_none()
}
}
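This appears to be the heart of the "complete fix on null hash" change: with OptionalHash, "unknown" is a distinct state rather than the magic value H256::repeat_byte(0x01), so a genuine leaf that happens to equal that sentinel is no longer treated as null. A brief illustration (assumed usage, with the HashElement trait above in scope):

use ethereum_types::H256;

fn null_semantics() {
    // The Option-based element makes nullness explicit.
    assert!(OptionalHash::none().is_null());
    assert!(!OptionalHash::some(H256::repeat_byte(0x01)).is_null());

    // The legacy H256 element keeps 0x0101..01 as its null sentinel for compatibility.
    assert_eq!(<H256 as HashElement>::null(), H256::repeat_byte(0x01));
}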
// Keep the H256 implementation for backward compatibility
impl HashElement for H256 { impl HashElement for H256 {
fn end_pad(height: usize) -> Self { fn end_pad(height: usize) -> Self {
ZERO_HASHES[height] ZERO_HASHES[height]
} }
fn null() -> Self { fn null() -> Self {
H256::repeat_byte(1) H256::repeat_byte(0x01)
} }
} }
@ -70,7 +252,7 @@ pub trait MerkleTreeRead {
self.leaves() self.leaves()
); );
} }
if self.node(0, leaf_index) == Self::E::null() { if self.node(0, leaf_index).is_null() {
bail!("Not ready to generate proof for leaf_index={}", leaf_index); bail!("Not ready to generate proof for leaf_index={}", leaf_index);
} }
if self.height() == 1 { if self.height() == 1 {
@ -102,7 +284,7 @@ pub trait MerkleTreeRead {
index_in_layer >>= 1; index_in_layer >>= 1;
} }
lemma.push(self.root()); lemma.push(self.root());
if lemma.contains(&Self::E::null()) { if lemma.iter().any(|e| e.is_null()) {
bail!( bail!(
"Not enough data to generate proof, lemma={:?} path={:?}", "Not enough data to generate proof, lemma={:?} path={:?}",
lemma, lemma,

View File

@ -1,4 +1,4 @@
use crate::merkle_tree::ZERO_HASHES; use crate::merkle_tree::{OptionalHash, ZERO_HASHES};
use crate::{Algorithm, HashElement}; use crate::{Algorithm, HashElement};
use ethereum_types::H256; use ethereum_types::H256;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
@ -50,3 +50,22 @@ impl Algorithm<H256> for Sha3Algorithm {
Self::leaf_raw(data) Self::leaf_raw(data)
} }
} }
impl Algorithm<OptionalHash> for Sha3Algorithm {
fn parent(left: &OptionalHash, right: &OptionalHash) -> OptionalHash {
match (&left.0, &right.0) {
(Some(l), Some(r)) => {
// Use the H256 implementation directly to ensure identical logic
let result = <Self as Algorithm<H256>>::parent(l, r);
OptionalHash::some(result)
}
_ => OptionalHash::none(),
}
}
fn leaf(data: &[u8]) -> OptionalHash {
// Use the H256 implementation directly to ensure identical logic
let result = <Self as Algorithm<H256>>::leaf(data);
OptionalHash::some(result)
}
}
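The OptionalHash algorithm impl also encodes the propagation rule used during recomputation: a parent is hashed only when both children are known; otherwise it stays null and is filled in later. A small sketch of that behaviour (assumed usage of the impl above):

use append_merkle::{Algorithm, OptionalHash, Sha3Algorithm};
use ethereum_types::H256;

fn null_propagation() {
    let known = OptionalHash::some(H256::repeat_byte(0x11));
    let unknown = OptionalHash::none();

    // Both children known: hashes exactly like the H256 implementation.
    let parent = <Sha3Algorithm as Algorithm<OptionalHash>>::parent(&known, &known);
    assert!(parent.is_some());

    // Any null child: the parent stays null until the missing data arrives.
    let pending = <Sha3Algorithm as Algorithm<OptionalHash>>::parent(&known, &unknown);
    assert!(pending.is_none());
}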

View File

@ -26,6 +26,10 @@ pub type MineContextMessage = Option<PoraPuzzle>;
lazy_static! { lazy_static! {
pub static ref EMPTY_HASH: H256 = pub static ref EMPTY_HASH: H256 =
H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap(); H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
pub static ref COMPUTE_WORKER_CONTEXT_CALLER: Address =
"0x000000000000000000000000000000000000000A"
.parse()
.unwrap();
} }
const PORA_VERSION: u64 = 1; const PORA_VERSION: u64 = 1;
@ -139,6 +143,8 @@ impl MineContextWatcher {
} }
let miner_id = self.miner_id.0; let miner_id = self.miner_id.0;
// Use eth_call with specific caller address for read-only access
let WorkerContext { let WorkerContext {
context, context,
pora_target, pora_target,
@ -147,6 +153,7 @@ impl MineContextWatcher {
} = self } = self
.mine_contract .mine_contract
.compute_worker_context(miner_id) .compute_worker_context(miner_id)
.from(*COMPUTE_WORKER_CONTEXT_CALLER)
.call() .call()
.await .await
.map_err(|e| format!("Failed to query mining context: {:?}", e))?; .map_err(|e| format!("Failed to query mining context: {:?}", e))?;

View File

@@ -1017,8 +1017,8 @@ impl std::convert::From<Request> for OutboundRequest {
 /// The type of RPC responses the Behaviour informs it has received, and allows for sending.
 ///
 // NOTE: This is an application-level wrapper over the lower network level responses that can be
-// sent. The main difference is the absense of Pong and Metadata, which don't leave the
-// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and
+// sent. The main difference is the absence of Pong and Metadata, which don't leave the
+// Behaviour. For all protocol responses managed by RPC see `RPCResponse` and
 // `RPCCodedResponse`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Response {

View File

@@ -299,7 +299,7 @@ impl RouterService {
             }
             NetworkMessage::Publish { messages } => {
                 if self.libp2p.swarm.connected_peers().next().is_none() {
-                    // this is a boardcast message, when current node doesn't have any peers connected, try to connect any peer in config
+                    // this is a broadcast message, when current node doesn't have any peers connected, try to connect any peer in config
                     for multiaddr in &self.config.libp2p_nodes {
                         match Swarm::dial(&mut self.libp2p.swarm, multiaddr.clone()) {
                             Ok(()) => {

View File

@@ -2,7 +2,7 @@ mod proof;
 use anyhow::{anyhow, bail, Error};
 use append_merkle::{
-    AppendMerkleTree, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm,
+    AppendMerkleTree, OptionalHash, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm,
 };
 use ethereum_types::{Address, H256, U256};
 use merkle_light::merkle::MerkleTree;
@@ -32,7 +32,7 @@ pub type DataRoot = H256;
 pub type FlowProof = RawProof<H256>;
 pub type FlowRangeProof = RawRangeProof<H256>;
-pub type Merkle = AppendMerkleTree<H256, Sha3Algorithm>;
+pub type Merkle = AppendMerkleTree<OptionalHash, Sha3Algorithm>;
 // Each chunk is 32 bytes.
 pub const CHUNK_SIZE: usize = 256;

View File

@ -12,7 +12,9 @@ use crate::log_store::{
use crate::{try_option, ZgsKeyValueDB}; use crate::{try_option, ZgsKeyValueDB};
use any::Any; use any::Any;
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use append_merkle::{MerkleTreeRead, NodeDatabase, NodeTransaction}; use append_merkle::{
AppendMerkleTree, MerkleTreeRead, NodeDatabase, NodeTransaction, OptionalHash,
};
use itertools::Itertools; use itertools::Itertools;
use kvdb::DBTransaction; use kvdb::DBTransaction;
use parking_lot::RwLock; use parking_lot::RwLock;
@ -72,7 +74,8 @@ impl FlowStore {
batch_index batch_index
) )
})?; })?;
merkle.gen_proof(sector_index) let optional_proof = merkle.gen_proof(sector_index)?;
AppendMerkleTree::convert_proof_to_h256(optional_proof)
} }
pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> { pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
@ -577,12 +580,12 @@ fn layer_size_key(layer: usize) -> Vec<u8> {
pub struct NodeDBTransaction(DBTransaction); pub struct NodeDBTransaction(DBTransaction);
impl NodeDatabase<DataRoot> for FlowDBStore { impl NodeDatabase<OptionalHash> for FlowDBStore {
fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> { fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
Ok(self Ok(self
.kvdb .kvdb
.get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))? .get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
.map(|v| DataRoot::from_slice(&v))) .map(|v| OptionalHash::from_bytes(v.as_slice().try_into().unwrap()).unwrap()))
} }
fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> { fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
@ -592,11 +595,11 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
} }
} }
fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> { fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
Box::new(NodeDBTransaction(self.kvdb.transaction())) Box::new(NodeDBTransaction(self.kvdb.transaction()))
} }
fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> { fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
let db_tx: Box<NodeDBTransaction> = tx let db_tx: Box<NodeDBTransaction> = tx
.into_any() .into_any()
.downcast() .downcast()
@ -605,21 +608,21 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
} }
} }
impl NodeTransaction<DataRoot> for NodeDBTransaction { impl NodeTransaction<OptionalHash> for NodeDBTransaction {
fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) { fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
self.0.put( self.0.put(
COL_FLOW_MPT_NODES, COL_FLOW_MPT_NODES,
&encode_mpt_node_key(layer, pos), &encode_mpt_node_key(layer, pos),
node.as_bytes(), &node.as_bytes(),
); );
} }
fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) { fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
for (layer_index, position, data) in nodes { for (layer_index, position, data) in nodes {
self.0.put( self.0.put(
COL_FLOW_MPT_NODES, COL_FLOW_MPT_NODES,
&encode_mpt_node_key(*layer_index, *position), &encode_mpt_node_key(*layer_index, *position),
data.as_bytes(), &data.as_bytes(),
); );
} }
} }

View File

@ -204,9 +204,9 @@ impl EntryBatch {
} }
} }
} }
Ok(Some( Ok(try_option!(self.to_merkle_tree(is_first_chunk)?)
try_option!(self.to_merkle_tree(is_first_chunk)?).root(), .root()
)) .into())
} }
pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> { pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> {
@ -243,7 +243,7 @@ impl EntryBatch {
pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result<Option<Merkle>> { pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result<Option<Merkle>> {
let initial_leaves = if is_first_chunk { let initial_leaves = if is_first_chunk {
vec![H256::zero()] vec![H256::zero().into()]
} else { } else {
vec![] vec![]
}; };
@ -256,7 +256,7 @@ impl EntryBatch {
); );
merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned")); merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned"));
} }
merkle.append_subtree(subtree.subtree_height, subtree.root)?; merkle.append_subtree(subtree.subtree_height, subtree.root.into())?;
} }
if merkle.leaves() != SECTORS_PER_LOAD { if merkle.leaves() != SECTORS_PER_LOAD {
let leaf_data = try_option!( let leaf_data = try_option!(

View File

@ -9,7 +9,7 @@ use crate::log_store::{
}; };
use crate::{try_option, ZgsKeyValueDB}; use crate::{try_option, ZgsKeyValueDB};
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use append_merkle::{Algorithm, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb_rocksdb::{Database, DatabaseConfig};
use merkle_light::merkle::{log2_pow2, MerkleTree}; use merkle_light::merkle::{log2_pow2, MerkleTree};
@ -55,13 +55,10 @@ const PAD_DELAY: Duration = Duration::from_secs(2);
// Process at most 1M entries (256MB) pad data at a time. // Process at most 1M entries (256MB) pad data at a time.
const PAD_MAX_SIZE: usize = 1 << 20; const PAD_MAX_SIZE: usize = 1 << 20;
static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| { static PAD_SEGMENT_ROOT: Lazy<OptionalHash> = Lazy::new(|| {
Merkle::new( let h256_leaves = data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap();
data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
0, Merkle::new(h256_leaves, 0, None).root()
None,
)
.root()
}); });
pub struct UpdateFlowMessage { pub struct UpdateFlowMessage {
pub pad_data: usize, pub pad_data: usize,
@ -130,7 +127,8 @@ impl MerkleManager {
fn try_initialize(&mut self, flow_store: &FlowStore) -> Result<()> { fn try_initialize(&mut self, flow_store: &FlowStore) -> Result<()> {
if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 { if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 {
self.last_chunk_merkle.append(H256::zero()); self.last_chunk_merkle
.append(OptionalHash::some(H256::zero()));
self.pora_chunks_merkle self.pora_chunks_merkle
.update_last(self.last_chunk_merkle.root()); .update_last(self.last_chunk_merkle.root());
} else if self.last_chunk_merkle.leaves() != 0 { } else if self.last_chunk_merkle.leaves() != 0 {
@ -222,9 +220,17 @@ impl LogStoreChunkWrite for LogManager {
self.append_entries(flow_entry_array, &mut merkle)?; self.append_entries(flow_entry_array, &mut merkle)?;
if let Some(file_proof) = maybe_file_proof { if let Some(file_proof) = maybe_file_proof {
// Convert H256 proof to OptionalHash proof
let optional_proof = AppendMerkleTree::convert_proof_from_h256(file_proof)?;
// Convert H256 merkle nodes to OptionalHash merkle nodes
let optional_nodes: Vec<(usize, OptionalHash)> = tx
.merkle_nodes
.into_iter()
.map(|(depth, hash)| (depth, OptionalHash::some(hash)))
.collect();
merkle.pora_chunks_merkle.fill_with_file_proof( merkle.pora_chunks_merkle.fill_with_file_proof(
file_proof, optional_proof,
tx.merkle_nodes, optional_nodes,
tx.start_entry_index, tx.start_entry_index,
)?; )?;
} }
@ -424,9 +430,9 @@ impl LogStoreWrite for LogManager {
// `merkle` is used in `validate_range_proof`. // `merkle` is used in `validate_range_proof`.
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
if valid { if valid {
merkle merkle.pora_chunks_merkle.fill_with_range_proof(
.pora_chunks_merkle AppendMerkleTree::convert_range_proof_from_h256(data.proof.clone())?,
.fill_with_range_proof(data.proof.clone())?; )?;
} }
Ok(valid) Ok(valid)
} }
@ -637,7 +643,7 @@ impl LogStoreRead for LogManager {
let tx = self let tx = self
.get_tx_by_seq_number(tx_seq)? .get_tx_by_seq_number(tx_seq)?
.ok_or_else(|| anyhow!("tx missing"))?; .ok_or_else(|| anyhow!("tx missing"))?;
let leaves = data_to_merkle_leaves(&data.chunks.data)?; let leaves = data_to_merkle_leaves_h256(&data.chunks.data)?;
data.proof.validate::<Sha3Algorithm>( data.proof.validate::<Sha3Algorithm>(
&leaves, &leaves,
(data.chunks.start_index + tx.start_entry_index) as usize, (data.chunks.start_index + tx.start_entry_index) as usize,
@ -646,7 +652,7 @@ impl LogStoreRead for LogManager {
.merkle .merkle
.read_recursive() .read_recursive()
.pora_chunks_merkle .pora_chunks_merkle
.check_root(&data.proof.root())) .check_root(&data.proof.root().into()))
} }
fn get_sync_progress(&self) -> Result<Option<(u64, H256)>> { fn get_sync_progress(&self) -> Result<Option<(u64, H256)>> {
@ -686,7 +692,7 @@ impl LogStoreRead for LogManager {
fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> { fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
Ok(( Ok((
merkle.pora_chunks_merkle.root(), merkle.pora_chunks_merkle.root().unwrap(),
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64, merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64,
)) ))
} }
@ -871,7 +877,9 @@ impl LogManager {
None => self.gen_proof_at_version(flow_index, None), None => self.gen_proof_at_version(flow_index, None),
Some(root) => { Some(root) => {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
let tx_seq = merkle.pora_chunks_merkle.tx_seq_at_root(&root)?; let tx_seq = merkle
.pora_chunks_merkle
.tx_seq_at_root(&OptionalHash::from(root))?;
self.gen_proof_at_version(flow_index, Some(tx_seq)) self.gen_proof_at_version(flow_index, Some(tx_seq))
} }
} }
@ -885,11 +893,15 @@ impl LogManager {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
let seg_index = sector_to_segment(flow_index); let seg_index = sector_to_segment(flow_index);
let top_proof = match maybe_tx_seq { let top_proof = match maybe_tx_seq {
None => merkle.pora_chunks_merkle.gen_proof(seg_index)?, None => AppendMerkleTree::convert_proof_to_h256(
Some(tx_seq) => merkle merkle.pora_chunks_merkle.gen_proof(seg_index)?,
.pora_chunks_merkle )?,
.at_version(tx_seq)? Some(tx_seq) => AppendMerkleTree::convert_proof_to_h256(
.gen_proof(seg_index)?, merkle
.pora_chunks_merkle
.at_version(tx_seq)?
.gen_proof(seg_index)?,
)?,
}; };
// TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so // TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so
@ -906,13 +918,17 @@ impl LogManager {
.gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)? .gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)?
} else { } else {
match maybe_tx_seq { match maybe_tx_seq {
None => merkle None => AppendMerkleTree::convert_proof_to_h256(
.last_chunk_merkle merkle
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, .last_chunk_merkle
Some(tx_version) => merkle .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
.last_chunk_merkle )?,
.at_version(tx_version)? Some(tx_version) => AppendMerkleTree::convert_proof_to_h256(
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, merkle
.last_chunk_merkle
.at_version(tx_version)?
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
)?,
} }
}; };
entry_proof(&top_proof, &sub_proof) entry_proof(&top_proof, &sub_proof)
@ -938,9 +954,10 @@ impl LogManager {
if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE { if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
merkle merkle
.last_chunk_merkle .last_chunk_merkle
.append_subtree(subtree_depth, subtree_root)?; .append_subtree(subtree_depth, OptionalHash::some(subtree_root))?;
if merkle.last_chunk_merkle.leaves() == subtree_size { if merkle.last_chunk_merkle.leaves() == subtree_size {
// `last_chunk_merkle` was empty, so this is a new leaf in the top_tree. // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.append_subtree(1, merkle.last_chunk_merkle.root())?; .append_subtree(1, merkle.last_chunk_merkle.root())?;
@ -960,9 +977,10 @@ impl LogManager {
// the chunks boundary. // the chunks boundary.
assert_eq!(merkle.last_chunk_merkle.leaves(), 0); assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
assert!(subtree_size >= PORA_CHUNK_SIZE); assert!(subtree_size >= PORA_CHUNK_SIZE);
merkle merkle.pora_chunks_merkle.append_subtree(
.pora_chunks_merkle subtree_depth - log2_pow2(PORA_CHUNK_SIZE),
.append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?; OptionalHash::some(subtree_root),
)?;
} }
} }
@ -997,9 +1015,8 @@ impl LogManager {
let mut completed_chunk_index = None; let mut completed_chunk_index = None;
if pad_data.len() < last_chunk_pad { if pad_data.len() < last_chunk_pad {
is_full_empty = false; is_full_empty = false;
merkle let pad_leaves = data_to_merkle_leaves(&pad_data)?;
.last_chunk_merkle merkle.last_chunk_merkle.append_list(pad_leaves);
.append_list(data_to_merkle_leaves(&pad_data)?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
@ -1007,9 +1024,8 @@ impl LogManager {
if last_chunk_pad != 0 { if last_chunk_pad != 0 {
is_full_empty = false; is_full_empty = false;
// Pad the last chunk. // Pad the last chunk.
merkle let last_chunk_leaves = data_to_merkle_leaves(&pad_data[..last_chunk_pad])?;
.last_chunk_merkle merkle.last_chunk_merkle.append_list(last_chunk_leaves);
.append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
@ -1019,7 +1035,7 @@ impl LogManager {
// Pad with more complete chunks. // Pad with more complete chunks.
let mut start_index = last_chunk_pad / ENTRY_SIZE; let mut start_index = last_chunk_pad / ENTRY_SIZE;
while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE { while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT); merkle.pora_chunks_merkle.append(PAD_SEGMENT_ROOT.clone());
start_index += PORA_CHUNK_SIZE; start_index += PORA_CHUNK_SIZE;
} }
assert_eq!(pad_data.len(), start_index * ENTRY_SIZE); assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
@ -1104,7 +1120,7 @@ impl LogManager {
if chunk_index < merkle.pora_chunks_merkle.leaves() as u64 { if chunk_index < merkle.pora_chunks_merkle.leaves() as u64 {
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.fill_leaf(chunk_index as usize, chunk_root); .fill_leaf(chunk_index as usize, OptionalHash::some(chunk_root));
} else { } else {
// TODO(zz): This assumption may be false in the future. // TODO(zz): This assumption may be false in the future.
unreachable!("We always insert tx nodes before put_chunks"); unreachable!("We always insert tx nodes before put_chunks");
@ -1253,7 +1269,7 @@ impl LogManager {
let mut to_insert_subtrees = Vec::new(); let mut to_insert_subtrees = Vec::new();
let mut start_index = 0; let mut start_index = 0;
for (subtree_height, root) in subtree_list { for (subtree_height, root) in subtree_list {
to_insert_subtrees.push((start_index, subtree_height, root)); to_insert_subtrees.push((start_index, subtree_height, root.unwrap()));
start_index += 1 << (subtree_height - 1); start_index += 1 << (subtree_height - 1);
} }
self.flow_store self.flow_store
@ -1301,14 +1317,14 @@ macro_rules! try_option {
/// This should be called with input checked. /// This should be called with input checked.
pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> { pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
Ok(FileMerkleTree::new( Ok(FileMerkleTree::new(
data_to_merkle_leaves(leaf_data)? data_to_merkle_leaves_h256(leaf_data)?
.into_iter() .into_iter()
.map(|h| h.0) .map(|h| h.0)
.collect::<Vec<[u8; 32]>>(), .collect::<Vec<[u8; 32]>>(),
)) ))
} }
pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> { pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<OptionalHash>> {
let start_time = Instant::now(); let start_time = Instant::now();
if leaf_data.len() % ENTRY_SIZE != 0 { if leaf_data.len() % ENTRY_SIZE != 0 {
bail!("merkle_tree: mismatched data size"); bail!("merkle_tree: mismatched data size");
@ -1331,6 +1347,12 @@ pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
Ok(r) Ok(r)
} }
/// Convenience function that combines data_to_merkle_leaves and conversion to H256
pub fn data_to_merkle_leaves_h256(leaf_data: &[u8]) -> Result<Vec<H256>> {
let optional_hashes = data_to_merkle_leaves(leaf_data)?;
Ok(optional_hashes.into_iter().map(|oh| oh.unwrap()).collect())
}
pub fn bytes_to_entries(size_bytes: u64) -> u64 { pub fn bytes_to_entries(size_bytes: u64) -> u64 {
if size_bytes % ENTRY_SIZE as u64 == 0 { if size_bytes % ENTRY_SIZE as u64 == 0 {
size_bytes / ENTRY_SIZE as u64 size_bytes / ENTRY_SIZE as u64

View File

@ -1,9 +1,9 @@
use crate::log_store::log_manager::{ use crate::log_store::log_manager::{
data_to_merkle_leaves, sub_merkle_tree, tx_subtree_root_list_padded, LogConfig, LogManager, data_to_merkle_leaves, data_to_merkle_leaves_h256, sub_merkle_tree,
PORA_CHUNK_SIZE, tx_subtree_root_list_padded, LogConfig, LogManager, PORA_CHUNK_SIZE,
}; };
use crate::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite}; use crate::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite};
use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use rand::random; use rand::random;
use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE}; use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE};
@ -22,11 +22,17 @@ fn test_put_get() {
data[i * CHUNK_SIZE] = random(); data[i * CHUNK_SIZE] = random();
} }
let (padded_chunks, _) = compute_padded_chunk_size(data_size); let (padded_chunks, _) = compute_padded_chunk_size(data_size);
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap()); vec![OptionalHash::some(H256::zero())],
0,
None,
);
let padding_leaves = data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap();
merkle.append_list(padding_leaves);
let mut data_padded = data.clone(); let mut data_padded = data.clone();
data_padded.append(&mut vec![0u8; CHUNK_SIZE]); data_padded.append(&mut vec![0u8; CHUNK_SIZE]);
merkle.append_list(data_to_merkle_leaves(&data_padded).unwrap()); let data_leaves = data_to_merkle_leaves(&data_padded).unwrap();
merkle.append_list(data_leaves);
merkle.commit(Some(0)); merkle.commit(Some(0));
let tx_merkle = sub_merkle_tree(&data).unwrap(); let tx_merkle = sub_merkle_tree(&data).unwrap();
let tx = Transaction { let tx = Transaction {
@ -78,16 +84,17 @@ fn test_put_get() {
.unwrap() .unwrap()
.unwrap(); .unwrap();
assert_eq!(chunk_with_proof.chunk, chunk_array.chunk_at(i).unwrap()); assert_eq!(chunk_with_proof.chunk, chunk_array.chunk_at(i).unwrap());
assert_eq!( assert_eq!(
chunk_with_proof.proof, chunk_with_proof.proof,
merkle.gen_proof(i + start_offset).unwrap() merkle.gen_proof_h256(i + start_offset).unwrap()
); );
let r = chunk_with_proof.proof.validate::<Sha3Algorithm>( let r = chunk_with_proof.proof.validate::<Sha3Algorithm>(
&Sha3Algorithm::leaf(&chunk_with_proof.chunk.0), &Sha3Algorithm::leaf(&chunk_with_proof.chunk.0),
i + start_offset, i + start_offset,
); );
assert!(r.is_ok(), "proof={:?} \n r={:?}", chunk_with_proof.proof, r); assert!(r.is_ok(), "proof={:?} \n r={:?}", chunk_with_proof.proof, r);
assert!(merkle.check_root(&chunk_with_proof.proof.root())); assert!(merkle.check_root(&chunk_with_proof.proof.root().into()));
} }
for i in (0..chunk_count).step_by(PORA_CHUNK_SIZE / 3) { for i in (0..chunk_count).step_by(PORA_CHUNK_SIZE / 3) {
let end = std::cmp::min(i + PORA_CHUNK_SIZE, chunk_count); let end = std::cmp::min(i + PORA_CHUNK_SIZE, chunk_count);
@ -102,7 +109,7 @@ fn test_put_get() {
assert!(chunk_array_with_proof assert!(chunk_array_with_proof
.proof .proof
.validate::<Sha3Algorithm>( .validate::<Sha3Algorithm>(
&data_to_merkle_leaves(&chunk_array_with_proof.chunks.data).unwrap(), &data_to_merkle_leaves_h256(&chunk_array_with_proof.chunks.data).unwrap(),
i + start_offset i + start_offset
) )
.is_ok()); .is_ok());
@ -119,12 +126,12 @@ fn test_root() {
} }
let mt = sub_merkle_tree(&data).unwrap(); let mt = sub_merkle_tree(&data).unwrap();
println!("{:?} {}", mt.root(), hex::encode(mt.root())); println!("{:?} {}", mt.root(), hex::encode(mt.root()));
let append_mt = AppendMerkleTree::<H256, Sha3Algorithm>::new( let append_mt = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
data_to_merkle_leaves(&data).unwrap(), data_to_merkle_leaves(&data).unwrap(),
0, 0,
None, None,
); );
assert_eq!(mt.root(), append_mt.root().0); assert_eq!(mt.root(), append_mt.root().unwrap().0);
} }
} }

View File

@ -6,7 +6,7 @@ use crate::log_store::log_manager::{
use crate::log_store::metrics; use crate::log_store::metrics;
use crate::{try_option, LogManager, ZgsKeyValueDB}; use crate::{try_option, LogManager, ZgsKeyValueDB};
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use merkle_light::merkle::log2_pow2; use merkle_light::merkle::log2_pow2;
use shared_types::{DataRoot, Transaction}; use shared_types::{DataRoot, Transaction};
@ -329,7 +329,7 @@ impl TransactionStore {
&self, &self,
pora_chunk_index: usize, pora_chunk_index: usize,
mut tx_seq: u64, mut tx_seq: u64,
) -> Result<AppendMerkleTree<H256, Sha3Algorithm>> { ) -> Result<AppendMerkleTree<OptionalHash, Sha3Algorithm>> {
let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64; let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64;
let mut tx_list = Vec::new(); let mut tx_list = Vec::new();
// Find the first tx within the last chunk. // Find the first tx within the last chunk.
@ -384,9 +384,13 @@ impl TransactionStore {
} }
let mut merkle = if last_chunk_start_index == 0 { let mut merkle = if last_chunk_start_index == 0 {
// The first entry hash is initialized as zero. // The first entry hash is initialized as zero.
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(vec![H256::zero()], 1, None) AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
vec![H256::zero().into()],
1,
None,
)
} else { } else {
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth( AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
vec![], vec![],
log2_pow2(PORA_CHUNK_SIZE) + 1, log2_pow2(PORA_CHUNK_SIZE) + 1,
None, None,
@ -400,9 +404,12 @@ impl TransactionStore {
cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree); cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree);
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(pad_len))?); merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(pad_len))?);
} }
// Since we are building the last merkle with a given last tx_seq, it's ensured // Convert H256 to OptionalHash for append_subtree_list
// that appending subtrees will not go beyond the max size. let subtree_list_optional_hash = subtree_list
merkle.append_subtree_list(subtree_list)?; .into_iter()
.map(|(depth, hash)| (depth, hash.into()))
.collect();
merkle.append_subtree_list(subtree_list_optional_hash)?;
merkle.commit(Some(tx_seq)); merkle.commit(Some(tx_seq));
} }
Ok(merkle) Ok(merkle)

View File

@ -0,0 +1,361 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
#######################################################################
### Network Config Options ###
#######################################################################
# Data directory where node's keyfile is stored.
# network_dir = "network"
# IP address to listen on.
# network_listen_address = "0.0.0.0"
# The address to broadcast to peers, indicating which address we are listening on. Generally,
# configure a public IP address for UDP discovery. If not specified, the program will try to
# detect the public IP address automatically.
# network_enr_address = ""
# The tcp port to broadcast to peers in order to reach back for libp2p services.
# network_enr_tcp_port = 1234
# The udp port to broadcast to peers in order to reach back for discovery.
# network_enr_udp_port = 1234
# The TCP port that libp2p listens on.
# network_libp2p_port = 1234
# UDP port that discovery listens on.
# network_discovery_port = 1234
# Target number of connected peers; can be set to 100.
# network_target_peers = 50
# List of nodes to bootstrap UDP discovery. Note, `network_enr_address` should be
# configured as well to enable UDP discovery.
network_boot_nodes = ["/ip4/34.66.131.173/udp/1234/p2p/16Uiu2HAmG81UgZ1JJLx9T2HqELgJNP36ChHzYkCdA9HdxvAbb5jQ","/ip4/34.60.163.4/udp/1234/p2p/16Uiu2HAmL3DoA7e7mbxs7CkeCPtNrAcfJFFtLpJDr2HWuR6QwJ8k","/ip4/34.169.236.186/udp/1234/p2p/16Uiu2HAm489RdhEgZUFmNTR4jdLEE4HjrvwaPCkEpSYSgvqi1CbR","/ip4/34.71.110.60/udp/1234/p2p/16Uiu2HAmBfGfbLNRegcqihiuXhgSXWNpgiGm6EwW2SYexfPUNUHQ"]
# List of libp2p nodes to initially connect to.
# network_libp2p_nodes = []
# Indicates if the user has set the network to be in private mode. Currently this
# prevents sending client identifying information over identify.
# network_private = false
# Disables the discovery protocol from starting.
# network_disable_discovery = false
#######################################################################
### UDP Discovery Config Options ###
#######################################################################
# The request timeout for each UDP request.
# discv5_request_timeout_secs = 5
# The timeout after which a `QueryPeer` in an ongoing query is marked unresponsive.
# Unresponsive peers don't count towards the parallelism limits for a query.
# Hence, we may potentially end up making more requests to good peers.
# discv5_query_peer_timeout_secs = 2
# The number of retries for each UDP request.
# discv5_request_retries = 1
# The number of peers to request in parallel in a single query.
# discv5_query_parallelism = 5
# Reports all discovered ENR's when traversing the DHT to the event stream.
# discv5_report_discovered_peers = false
# Disables the incoming packet filter.
# discv5_disable_packet_filter = false
# Disables the limit on the number of IP addresses from the same
# /24 subnet in the kbuckets table. The limit exists to mitigate eclipse attacks.
# discv5_disable_ip_limit = false
#######################################################################
### Log Sync Config Options ###
#######################################################################
# RPC endpoint to sync event logs on EVM compatible blockchain.
# blockchain_rpc_endpoint = "http://127.0.0.1:8545"
# Flow contract address to sync event logs.
log_contract_address = "0x62D4144dB0F0a6fBBaeb6296c785C71B3D57C526"
# Block number from which to sync event logs on the blockchain. Generally, this is
# the block number at which the flow contract was deployed.
log_sync_start_block_number = 2387557
# Number of blocks to confirm a transaction.
confirmation_block_count = 1
# Maximum number of event logs to poll at a time.
# log_page_size = 999
# Maximum data size to cache in memory (by default, 100MB).
# max_cache_data_size = 104857600
# TTL to cache data in memory.
# cache_tx_seq_ttl = 500
# The number of retries for rate-limited responses.
# rate_limit_retries = 100
# The number of retries after an RPC request times out.
# timeout_retries = 100
# The duration to wait before retry, in ms.
# initial_backoff = 500
# The duration between each paginated getLogs RPC call, in ms.
# This is set to avoid triggering the throttling mechanism in the RPC server.
# recover_query_delay = 50
# The assumed number of blocks by which the finalized block lags behind the latest block.
# default_finalized_block_count = 100
# Interval (in minutes) at which finalized block removal is triggered.
# remove_finalized_block_interval_minutes = 30
# Interval (in ms) at which the watch loop (eth_getLogs) is triggered.
# watch_loop_wait_time_ms = 500
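# Example (hypothetical values): a node behind a rate-limited public RPC endpoint
# might poll smaller pages and back off longer between retries.
# log_page_size = 500
# initial_backoff = 1000
# recover_query_delay = 200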
#######################################################################
### Chunk Pool Config Options ###
#######################################################################
# Maximum number of threads to upload segments of a single file simultaneously.
# chunk_pool_write_window_size = 2
# Maximum data size of cached segment in pool (by default, 4MB).
# chunk_pool_max_cached_chunks_all = 4194304
# Maximum number of threads to upload segments for all files simultaneously.
# chunk_pool_max_writings = 64
# Expiration time to cache uploaded segments in memory.
# chunk_pool_expiration_time_secs = 300
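# Example (hypothetical values): a node serving many concurrent uploaders might
# widen the per-file write window and allow more simultaneous writings.
# chunk_pool_write_window_size = 4
# chunk_pool_max_writings = 128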
#######################################################################
### DB Config Options ###
#######################################################################
# Directory to store data.
# db_dir = "db"
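# Example (hypothetical path): keep the database on a dedicated data disk.
# db_dir = "/data/0g-storage/db"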
#######################################################################
### Misc Config Options ###
#######################################################################
# Log configuration file.
# log_config_file = "log_config"
# Log directory.
# log_directory = "log"
#######################################################################
### Mine Config Options ###
#######################################################################
# Mine contract address for incentive.
mine_contract_address = "0xCd01c5Cd953971CE4C2c9bFb95610236a7F414fe"
# Miner key is used to sign blockchain transactions for incentives.
# The value should be a hex string of length 64 without 0x prefix.
#
# Note, the corresponding address should have enough tokens to pay
# the transaction gas fees.
# miner_key = ""
# Period for querying mine context on chain (in seconds)
#
# Note: During each query period, nodes will issue 3 `eth_call` requests.
# If your blockchain RPC endpoint is a public or priced node, please be
# cautious not to set the period too short.
#
# mine_context_query_seconds = 5
# CPU usage percentage for PoRA mining. 100 means one CPU core is fully loaded.
#
# miner_cpu_percentage = 100
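# Example (hypothetical values; placeholder key): enable mining with a dedicated
# key while limiting PoRA to roughly half a CPU core and querying the mine
# context less frequently.
# miner_key = "<64 hex characters without 0x prefix>"
# miner_cpu_percentage = 50
# mine_context_query_seconds = 10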
#######################################################################
### Sharding Config Options ###
#######################################################################
# The max number of chunk entries to store in the db.
# Each entry is 256B, so the db size is roughly limited to
# `256 * db_max_num_sectors` bytes.
# If this limit is reached, the node will update its `shard_position`
# and store only half of the data.
#
# db_max_num_sectors = 4000000000
# The format is <shard_id>/<shard_number>, where the shard number is 2^n.
# This only applies if there is no stored shard config in db.
# shard_position = "0/1"
reward_contract_address = "0x457aC76B58ffcDc118AABD6DbC63ff9072880870"
# The time interval to check if we should halve `shard_position` to prune data.
#
# prune_check_time_s = 60
# The number of chunk entries to delete in a batch when we prune data.
#
# prune_batch_size = 1024
# The time interval to wait between each prune batch deletion to avoid
# IO resource exhaustion.
#
# prune_batch_wait_time_ms = 1000
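# Example (hypothetical values): a node that starts as shard 2 of 4 (storing a
# quarter of the data) and caps the db at roughly 256 GB (256 B * 1000000000 entries).
# shard_position = "2/4"
# db_max_num_sectors = 1000000000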
#######################################################################
### Network Peer DB Config Options ###
#######################################################################
[network_peer_db]
# The maximum number of disconnected nodes to remember.
max_disconnected_peers = 10000
# The maximum number of banned nodes to remember.
max_banned_peers = 10000
#######################################################################
### Router Config Options ###
#######################################################################
[router]
# Timeout to publish file announcements in batch.
# batcher_timeout = "1s"
# Number of files in an announcement to publish in batch.
batcher_file_capacity = 10
# Number of announcements in a pubsub message to publish in batch.
batcher_announcement_capacity = 100
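# Example (hypothetical values): batch announcements a little longer to reduce
# pubsub traffic on a busy node.
# batcher_timeout = "2s"
# batcher_file_capacity = 20
# batcher_announcement_capacity = 200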
#######################################################################
### File Sync Config Options ###
#######################################################################
[sync]
# Enable file sync among peers automatically. When enabled, each node will store
# all files, and sufficient disk space is required.
auto_sync_enabled = true
# Indicates whether to sync files from neighbor nodes only. This avoids flooding file
# announcements across the whole network, which can lead to high latency or even timeouts when syncing files.
neighbors_only = true
# Maximum number of files to sync from other peers simultaneously. Worth monitoring; can be increased.
# max_sync_files = 8
# Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
# sync_file_by_rpc_enabled = true
# Maximum number of continuous failures before a file sync is terminated.
# max_request_failures = 5
# Timeout to dial peers.
# peer_connect_timeout = "15s"
# Timeout to disconnect peers.
# peer_disconnect_timeout = "15s"
# Timeout to find peers via FIND_FILE P2P pubsub message.
# peer_find_timeout = "120s"
# Timeout to download data from remote peer.
# peer_chunks_download_timeout = "15s"
# Maximum network bandwidth (B/s) to sync files. Default value is 0,
# which indicates no limitation. A 50 MB/s cap, for example, is 52428800.
# max_bandwidth_bytes = 52428800
# Maximum threads to sync files in sequence.
# max_sequential_workers = 0
# Maximum threads to sync files randomly.
# max_random_workers = 2
# Timeout to terminate a sequential file sync when no suitable peer can be found.
# sequential_find_peer_timeout = "60s"
# Timeout to terminate a random file sync when no suitable peer can be found.
# random_find_peer_timeout = "500s"
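# Example (hypothetical values): a well-provisioned node syncing more files in
# parallel with a modest bandwidth cap (32 MB/s = 33554432 B/s).
# max_sync_files = 16
# max_random_workers = 4
# max_bandwidth_bytes = 33554432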
#######################################################################
### File Location Cache Options ###
#######################################################################
# [file_location_cache]
# The file location cache maintains the storage positions of files.
# A storage location is represented by the IP address of the storage node and the timestamp at which the node declared that it stores the corresponding file.
# The cache has both a global capacity limit and a per-file limit on the number of location entries.
# When the cache is full, the location entry with the oldest timestamp is replaced.
# Global cache capacity.
# max_entries_total = 1000000
# Location information capacity for each file.
# max_entries_per_file = 4
# Validity period of location information.
# If an entry's timestamp is older than this duration relative to the current time, the entry is removed from the cache.
# entry_expiration_time_secs = 86400
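# Example (hypothetical values): a smaller cache whose entries expire after one
# hour instead of one day.
# [file_location_cache]
# max_entries_total = 100000
# max_entries_per_file = 4
# entry_expiration_time_secs = 3600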
#######################################################################
### RPC Config Options ###
#######################################################################
[rpc]
# Whether to provide RPC service.
# enabled = true
# HTTP server address to bind for public RPC.
# listen_address = "0.0.0.0:5678"
# HTTP server address to bind for admin and debug RPC.
# listen_address_admin = "0.0.0.0:5679"
# gRPC server address to bind.
# listen_address_grpc = "0.0.0.0:50051"
# Number of chunks for a single segment.
# chunks_per_segment = 1024
# Maximum data size of RPC request body (by default, 10MB).
# max_request_body_size = 10485760
# Maximum file size allowed to be cached in memory (by default, 10MB).
# max_cache_file_size = 10485760
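# Example (hypothetical addresses): expose the public RPC and gRPC on all
# interfaces while keeping the admin/debug RPC bound to localhost.
# listen_address = "0.0.0.0:5678"
# listen_address_admin = "127.0.0.1:5679"
# listen_address_grpc = "0.0.0.0:50051"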
#######################################################################
### Metrics Options ###
#######################################################################
# [metrics]
# Whether to enable metrics.
# enabled = false
# Interval to output metrics periodically, e.g. "10s", "30s" or "60s".
# report_interval = "10s"
# File name to output metrics periodically.
# file_report_output = ""
# InfluxDB configuration for reporting metrics periodically.
# influxdb_report_host = ""
# influxdb_report_db = ""
# influxdb_report_username = ""
# influxdb_report_password = ""
# Storage node name as a tag.
# influxdb_report_node = ""
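# Example (hypothetical values; the exact host format depends on your InfluxDB
# setup): report metrics to a local InfluxDB every 30 seconds.
# [metrics]
# enabled = true
# report_interval = "30s"
# influxdb_report_host = "http://127.0.0.1:8086"
# influxdb_report_db = "zgs"
# influxdb_report_node = "node-1"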

View File

@ -33,7 +33,7 @@
# List of nodes to bootstrap UDP discovery. Note, `network_enr_address` should be # List of nodes to bootstrap UDP discovery. Note, `network_enr_address` should be
# configured as well to enable UDP discovery. # configured as well to enable UDP discovery.
network_boot_nodes = ["/ip4/47.251.79.83/udp/1234/p2p/16Uiu2HAkvJYQABP1MdvfWfUZUzGLx1sBSDZ2AT92EFKcMCCPVawV", "/ip4/47.238.87.44/udp/1234/p2p/16Uiu2HAmFGsLoajQdEds6tJqsLX7Dg8bYd2HWR4SbpJUut4QXqCj", "/ip4/47.251.78.104/udp/1234/p2p/16Uiu2HAmSe9UWdHrqkn2mKh99b9DwYZZcea6krfidtU3e5tiHiwN", "/ip4/47.76.30.235/udp/1234/p2p/16Uiu2HAm5tCqwGtXJemZqBhJ9JoQxdDgkWYavfCziaqaAYkGDSfU"] network_boot_nodes = ["/ip4/35.236.80.213/udp/1234/p2p/16Uiu2HAm1w2Lkr4vsnHUgHiyQBpVXmDuvuLP9SDUZaY5tkZudSME", "/ip4/34.102.76.235/udp/1234/p2p/16Uiu2HAmPQ9WTyYbstNPFX4Va8gH5cfkLJ5fJL9h7U4sgJyaHbcm"]
# List of libp2p nodes to initially connect to. # List of libp2p nodes to initially connect to.
# network_libp2p_nodes = [] # network_libp2p_nodes = []
@ -80,14 +80,14 @@ network_boot_nodes = ["/ip4/47.251.79.83/udp/1234/p2p/16Uiu2HAkvJYQABP1MdvfWfUZU
# blockchain_rpc_endpoint = "http://127.0.0.1:8545" # blockchain_rpc_endpoint = "http://127.0.0.1:8545"
# Flow contract address to sync event logs. # Flow contract address to sync event logs.
log_contract_address = "0x56A565685C9992BF5ACafb940ff68922980DBBC5" log_contract_address = "0x22E03a6A89B950F1c82ec5e74F8eCa321a105296"
# Block number to sync event logs from blockchain. Generally, this is # Block number to sync event logs from blockchain. Generally, this is
# the block number when flow contract deployed. # the block number when flow contract deployed.
log_sync_start_block_number = 1 log_sync_start_block_number = 1
# Number of blocks to confirm a transaction. # Number of blocks to confirm a transaction.
# confirmation_block_count = 3 confirmation_block_count = 1
# Maximum number of event logs to poll at a time. # Maximum number of event logs to poll at a time.
# log_page_size = 999 # log_page_size = 999
@ -125,13 +125,13 @@ log_sync_start_block_number = 1
####################################################################### #######################################################################
# Maximum number of threads to upload segments of a single file simultaneously. # Maximum number of threads to upload segments of a single file simultaneously.
# chunk_pool_write_window_size = 4 chunk_pool_write_window_size = 2
# Maximum data size of cached segment in pool (by default, 4MB). # Maximum data size of cached segment in pool (by default, 4MB).
# chunk_pool_max_cached_chunks_all = 4194304 # chunk_pool_max_cached_chunks_all = 4194304
# Maximum number of threads to upload segments for all files simultaneously. # Maximum number of threads to upload segments for all files simultaneously.
# chunk_pool_max_writings = 16 chunk_pool_max_writings = 128
# Expiration time to cache uploaded segments in memory. # Expiration time to cache uploaded segments in memory.
# chunk_pool_expiration_time_secs = 300 # chunk_pool_expiration_time_secs = 300
@ -158,7 +158,7 @@ log_sync_start_block_number = 1
####################################################################### #######################################################################
# Mine contract address for incentive. # Mine contract address for incentive.
mine_contract_address = "0xB87E0e5657C25b4e132CB6c34134C0cB8A962AD6" mine_contract_address = "0x00A9E9604b0538e06b268Fb297Df333337f9593b"
# Miner key is used to sign blockchain transaction for incentive. # Miner key is used to sign blockchain transaction for incentive.
# The value should be a hex string of length 64 without 0x prefix. # The value should be a hex string of length 64 without 0x prefix.
@ -194,7 +194,7 @@ db_max_num_sectors = 4000000000
# This only applies if there is no stored shard config in db. # This only applies if there is no stored shard config in db.
# shard_position = "0/2" # shard_position = "0/2"
reward_contract_address = "0x233B2768332e4Bae542824c93cc5c8ad5d44517E" reward_contract_address = "0xA97B57b4BdFEA2D0a25e535bd849ad4e6C440A69"
# The time interval to check if we should half `shard_position` to prune data. # The time interval to check if we should half `shard_position` to prune data.
# #
# prune_check_time_s = 60 # prune_check_time_s = 60
@ -215,10 +215,10 @@ reward_contract_address = "0x233B2768332e4Bae542824c93cc5c8ad5d44517E"
# [network_peer_db] # [network_peer_db]
# The maximum number of disconnected nodes to remember. # The maximum number of disconnected nodes to remember.
# max_disconnected_peers = 500 max_disconnected_peers = 10000
# The maximum number of banned nodes to remember. # The maximum number of banned nodes to remember.
# max_banned_peers = 1000 max_banned_peers = 10000
####################################################################### #######################################################################
### Router Config Options ### ### Router Config Options ###
@ -244,6 +244,10 @@ batcher_announcement_capacity = 100
# all files, and sufficient disk space is required. # all files, and sufficient disk space is required.
auto_sync_enabled = true auto_sync_enabled = true
# Indicates whether to sync file from neighbor nodes only. This is to avoid flooding file
# announcements in the whole network, which leads to high latency or even timeout to sync files.
neighbors_only = true
# Maximum number of files in sync from other peers simultaneously. # Maximum number of files in sync from other peers simultaneously.
# max_sync_files = 16 # max_sync_files = 16