a complete fix on null hash (#391)

* a complete fix on null hash

* simplify

* simplify

* fix tests

* fix tests

* fix tests

* no zero

* fix error

* remove unnecessary code

* len is fixed

* remove unnecessary code

* fix cicd
This commit is contained in:
0g-peterzhb 2025-09-18 14:24:50 +08:00 committed by GitHub
parent a3717d6bc1
commit df570e34d2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 462 additions and 118 deletions

View File

@ -31,6 +31,7 @@ jobs:
- name: Checkout sources - name: Checkout sources
uses: actions/checkout@v5 uses: actions/checkout@v5
with: with:
persist-credentials: false # prevents writing the extraheader
submodules: recursive submodules: recursive
- name: Setup Rust (cache & toolchain) - name: Setup Rust (cache & toolchain)

View File

@ -16,12 +16,84 @@ use tracing::{trace, warn};
use crate::merkle_tree::MerkleTreeWrite; use crate::merkle_tree::MerkleTreeWrite;
pub use crate::merkle_tree::{ pub use crate::merkle_tree::{
Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, ZERO_HASHES, Algorithm, HashElement, MerkleTreeInitialData, MerkleTreeRead, OptionalHash, ZERO_HASHES,
}; };
pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction}; pub use crate::node_manager::{EmptyNodeDatabase, NodeDatabase, NodeManager, NodeTransaction};
pub use proof::{Proof, RangeProof}; pub use proof::{Proof, RangeProof};
pub use sha3::Sha3Algorithm; pub use sha3::Sha3Algorithm;
// Helper functions for converting between H256 and OptionalHash types
use ethereum_types::H256;
impl AppendMerkleTree<OptionalHash, Sha3Algorithm> {
/// Convert a proof of OptionalHash to a proof of H256
pub fn convert_proof_to_h256(proof: Proof<OptionalHash>) -> Result<Proof<H256>, anyhow::Error> {
let lemma: Result<Vec<H256>, anyhow::Error> = proof
.lemma()
.iter()
.map(|oh| {
oh.0.ok_or_else(|| anyhow::anyhow!("Cannot convert null OptionalHash to H256"))
})
.collect();
Proof::new(lemma?, proof.path().to_vec())
}
/// Convert a range proof of OptionalHash to a range proof of H256
pub fn convert_range_proof_to_h256(
proof: RangeProof<OptionalHash>,
) -> Result<RangeProof<H256>, anyhow::Error> {
Ok(RangeProof {
left_proof: Self::convert_proof_to_h256(proof.left_proof)?,
right_proof: Self::convert_proof_to_h256(proof.right_proof)?,
})
}
/// Convert a Proof<H256> to Proof<OptionalHash>
pub fn convert_proof_from_h256(
proof: Proof<H256>,
) -> Result<Proof<OptionalHash>, anyhow::Error> {
let lemma = proof
.lemma()
.iter()
.map(|h| OptionalHash::some(*h))
.collect();
let path = proof.path().to_vec();
Proof::new(lemma, path)
}
/// Convert a RangeProof<H256> to RangeProof<OptionalHash>
pub fn convert_range_proof_from_h256(
range_proof: RangeProof<H256>,
) -> Result<RangeProof<OptionalHash>, anyhow::Error> {
Ok(RangeProof {
left_proof: Self::convert_proof_from_h256(range_proof.left_proof)?,
right_proof: Self::convert_proof_from_h256(range_proof.right_proof)?,
})
}
/// Generate a proof and convert it to H256
pub fn gen_proof_h256(&self, leaf_index: usize) -> Result<Proof<H256>, anyhow::Error> {
let proof = self.gen_proof(leaf_index)?;
Self::convert_proof_to_h256(proof)
}
/// Generate a range proof and convert it to H256
pub fn gen_range_proof_h256(
&self,
start_index: usize,
end_index: usize,
) -> Result<RangeProof<H256>, anyhow::Error> {
let proof = self.gen_range_proof(start_index, end_index)?;
Self::convert_range_proof_to_h256(proof)
}
/// Get the root as H256 (unwraps the OptionalHash)
pub fn root_h256(&self) -> H256 {
self.root().unwrap()
}
}
pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> { pub struct AppendMerkleTree<E: HashElement, A: Algorithm<E>> {
/// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves. /// Keep all the nodes in the latest version. `layers[0]` is the layer of leaves.
node_manager: NodeManager<E>, node_manager: NodeManager<E>,
@ -148,7 +220,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append(&mut self, new_leaf: E) { pub fn append(&mut self, new_leaf: E) {
let start_time = Instant::now(); let start_time = Instant::now();
if new_leaf == E::null() { if new_leaf.is_null() {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
@ -162,7 +234,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append_list(&mut self, leaf_list: Vec<E>) { pub fn append_list(&mut self, leaf_list: Vec<E>) {
let start_time = Instant::now(); let start_time = Instant::now();
if leaf_list.contains(&E::null()) { if leaf_list.iter().any(|leaf| leaf.is_null()) {
// appending null is not allowed. // appending null is not allowed.
return; return;
} }
@ -181,7 +253,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// TODO: Optimize to avoid storing the `null` nodes? /// TODO: Optimize to avoid storing the `null` nodes?
pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> { pub fn append_subtree(&mut self, subtree_depth: usize, subtree_root: E) -> Result<()> {
let start_time = Instant::now(); let start_time = Instant::now();
if subtree_root == E::null() { if subtree_root.is_null() {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_root is null"); bail!("subtree_root is null");
} }
@ -197,7 +269,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> { pub fn append_subtree_list(&mut self, subtree_list: Vec<(usize, E)>) -> Result<()> {
let start_time = Instant::now(); let start_time = Instant::now();
if subtree_list.iter().any(|(_, root)| root == &E::null()) { if subtree_list.iter().any(|(_, root)| root.is_null()) {
// appending null is not allowed. // appending null is not allowed.
bail!("subtree_list contains null"); bail!("subtree_list contains null");
} }
@ -217,7 +289,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves. /// This is needed if our merkle-tree in memory only keeps intermediate nodes instead of real leaves.
pub fn update_last(&mut self, updated_leaf: E) { pub fn update_last(&mut self, updated_leaf: E) {
let start_time = Instant::now(); let start_time = Instant::now();
if updated_leaf == E::null() { if updated_leaf.is_null() {
// updating to null is not allowed. // updating to null is not allowed.
return; return;
} }
@ -237,9 +309,9 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
/// Panics if the leaf is already set and different or the index is out of range. /// Panics if the leaf is already set and different or the index is out of range.
/// TODO: Batch computing intermediate nodes. /// TODO: Batch computing intermediate nodes.
pub fn fill_leaf(&mut self, index: usize, leaf: E) { pub fn fill_leaf(&mut self, index: usize, leaf: E) {
if leaf == E::null() { if leaf.is_null() {
// fill leaf with null is not allowed. // fill leaf with null is not allowed.
} else if self.node(0, index) == E::null() { } else if self.node(0, index).is_null() {
self.node_manager.start_transaction(); self.node_manager.start_transaction();
self.update_node(0, index, leaf); self.update_node(0, index, leaf);
self.recompute_after_fill_leaves(index, index + 1); self.recompute_after_fill_leaves(index, index + 1);
@ -332,7 +404,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
// skip padding node. // skip padding node.
continue; continue;
} }
if self.node(i, position) == E::null() { if self.node(i, position).is_null() {
self.update_node(i, position, data.clone()); self.update_node(i, position, data.clone());
updated_nodes.push((i, position, data)) updated_nodes.push((i, position, data))
} else if self.node(i, position) != data { } else if self.node(i, position) != data {
@ -357,7 +429,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
if position >= self.leaves() { if position >= self.leaves() {
bail!("Out of bound: position={} end={}", position, self.leaves()); bail!("Out of bound: position={} end={}", position, self.leaves());
} }
if self.node(0, position) != E::null() { if !self.node(0, position).is_null() {
Ok(Some(self.node(0, position))) Ok(Some(self.node(0, position)))
} else { } else {
// The leaf hash is unknown. // The leaf hash is unknown.
@ -472,7 +544,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
// Note that if we are recompute a range of an existing tree, // Note that if we are recompute a range of an existing tree,
// we do not need to keep these possibly null parent. This is only saved // we do not need to keep these possibly null parent. This is only saved
// for the case of constructing a new tree from the leaves. // for the case of constructing a new tree from the leaves.
let parent = if *left == E::null() || *right == E::null() { let parent = if left.is_null() || right.is_null() {
E::null() E::null()
} else { } else {
A::parent(left, right) A::parent(left, right)
@ -483,7 +555,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
assert_eq!(chunk.len(), 1); assert_eq!(chunk.len(), 1);
let r = &chunk[0]; let r = &chunk[0];
// Same as above. // Same as above.
let parent = if *r == E::null() { let parent = if r.is_null() {
E::null() E::null()
} else { } else {
A::parent_single(r, height + self.leaf_height) A::parent_single(r, height + self.leaf_height)
@ -501,8 +573,8 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
match parent_index.cmp(&self.layer_len(height + 1)) { match parent_index.cmp(&self.layer_len(height + 1)) {
Ordering::Less => { Ordering::Less => {
// We do not overwrite with null. // We do not overwrite with null.
if parent != E::null() { if !parent.is_null() {
if self.node(height + 1, parent_index) == E::null() if self.node(height + 1, parent_index).is_null()
// The last node in a layer can be updated. // The last node in a layer can be updated.
|| (self.node(height + 1, parent_index) != parent || (self.node(height + 1, parent_index) != parent
&& parent_index == self.layer_len(height + 1) - 1) && parent_index == self.layer_len(height + 1) - 1)
@ -741,7 +813,7 @@ impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> Self::E { fn node(&self, layer: usize, index: usize) -> Self::E {
match self.delta_nodes.get(layer, index).expect("range checked") { match self.delta_nodes.get(layer, index).expect("range checked") {
Some(node) if *node != E::null() => node.clone(), Some(node) if !node.is_null() => node.clone(),
_ => self _ => self
.node_manager .node_manager
.get_node(layer, index) .get_node(layer, index)
@ -798,7 +870,7 @@ macro_rules! ensure_eq {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::merkle_tree::MerkleTreeRead; use crate::merkle_tree::{MerkleTreeRead, OptionalHash};
use crate::sha3::Sha3Algorithm; use crate::sha3::Sha3Algorithm;
use crate::AppendMerkleTree; use crate::AppendMerkleTree;
@ -812,21 +884,30 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
let mut merkle = let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); vec![OptionalHash::some(H256::zero())],
merkle.append_list(data.clone()); 0,
None,
);
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(0)); merkle.commit(Some(0));
verify(&data, &mut merkle); verify(&data, &mut merkle);
data.push(H256::random()); data.push(H256::random());
merkle.append(*data.last().unwrap()); merkle.append(OptionalHash::some(*data.last().unwrap()));
merkle.commit(Some(1)); merkle.commit(Some(1));
verify(&data, &mut merkle); verify(&data, &mut merkle);
for _ in 0..6 { for _ in 0..6 {
data.push(H256::random()); data.push(H256::random());
} }
merkle.append_list(data[data.len() - 6..].to_vec()); merkle.append_list(
data[data.len() - 6..]
.iter()
.copied()
.map(OptionalHash::some)
.collect(),
);
merkle.commit(Some(2)); merkle.commit(Some(2));
verify(&data, &mut merkle); verify(&data, &mut merkle);
} }
@ -840,9 +921,12 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
let mut merkle = let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); vec![OptionalHash::some(H256::zero())],
merkle.append_list(data.clone()); 0,
None,
);
merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(0)); merkle.commit(Some(0));
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
@ -850,12 +934,17 @@ mod tests {
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap(); let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
let mut new_data = Vec::new(); let mut new_data = Vec::new();
for _ in 0..3 { for _ in 0..3 {
new_data.push(H256::random()); new_data.push(OptionalHash::some(H256::random()));
} }
merkle.append_list(new_data); merkle.append_list(new_data);
let seq = i as u64 / 6 + 1; let seq = i as u64 / 6 + 1;
merkle.commit(Some(seq)); merkle.commit(Some(seq));
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }
@ -865,7 +954,11 @@ mod tests {
#[test] #[test]
fn test_proof_at_version() { fn test_proof_at_version() {
let n = [2, 255, 256, 257]; let n = [2, 255, 256, 257];
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
vec![OptionalHash::some(H256::zero())],
0,
None,
);
let mut start_pos = 0; let mut start_pos = 0;
for (tx_seq, &entry_len) in n.iter().enumerate() { for (tx_seq, &entry_len) in n.iter().enumerate() {
@ -873,7 +966,7 @@ mod tests {
for _ in 0..entry_len { for _ in 0..entry_len {
data.push(H256::random()); data.push(H256::random());
} }
merkle.append_list(data.clone()); merkle.append_list(data.clone().into_iter().map(OptionalHash::some).collect());
merkle.commit(Some(tx_seq as u64)); merkle.commit(Some(tx_seq as u64));
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
let end = std::cmp::min(start_pos + i + 3, data.len()); let end = std::cmp::min(start_pos + i + 3, data.len());
@ -882,7 +975,12 @@ mod tests {
.unwrap() .unwrap()
.gen_range_proof(start_pos + i + 1, start_pos + end + 1) .gen_range_proof(start_pos + i + 1, start_pos + end + 1)
.unwrap(); .unwrap();
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], start_pos + i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, start_pos + i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }
@ -891,16 +989,21 @@ mod tests {
} }
} }
fn verify(data: &[H256], merkle: &mut AppendMerkleTree<H256, Sha3Algorithm>) { fn verify(data: &[H256], merkle: &mut AppendMerkleTree<OptionalHash, Sha3Algorithm>) {
for (i, item) in data.iter().enumerate() { for (i, item) in data.iter().enumerate() {
let proof = merkle.gen_proof(i + 1).unwrap(); let proof = merkle.gen_proof(i + 1).unwrap();
let r = merkle.validate(&proof, item, i + 1); let r = merkle.validate(&proof, &OptionalHash::some(*item), i + 1);
assert!(matches!(r, Ok(true)), "{:?}", r); assert!(matches!(r, Ok(true)), "{:?}", r);
} }
for i in (0..data.len()).step_by(6) { for i in (0..data.len()).step_by(6) {
let end = std::cmp::min(i + 3, data.len()); let end = std::cmp::min(i + 3, data.len());
let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap(); let range_proof = merkle.gen_range_proof(i + 1, end + 1).unwrap();
let r = range_proof.validate::<Sha3Algorithm>(&data[i..end], i + 1); let optional_data: Vec<OptionalHash> = data[i..end]
.iter()
.copied()
.map(OptionalHash::some)
.collect();
let r = range_proof.validate::<Sha3Algorithm>(&optional_data, i + 1);
assert!(r.is_ok(), "{:?}", r); assert!(r.is_ok(), "{:?}", r);
merkle.fill_with_range_proof(range_proof).unwrap(); merkle.fill_with_range_proof(range_proof).unwrap();
} }

View File

@ -8,6 +8,173 @@ use std::fmt::Debug;
use std::hash::Hash; use std::hash::Hash;
use tracing::trace; use tracing::trace;
/// A wrapper around Option<H256> that properly handles null hashes.
///
/// `None` models a "null" (unknown / not yet computed) merkle-tree node,
/// while `Some(hash)` is a concrete node. This replaces the previous scheme
/// of reserving a magic H256 value as the null sentinel.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct OptionalHash(pub Option<H256>);
impl OptionalHash {
    /// Wraps a concrete hash.
    pub fn some(hash: H256) -> Self {
        OptionalHash(Some(hash))
    }

    /// The null (absent) hash.
    pub fn none() -> Self {
        OptionalHash(None)
    }

    /// True when a concrete hash is present.
    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    /// True when the hash is null.
    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }

    /// Returns the inner hash; panics when the hash is null.
    pub fn unwrap(&self) -> H256 {
        self.0.unwrap()
    }

    /// Returns the inner hash, or `default` when null.
    pub fn unwrap_or(&self, default: H256) -> H256 {
        self.0.unwrap_or(default)
    }

    /// Borrows the inner hash, if present.
    pub fn as_ref(&self) -> Option<&H256> {
        self.0.as_ref()
    }

    /// Create OptionalHash from a 32-byte slice (always yields `Some`).
    pub fn from_slice(bytes: &[u8]) -> Result<Self, &'static str> {
        if bytes.len() != 32 {
            return Err("Invalid byte length for H256");
        }
        let mut hash_bytes = [0u8; 32];
        hash_bytes.copy_from_slice(bytes);
        Ok(OptionalHash::some(H256(hash_bytes)))
    }

    /// Convert to bytes for storage (33 bytes: 1 flag + 32 hash).
    pub fn as_bytes(&self) -> [u8; 33] {
        let mut encoded = [0u8; 33];
        if let Some(hash) = &self.0 {
            encoded[0] = 1; // Some flag
            encoded[1..].copy_from_slice(hash.as_ref());
        }
        // For None the zeroed buffer already encodes: flag 0 + zero padding.
        encoded
    }

    /// Create OptionalHash from storage bytes (33 bytes), the inverse of
    /// `as_bytes`.
    pub fn from_bytes(bytes: &[u8; 33]) -> Result<Self, &'static str> {
        let (flag, payload) = (bytes[0], &bytes[1..]);
        match flag {
            0 => Ok(OptionalHash::none()),
            1 => {
                let mut hash_bytes = [0u8; 32];
                hash_bytes.copy_from_slice(payload);
                Ok(OptionalHash::some(H256(hash_bytes)))
            }
            _ => Err("Invalid flag byte for OptionalHash"),
        }
    }
}
// Add From conversions for easier usage

/// A bare H256 is always a concrete (non-null) hash.
impl From<H256> for OptionalHash {
    fn from(hash: H256) -> Self {
        OptionalHash::some(hash)
    }
}

/// Direct wrapping: `None` maps to the null hash.
impl From<Option<H256>> for OptionalHash {
    fn from(opt: Option<H256>) -> Self {
        OptionalHash(opt)
    }
}

/// Unwrapping back into a plain Option.
impl From<OptionalHash> for Option<H256> {
    fn from(opt_hash: OptionalHash) -> Self {
        opt_hash.0
    }
}
impl AsRef<[u8]> for OptionalHash {
    /// Borrows the inner hash bytes.
    ///
    /// WARNING: panics when the hash is null (`None`); callers must check
    /// `is_some()` before treating the value as a byte slice.
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref().unwrap().as_ref()
    }
}

impl AsMut<[u8]> for OptionalHash {
    /// Mutably borrows the inner hash bytes.
    ///
    /// NOTE: a null hash is silently promoted to `H256::zero()` first so a
    /// writable 32-byte buffer can always be handed out — the value is no
    /// longer null after this call.
    fn as_mut(&mut self) -> &mut [u8] {
        if self.0.is_none() {
            self.0 = Some(H256::zero());
        }
        self.0.as_mut().unwrap().as_mut()
    }
}
impl Encode for OptionalHash {
    fn is_ssz_fixed_len() -> bool {
        true
    }

    fn ssz_fixed_len() -> usize {
        33 // 1 byte for Some/None flag + 32 bytes for hash
    }

    fn ssz_bytes_len(&self) -> usize {
        33
    }

    /// Appends a flag byte (1 = present, 0 = null) followed by 32 hash bytes
    /// (zero padding when null), matching `Decode::from_ssz_bytes`.
    fn ssz_append(&self, buf: &mut Vec<u8>) {
        if let Some(hash) = &self.0 {
            buf.push(1); // Some flag
            hash.ssz_append(buf);
        } else {
            buf.push(0); // None flag
            buf.extend_from_slice(&[0u8; 32]); // Padding zeros
        }
    }
}
impl Decode for OptionalHash {
    fn is_ssz_fixed_len() -> bool {
        true
    }

    fn ssz_fixed_len() -> usize {
        33 // 1 byte for Some/None flag + 32 bytes for hash
    }

    /// Decodes the 33-byte (flag + hash) layout written by `Encode`.
    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        if bytes.len() != 33 {
            return Err(ssz::DecodeError::InvalidByteLength {
                len: bytes.len(),
                expected: 33,
            });
        }
        let (flag, hash_bytes) = (bytes[0], &bytes[1..]);
        if flag == 0 {
            Ok(OptionalHash::none())
        } else if flag == 1 {
            Ok(OptionalHash::some(H256::from_ssz_bytes(hash_bytes)?))
        } else {
            Err(ssz::DecodeError::BytesInvalid(
                "Invalid flag byte for OptionalHash".to_string(),
            ))
        }
    }
}
// NOTE(review): no manual `unsafe impl Send/Sync` is needed here.
// `OptionalHash` only contains `Option<H256>`, and `H256` is `Send + Sync`,
// so the auto traits are derived automatically. The previous
// `unsafe impl Send/Sync` blocks were redundant and have been removed to
// avoid unjustified `unsafe`.
pub trait HashElement: pub trait HashElement:
Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync Clone + Debug + Eq + Hash + AsRef<[u8]> + AsMut<[u8]> + Decode + Encode + Send + Sync
{ {
@ -18,13 +185,28 @@ pub trait HashElement:
} }
} }
impl HashElement for OptionalHash {
    /// Padding element for completing a layer: the precomputed zero hash for
    /// the given subtree height (always concrete, never null).
    fn end_pad(height: usize) -> Self {
        OptionalHash::some(ZERO_HASHES[height])
    }

    /// The null element — an absent hash representing an unknown node.
    fn null() -> Self {
        OptionalHash::none()
    }

    fn is_null(&self) -> bool {
        self.is_none()
    }
}
// Keep the H256 implementation for backward compatibility
impl HashElement for H256 { impl HashElement for H256 {
fn end_pad(height: usize) -> Self { fn end_pad(height: usize) -> Self {
ZERO_HASHES[height] ZERO_HASHES[height]
} }
fn null() -> Self { fn null() -> Self {
H256::repeat_byte(1) H256::repeat_byte(0x01)
} }
} }
@ -70,7 +252,7 @@ pub trait MerkleTreeRead {
self.leaves() self.leaves()
); );
} }
if self.node(0, leaf_index) == Self::E::null() { if self.node(0, leaf_index).is_null() {
bail!("Not ready to generate proof for leaf_index={}", leaf_index); bail!("Not ready to generate proof for leaf_index={}", leaf_index);
} }
if self.height() == 1 { if self.height() == 1 {
@ -102,7 +284,7 @@ pub trait MerkleTreeRead {
index_in_layer >>= 1; index_in_layer >>= 1;
} }
lemma.push(self.root()); lemma.push(self.root());
if lemma.contains(&Self::E::null()) { if lemma.iter().any(|e| e.is_null()) {
bail!( bail!(
"Not enough data to generate proof, lemma={:?} path={:?}", "Not enough data to generate proof, lemma={:?} path={:?}",
lemma, lemma,

View File

@ -1,4 +1,4 @@
use crate::merkle_tree::ZERO_HASHES; use crate::merkle_tree::{OptionalHash, ZERO_HASHES};
use crate::{Algorithm, HashElement}; use crate::{Algorithm, HashElement};
use ethereum_types::H256; use ethereum_types::H256;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
@ -50,3 +50,22 @@ impl Algorithm<H256> for Sha3Algorithm {
Self::leaf_raw(data) Self::leaf_raw(data)
} }
} }
impl Algorithm<OptionalHash> for Sha3Algorithm {
    /// Combines two children into a parent, propagating null: the parent is
    /// only known when both children are known.
    fn parent(left: &OptionalHash, right: &OptionalHash) -> OptionalHash {
        if let (Some(l), Some(r)) = (&left.0, &right.0) {
            // Use the H256 implementation directly to ensure identical logic
            OptionalHash::some(<Self as Algorithm<H256>>::parent(l, r))
        } else {
            OptionalHash::none()
        }
    }

    /// Hashes raw leaf data; leaves are always concrete, never null.
    fn leaf(data: &[u8]) -> OptionalHash {
        // Use the H256 implementation directly to ensure identical logic
        OptionalHash::some(<Self as Algorithm<H256>>::leaf(data))
    }
}

View File

@ -2,7 +2,7 @@ mod proof;
use anyhow::{anyhow, bail, Error}; use anyhow::{anyhow, bail, Error};
use append_merkle::{ use append_merkle::{
AppendMerkleTree, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm, AppendMerkleTree, OptionalHash, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm,
}; };
use ethereum_types::{Address, H256, U256}; use ethereum_types::{Address, H256, U256};
use merkle_light::merkle::MerkleTree; use merkle_light::merkle::MerkleTree;
@ -32,7 +32,7 @@ pub type DataRoot = H256;
pub type FlowProof = RawProof<H256>; pub type FlowProof = RawProof<H256>;
pub type FlowRangeProof = RawRangeProof<H256>; pub type FlowRangeProof = RawRangeProof<H256>;
pub type Merkle = AppendMerkleTree<H256, Sha3Algorithm>; pub type Merkle = AppendMerkleTree<OptionalHash, Sha3Algorithm>;
// Each chunk is 32 bytes. // Each chunk is 32 bytes.
pub const CHUNK_SIZE: usize = 256; pub const CHUNK_SIZE: usize = 256;

View File

@ -12,7 +12,9 @@ use crate::log_store::{
use crate::{try_option, ZgsKeyValueDB}; use crate::{try_option, ZgsKeyValueDB};
use any::Any; use any::Any;
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use append_merkle::{MerkleTreeRead, NodeDatabase, NodeTransaction}; use append_merkle::{
AppendMerkleTree, MerkleTreeRead, NodeDatabase, NodeTransaction, OptionalHash,
};
use itertools::Itertools; use itertools::Itertools;
use kvdb::DBTransaction; use kvdb::DBTransaction;
use parking_lot::RwLock; use parking_lot::RwLock;
@ -72,7 +74,8 @@ impl FlowStore {
batch_index batch_index
) )
})?; })?;
merkle.gen_proof(sector_index) let optional_proof = merkle.gen_proof(sector_index)?;
AppendMerkleTree::convert_proof_to_h256(optional_proof)
} }
pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> { pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
@ -577,12 +580,12 @@ fn layer_size_key(layer: usize) -> Vec<u8> {
pub struct NodeDBTransaction(DBTransaction); pub struct NodeDBTransaction(DBTransaction);
impl NodeDatabase<DataRoot> for FlowDBStore { impl NodeDatabase<OptionalHash> for FlowDBStore {
fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> { fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
Ok(self Ok(self
.kvdb .kvdb
.get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))? .get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
.map(|v| DataRoot::from_slice(&v))) .map(|v| OptionalHash::from_bytes(v.as_slice().try_into().unwrap()).unwrap()))
} }
fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> { fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
@ -592,11 +595,11 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
} }
} }
fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> { fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
Box::new(NodeDBTransaction(self.kvdb.transaction())) Box::new(NodeDBTransaction(self.kvdb.transaction()))
} }
fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> { fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
let db_tx: Box<NodeDBTransaction> = tx let db_tx: Box<NodeDBTransaction> = tx
.into_any() .into_any()
.downcast() .downcast()
@ -605,21 +608,21 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
} }
} }
impl NodeTransaction<DataRoot> for NodeDBTransaction { impl NodeTransaction<OptionalHash> for NodeDBTransaction {
fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) { fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
self.0.put( self.0.put(
COL_FLOW_MPT_NODES, COL_FLOW_MPT_NODES,
&encode_mpt_node_key(layer, pos), &encode_mpt_node_key(layer, pos),
node.as_bytes(), &node.as_bytes(),
); );
} }
fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) { fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
for (layer_index, position, data) in nodes { for (layer_index, position, data) in nodes {
self.0.put( self.0.put(
COL_FLOW_MPT_NODES, COL_FLOW_MPT_NODES,
&encode_mpt_node_key(*layer_index, *position), &encode_mpt_node_key(*layer_index, *position),
data.as_bytes(), &data.as_bytes(),
); );
} }
} }

View File

@ -204,9 +204,9 @@ impl EntryBatch {
} }
} }
} }
Ok(Some( Ok(try_option!(self.to_merkle_tree(is_first_chunk)?)
try_option!(self.to_merkle_tree(is_first_chunk)?).root(), .root()
)) .into())
} }
pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> { pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> {
@ -243,7 +243,7 @@ impl EntryBatch {
pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result<Option<Merkle>> { pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result<Option<Merkle>> {
let initial_leaves = if is_first_chunk { let initial_leaves = if is_first_chunk {
vec![H256::zero()] vec![H256::zero().into()]
} else { } else {
vec![] vec![]
}; };
@ -256,7 +256,7 @@ impl EntryBatch {
); );
merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned")); merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned"));
} }
merkle.append_subtree(subtree.subtree_height, subtree.root)?; merkle.append_subtree(subtree.subtree_height, subtree.root.into())?;
} }
if merkle.leaves() != SECTORS_PER_LOAD { if merkle.leaves() != SECTORS_PER_LOAD {
let leaf_data = try_option!( let leaf_data = try_option!(

View File

@ -9,7 +9,7 @@ use crate::log_store::{
}; };
use crate::{try_option, ZgsKeyValueDB}; use crate::{try_option, ZgsKeyValueDB};
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use append_merkle::{Algorithm, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb_rocksdb::{Database, DatabaseConfig};
use merkle_light::merkle::{log2_pow2, MerkleTree}; use merkle_light::merkle::{log2_pow2, MerkleTree};
@ -55,13 +55,10 @@ const PAD_DELAY: Duration = Duration::from_secs(2);
// Process at most 1M entries (256MB) pad data at a time. // Process at most 1M entries (256MB) pad data at a time.
const PAD_MAX_SIZE: usize = 1 << 20; const PAD_MAX_SIZE: usize = 1 << 20;
static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| { static PAD_SEGMENT_ROOT: Lazy<OptionalHash> = Lazy::new(|| {
Merkle::new( let h256_leaves = data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap();
data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
0, Merkle::new(h256_leaves, 0, None).root()
None,
)
.root()
}); });
pub struct UpdateFlowMessage { pub struct UpdateFlowMessage {
pub pad_data: usize, pub pad_data: usize,
@ -130,7 +127,8 @@ impl MerkleManager {
fn try_initialize(&mut self, flow_store: &FlowStore) -> Result<()> { fn try_initialize(&mut self, flow_store: &FlowStore) -> Result<()> {
if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 { if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 {
self.last_chunk_merkle.append(H256::zero()); self.last_chunk_merkle
.append(OptionalHash::some(H256::zero()));
self.pora_chunks_merkle self.pora_chunks_merkle
.update_last(self.last_chunk_merkle.root()); .update_last(self.last_chunk_merkle.root());
} else if self.last_chunk_merkle.leaves() != 0 { } else if self.last_chunk_merkle.leaves() != 0 {
@ -222,9 +220,17 @@ impl LogStoreChunkWrite for LogManager {
self.append_entries(flow_entry_array, &mut merkle)?; self.append_entries(flow_entry_array, &mut merkle)?;
if let Some(file_proof) = maybe_file_proof { if let Some(file_proof) = maybe_file_proof {
// Convert H256 proof to OptionalHash proof
let optional_proof = AppendMerkleTree::convert_proof_from_h256(file_proof)?;
// Convert H256 merkle nodes to OptionalHash merkle nodes
let optional_nodes: Vec<(usize, OptionalHash)> = tx
.merkle_nodes
.into_iter()
.map(|(depth, hash)| (depth, OptionalHash::some(hash)))
.collect();
merkle.pora_chunks_merkle.fill_with_file_proof( merkle.pora_chunks_merkle.fill_with_file_proof(
file_proof, optional_proof,
tx.merkle_nodes, optional_nodes,
tx.start_entry_index, tx.start_entry_index,
)?; )?;
} }
@ -424,9 +430,9 @@ impl LogStoreWrite for LogManager {
// `merkle` is used in `validate_range_proof`. // `merkle` is used in `validate_range_proof`.
let mut merkle = self.merkle.write(); let mut merkle = self.merkle.write();
if valid { if valid {
merkle merkle.pora_chunks_merkle.fill_with_range_proof(
.pora_chunks_merkle AppendMerkleTree::convert_range_proof_from_h256(data.proof.clone())?,
.fill_with_range_proof(data.proof.clone())?; )?;
} }
Ok(valid) Ok(valid)
} }
@ -637,7 +643,7 @@ impl LogStoreRead for LogManager {
let tx = self let tx = self
.get_tx_by_seq_number(tx_seq)? .get_tx_by_seq_number(tx_seq)?
.ok_or_else(|| anyhow!("tx missing"))?; .ok_or_else(|| anyhow!("tx missing"))?;
let leaves = data_to_merkle_leaves(&data.chunks.data)?; let leaves = data_to_merkle_leaves_h256(&data.chunks.data)?;
data.proof.validate::<Sha3Algorithm>( data.proof.validate::<Sha3Algorithm>(
&leaves, &leaves,
(data.chunks.start_index + tx.start_entry_index) as usize, (data.chunks.start_index + tx.start_entry_index) as usize,
@ -646,7 +652,7 @@ impl LogStoreRead for LogManager {
.merkle .merkle
.read_recursive() .read_recursive()
.pora_chunks_merkle .pora_chunks_merkle
.check_root(&data.proof.root())) .check_root(&data.proof.root().into()))
} }
fn get_sync_progress(&self) -> Result<Option<(u64, H256)>> { fn get_sync_progress(&self) -> Result<Option<(u64, H256)>> {
@ -686,7 +692,7 @@ impl LogStoreRead for LogManager {
fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> { fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
Ok(( Ok((
merkle.pora_chunks_merkle.root(), merkle.pora_chunks_merkle.root().unwrap(),
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64, merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64,
)) ))
} }
@ -871,7 +877,9 @@ impl LogManager {
None => self.gen_proof_at_version(flow_index, None), None => self.gen_proof_at_version(flow_index, None),
Some(root) => { Some(root) => {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
let tx_seq = merkle.pora_chunks_merkle.tx_seq_at_root(&root)?; let tx_seq = merkle
.pora_chunks_merkle
.tx_seq_at_root(&OptionalHash::from(root))?;
self.gen_proof_at_version(flow_index, Some(tx_seq)) self.gen_proof_at_version(flow_index, Some(tx_seq))
} }
} }
@ -885,11 +893,15 @@ impl LogManager {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
let seg_index = sector_to_segment(flow_index); let seg_index = sector_to_segment(flow_index);
let top_proof = match maybe_tx_seq { let top_proof = match maybe_tx_seq {
None => merkle.pora_chunks_merkle.gen_proof(seg_index)?, None => AppendMerkleTree::convert_proof_to_h256(
Some(tx_seq) => merkle merkle.pora_chunks_merkle.gen_proof(seg_index)?,
)?,
Some(tx_seq) => AppendMerkleTree::convert_proof_to_h256(
merkle
.pora_chunks_merkle .pora_chunks_merkle
.at_version(tx_seq)? .at_version(tx_seq)?
.gen_proof(seg_index)?, .gen_proof(seg_index)?,
)?,
}; };
// TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so // TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so
@ -906,13 +918,17 @@ impl LogManager {
.gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)? .gen_proof_in_batch(seg_index, flow_index as usize % PORA_CHUNK_SIZE)?
} else { } else {
match maybe_tx_seq { match maybe_tx_seq {
None => merkle None => AppendMerkleTree::convert_proof_to_h256(
merkle
.last_chunk_merkle .last_chunk_merkle
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
Some(tx_version) => merkle )?,
Some(tx_version) => AppendMerkleTree::convert_proof_to_h256(
merkle
.last_chunk_merkle .last_chunk_merkle
.at_version(tx_version)? .at_version(tx_version)?
.gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?,
)?,
} }
}; };
entry_proof(&top_proof, &sub_proof) entry_proof(&top_proof, &sub_proof)
@ -938,9 +954,10 @@ impl LogManager {
if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE { if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
merkle merkle
.last_chunk_merkle .last_chunk_merkle
.append_subtree(subtree_depth, subtree_root)?; .append_subtree(subtree_depth, OptionalHash::some(subtree_root))?;
if merkle.last_chunk_merkle.leaves() == subtree_size { if merkle.last_chunk_merkle.leaves() == subtree_size {
// `last_chunk_merkle` was empty, so this is a new leaf in the top_tree. // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.append_subtree(1, merkle.last_chunk_merkle.root())?; .append_subtree(1, merkle.last_chunk_merkle.root())?;
@ -960,9 +977,10 @@ impl LogManager {
// the chunks boundary. // the chunks boundary.
assert_eq!(merkle.last_chunk_merkle.leaves(), 0); assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
assert!(subtree_size >= PORA_CHUNK_SIZE); assert!(subtree_size >= PORA_CHUNK_SIZE);
merkle merkle.pora_chunks_merkle.append_subtree(
.pora_chunks_merkle subtree_depth - log2_pow2(PORA_CHUNK_SIZE),
.append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?; OptionalHash::some(subtree_root),
)?;
} }
} }
@ -997,9 +1015,8 @@ impl LogManager {
let mut completed_chunk_index = None; let mut completed_chunk_index = None;
if pad_data.len() < last_chunk_pad { if pad_data.len() < last_chunk_pad {
is_full_empty = false; is_full_empty = false;
merkle let pad_leaves = data_to_merkle_leaves(&pad_data)?;
.last_chunk_merkle merkle.last_chunk_merkle.append_list(pad_leaves);
.append_list(data_to_merkle_leaves(&pad_data)?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
@ -1007,9 +1024,8 @@ impl LogManager {
if last_chunk_pad != 0 { if last_chunk_pad != 0 {
is_full_empty = false; is_full_empty = false;
// Pad the last chunk. // Pad the last chunk.
merkle let last_chunk_leaves = data_to_merkle_leaves(&pad_data[..last_chunk_pad])?;
.last_chunk_merkle merkle.last_chunk_merkle.append_list(last_chunk_leaves);
.append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
@ -1019,7 +1035,7 @@ impl LogManager {
// Pad with more complete chunks. // Pad with more complete chunks.
let mut start_index = last_chunk_pad / ENTRY_SIZE; let mut start_index = last_chunk_pad / ENTRY_SIZE;
while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE { while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT); merkle.pora_chunks_merkle.append(PAD_SEGMENT_ROOT.clone());
start_index += PORA_CHUNK_SIZE; start_index += PORA_CHUNK_SIZE;
} }
assert_eq!(pad_data.len(), start_index * ENTRY_SIZE); assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
@ -1104,7 +1120,7 @@ impl LogManager {
if chunk_index < merkle.pora_chunks_merkle.leaves() as u64 { if chunk_index < merkle.pora_chunks_merkle.leaves() as u64 {
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.fill_leaf(chunk_index as usize, chunk_root); .fill_leaf(chunk_index as usize, OptionalHash::some(chunk_root));
} else { } else {
// TODO(zz): This assumption may be false in the future. // TODO(zz): This assumption may be false in the future.
unreachable!("We always insert tx nodes before put_chunks"); unreachable!("We always insert tx nodes before put_chunks");
@ -1253,7 +1269,7 @@ impl LogManager {
let mut to_insert_subtrees = Vec::new(); let mut to_insert_subtrees = Vec::new();
let mut start_index = 0; let mut start_index = 0;
for (subtree_height, root) in subtree_list { for (subtree_height, root) in subtree_list {
to_insert_subtrees.push((start_index, subtree_height, root)); to_insert_subtrees.push((start_index, subtree_height, root.unwrap()));
start_index += 1 << (subtree_height - 1); start_index += 1 << (subtree_height - 1);
} }
self.flow_store self.flow_store
@ -1301,14 +1317,14 @@ macro_rules! try_option {
/// This should be called with input checked. /// This should be called with input checked.
pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> { pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result<FileMerkleTree> {
Ok(FileMerkleTree::new( Ok(FileMerkleTree::new(
data_to_merkle_leaves(leaf_data)? data_to_merkle_leaves_h256(leaf_data)?
.into_iter() .into_iter()
.map(|h| h.0) .map(|h| h.0)
.collect::<Vec<[u8; 32]>>(), .collect::<Vec<[u8; 32]>>(),
)) ))
} }
pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> { pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<OptionalHash>> {
let start_time = Instant::now(); let start_time = Instant::now();
if leaf_data.len() % ENTRY_SIZE != 0 { if leaf_data.len() % ENTRY_SIZE != 0 {
bail!("merkle_tree: mismatched data size"); bail!("merkle_tree: mismatched data size");
@ -1331,6 +1347,12 @@ pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result<Vec<H256>> {
Ok(r) Ok(r)
} }
/// Convenience function that combines data_to_merkle_leaves and conversion to H256
pub fn data_to_merkle_leaves_h256(leaf_data: &[u8]) -> Result<Vec<H256>> {
let optional_hashes = data_to_merkle_leaves(leaf_data)?;
Ok(optional_hashes.into_iter().map(|oh| oh.unwrap()).collect())
}
pub fn bytes_to_entries(size_bytes: u64) -> u64 { pub fn bytes_to_entries(size_bytes: u64) -> u64 {
if size_bytes % ENTRY_SIZE as u64 == 0 { if size_bytes % ENTRY_SIZE as u64 == 0 {
size_bytes / ENTRY_SIZE as u64 size_bytes / ENTRY_SIZE as u64

View File

@ -1,9 +1,9 @@
use crate::log_store::log_manager::{ use crate::log_store::log_manager::{
data_to_merkle_leaves, sub_merkle_tree, tx_subtree_root_list_padded, LogConfig, LogManager, data_to_merkle_leaves, data_to_merkle_leaves_h256, sub_merkle_tree,
PORA_CHUNK_SIZE, tx_subtree_root_list_padded, LogConfig, LogManager, PORA_CHUNK_SIZE,
}; };
use crate::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite}; use crate::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite};
use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{Algorithm, AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use rand::random; use rand::random;
use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE}; use shared_types::{compute_padded_chunk_size, ChunkArray, Transaction, CHUNK_SIZE};
@ -22,11 +22,17 @@ fn test_put_get() {
data[i * CHUNK_SIZE] = random(); data[i * CHUNK_SIZE] = random();
} }
let (padded_chunks, _) = compute_padded_chunk_size(data_size); let (padded_chunks, _) = compute_padded_chunk_size(data_size);
let mut merkle = AppendMerkleTree::<H256, Sha3Algorithm>::new(vec![H256::zero()], 0, None); let mut merkle = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap()); vec![OptionalHash::some(H256::zero())],
0,
None,
);
let padding_leaves = data_to_merkle_leaves(&LogManager::padding_raw(start_offset - 1)).unwrap();
merkle.append_list(padding_leaves);
let mut data_padded = data.clone(); let mut data_padded = data.clone();
data_padded.append(&mut vec![0u8; CHUNK_SIZE]); data_padded.append(&mut vec![0u8; CHUNK_SIZE]);
merkle.append_list(data_to_merkle_leaves(&data_padded).unwrap()); let data_leaves = data_to_merkle_leaves(&data_padded).unwrap();
merkle.append_list(data_leaves);
merkle.commit(Some(0)); merkle.commit(Some(0));
let tx_merkle = sub_merkle_tree(&data).unwrap(); let tx_merkle = sub_merkle_tree(&data).unwrap();
let tx = Transaction { let tx = Transaction {
@ -78,16 +84,17 @@ fn test_put_get() {
.unwrap() .unwrap()
.unwrap(); .unwrap();
assert_eq!(chunk_with_proof.chunk, chunk_array.chunk_at(i).unwrap()); assert_eq!(chunk_with_proof.chunk, chunk_array.chunk_at(i).unwrap());
assert_eq!( assert_eq!(
chunk_with_proof.proof, chunk_with_proof.proof,
merkle.gen_proof(i + start_offset).unwrap() merkle.gen_proof_h256(i + start_offset).unwrap()
); );
let r = chunk_with_proof.proof.validate::<Sha3Algorithm>( let r = chunk_with_proof.proof.validate::<Sha3Algorithm>(
&Sha3Algorithm::leaf(&chunk_with_proof.chunk.0), &Sha3Algorithm::leaf(&chunk_with_proof.chunk.0),
i + start_offset, i + start_offset,
); );
assert!(r.is_ok(), "proof={:?} \n r={:?}", chunk_with_proof.proof, r); assert!(r.is_ok(), "proof={:?} \n r={:?}", chunk_with_proof.proof, r);
assert!(merkle.check_root(&chunk_with_proof.proof.root())); assert!(merkle.check_root(&chunk_with_proof.proof.root().into()));
} }
for i in (0..chunk_count).step_by(PORA_CHUNK_SIZE / 3) { for i in (0..chunk_count).step_by(PORA_CHUNK_SIZE / 3) {
let end = std::cmp::min(i + PORA_CHUNK_SIZE, chunk_count); let end = std::cmp::min(i + PORA_CHUNK_SIZE, chunk_count);
@ -102,7 +109,7 @@ fn test_put_get() {
assert!(chunk_array_with_proof assert!(chunk_array_with_proof
.proof .proof
.validate::<Sha3Algorithm>( .validate::<Sha3Algorithm>(
&data_to_merkle_leaves(&chunk_array_with_proof.chunks.data).unwrap(), &data_to_merkle_leaves_h256(&chunk_array_with_proof.chunks.data).unwrap(),
i + start_offset i + start_offset
) )
.is_ok()); .is_ok());
@ -119,12 +126,12 @@ fn test_root() {
} }
let mt = sub_merkle_tree(&data).unwrap(); let mt = sub_merkle_tree(&data).unwrap();
println!("{:?} {}", mt.root(), hex::encode(mt.root())); println!("{:?} {}", mt.root(), hex::encode(mt.root()));
let append_mt = AppendMerkleTree::<H256, Sha3Algorithm>::new( let append_mt = AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new(
data_to_merkle_leaves(&data).unwrap(), data_to_merkle_leaves(&data).unwrap(),
0, 0,
None, None,
); );
assert_eq!(mt.root(), append_mt.root().0); assert_eq!(mt.root(), append_mt.root().unwrap().0);
} }
} }

View File

@ -6,7 +6,7 @@ use crate::log_store::log_manager::{
use crate::log_store::metrics; use crate::log_store::metrics;
use crate::{try_option, LogManager, ZgsKeyValueDB}; use crate::{try_option, LogManager, ZgsKeyValueDB};
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; use append_merkle::{AppendMerkleTree, MerkleTreeRead, OptionalHash, Sha3Algorithm};
use ethereum_types::H256; use ethereum_types::H256;
use merkle_light::merkle::log2_pow2; use merkle_light::merkle::log2_pow2;
use shared_types::{DataRoot, Transaction}; use shared_types::{DataRoot, Transaction};
@ -329,7 +329,7 @@ impl TransactionStore {
&self, &self,
pora_chunk_index: usize, pora_chunk_index: usize,
mut tx_seq: u64, mut tx_seq: u64,
) -> Result<AppendMerkleTree<H256, Sha3Algorithm>> { ) -> Result<AppendMerkleTree<OptionalHash, Sha3Algorithm>> {
let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64; let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64;
let mut tx_list = Vec::new(); let mut tx_list = Vec::new();
// Find the first tx within the last chunk. // Find the first tx within the last chunk.
@ -384,9 +384,13 @@ impl TransactionStore {
} }
let mut merkle = if last_chunk_start_index == 0 { let mut merkle = if last_chunk_start_index == 0 {
// The first entry hash is initialized as zero. // The first entry hash is initialized as zero.
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth(vec![H256::zero()], 1, None) AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
vec![H256::zero().into()],
1,
None,
)
} else { } else {
AppendMerkleTree::<H256, Sha3Algorithm>::new_with_depth( AppendMerkleTree::<OptionalHash, Sha3Algorithm>::new_with_depth(
vec![], vec![],
log2_pow2(PORA_CHUNK_SIZE) + 1, log2_pow2(PORA_CHUNK_SIZE) + 1,
None, None,
@ -400,9 +404,12 @@ impl TransactionStore {
cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree); cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree);
merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(pad_len))?); merkle.append_list(data_to_merkle_leaves(&LogManager::padding_raw(pad_len))?);
} }
// Since we are building the last merkle with a given last tx_seq, it's ensured // Convert H256 to OptionalHash for append_subtree_list
// that appending subtrees will not go beyond the max size. let subtree_list_optional_hash = subtree_list
merkle.append_subtree_list(subtree_list)?; .into_iter()
.map(|(depth, hash)| (depth, hash.into()))
.collect();
merkle.append_subtree_list(subtree_list_optional_hash)?;
merkle.commit(Some(tx_seq)); merkle.commit(Some(tx_seq));
} }
Ok(merkle) Ok(merkle)