Update merkle tree trait.

This commit is contained in:
Peilun Li 2024-10-08 16:19:06 +08:00
parent 5100c22933
commit 7589bdf4bb
6 changed files with 57 additions and 35 deletions

View File

@ -104,7 +104,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
} }
for (layer_index, position, h) in initial_data.extra_mpt_nodes { for (layer_index, position, h) in initial_data.extra_mpt_nodes {
// TODO: Delete duplicate nodes from DB. // TODO: Delete duplicate nodes from DB.
merkle.layers[layer_index][position] = h; merkle.node_manager.add_node(layer_index, position, h);
} }
Ok(merkle) Ok(merkle)
} }
@ -385,7 +385,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
for layer in &self.layers { for layer in &self.layers {
right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone())); right_most_nodes.push((layer.len() - 1, layer.last().unwrap().clone()));
} }
let root = self.root().clone(); let root = self.root();
self.delta_nodes_map self.delta_nodes_map
.insert(tx_seq, DeltaNodes::new(right_most_nodes)); .insert(tx_seq, DeltaNodes::new(right_most_nodes));
self.root_to_tx_seq_map.insert(root, tx_seq); self.root_to_tx_seq_map.insert(root, tx_seq);
@ -566,7 +566,7 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
bail!("empty tree"); bail!("empty tree");
} }
Ok(HistoryTree { Ok(HistoryTree {
layers: &self.layers, node_manager: &self.node_manager,
delta_nodes, delta_nodes,
leaf_height: self.leaf_height, leaf_height: self.leaf_height,
}) })
@ -596,10 +596,10 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
fn first_known_root_at(&self, index: usize) -> (usize, E) { fn first_known_root_at(&self, index: usize) -> (usize, E) {
let mut height = 0; let mut height = 0;
let mut index_in_layer = index; let mut index_in_layer = index;
while height < self.layers.len() { while height < self.node_manager.num_layers() {
let node = self.node(height, index_in_layer); let node = self.node(height, index_in_layer);
if !node.is_null() { if !node.is_null() {
return (height + 1, node.clone()); return (height + 1, node);
} }
height += 1; height += 1;
index_in_layer /= 2; index_in_layer /= 2;
@ -644,7 +644,7 @@ impl<E: HashElement> DeltaNodes<E> {
pub struct HistoryTree<'m, E: HashElement> { pub struct HistoryTree<'m, E: HashElement> {
/// A reference to the global tree nodes. /// A reference to the global tree nodes.
layers: &'m Vec<Vec<E>>, node_manager: &'m NodeManager<E>,
/// The delta nodes that differ from `layers`. /// The delta nodes that differ from `layers`.
/// This could be a reference, we just take ownership for convenience. /// This could be a reference, we just take ownership for convenience.
delta_nodes: &'m DeltaNodes<E>, delta_nodes: &'m DeltaNodes<E>,
@ -655,16 +655,18 @@ pub struct HistoryTree<'m, E: HashElement> {
impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A> { impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> &Self::E { fn node(&self, layer: usize, index: usize) -> Self::E {
&self.layers[layer][index] self.node_manager
.get_node(layer, index)
.expect("index checked")
} }
fn height(&self) -> usize { fn height(&self) -> usize {
self.layers.len() self.node_manager.num_layers()
} }
fn layer_len(&self, layer_height: usize) -> usize { fn layer_len(&self, layer_height: usize) -> usize {
self.layers[layer_height].len() self.node_manager.layer_size(layer_height)
} }
fn padding_node(&self, height: usize) -> Self::E { fn padding_node(&self, height: usize) -> Self::E {
@ -674,10 +676,13 @@ impl<E: HashElement, A: Algorithm<E>> MerkleTreeRead for AppendMerkleTree<E, A>
impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> { impl<'a, E: HashElement> MerkleTreeRead for HistoryTree<'a, E> {
type E = E; type E = E;
fn node(&self, layer: usize, index: usize) -> &Self::E { fn node(&self, layer: usize, index: usize) -> Self::E {
match self.delta_nodes.get(layer, index).expect("range checked") { match self.delta_nodes.get(layer, index).expect("range checked") {
Some(node) if *node != E::null() => node, Some(node) if *node != E::null() => node.clone(),
_ => &self.layers[layer][index], _ => self
.node_manager
.get_node(layer, index)
.expect("index checked"),
} }
} }

View File

@ -49,7 +49,7 @@ pub trait Algorithm<E: HashElement> {
pub trait MerkleTreeRead { pub trait MerkleTreeRead {
type E: HashElement; type E: HashElement;
fn node(&self, layer: usize, index: usize) -> &Self::E; fn node(&self, layer: usize, index: usize) -> Self::E;
fn height(&self) -> usize; fn height(&self) -> usize;
fn layer_len(&self, layer_height: usize) -> usize; fn layer_len(&self, layer_height: usize) -> usize;
fn padding_node(&self, height: usize) -> Self::E; fn padding_node(&self, height: usize) -> Self::E;
@ -58,7 +58,7 @@ pub trait MerkleTreeRead {
self.layer_len(0) self.layer_len(0)
} }
fn root(&self) -> &Self::E { fn root(&self) -> Self::E {
self.node(self.height() - 1, 0) self.node(self.height() - 1, 0)
} }
@ -70,16 +70,16 @@ pub trait MerkleTreeRead {
self.leaves() self.leaves()
); );
} }
if self.node(0, leaf_index) == &Self::E::null() { if self.node(0, leaf_index) == Self::E::null() {
bail!("Not ready to generate proof for leaf_index={}", leaf_index); bail!("Not ready to generate proof for leaf_index={}", leaf_index);
} }
if self.height() == 1 { if self.height() == 1 {
return Proof::new(vec![self.root().clone(), self.root().clone()], vec![]); return Proof::new(vec![self.root(), self.root().clone()], vec![]);
} }
let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root let mut lemma: Vec<Self::E> = Vec::with_capacity(self.height()); // path + root
let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1 let mut path: Vec<bool> = Vec::with_capacity(self.height() - 2); // path - 1
let mut index_in_layer = leaf_index; let mut index_in_layer = leaf_index;
lemma.push(self.node(0, leaf_index).clone()); lemma.push(self.node(0, leaf_index));
for height in 0..(self.height() - 1) { for height in 0..(self.height() - 1) {
trace!( trace!(
"gen_proof: height={} index={} hash={:?}", "gen_proof: height={} index={} hash={:?}",
@ -93,15 +93,15 @@ pub trait MerkleTreeRead {
// TODO: This can be skipped if the tree size is available in validation. // TODO: This can be skipped if the tree size is available in validation.
lemma.push(self.padding_node(height)); lemma.push(self.padding_node(height));
} else { } else {
lemma.push(self.node(height, index_in_layer + 1).clone()); lemma.push(self.node(height, index_in_layer + 1));
} }
} else { } else {
path.push(false); path.push(false);
lemma.push(self.node(height, index_in_layer - 1).clone()); lemma.push(self.node(height, index_in_layer - 1));
} }
index_in_layer >>= 1; index_in_layer >>= 1;
} }
lemma.push(self.root().clone()); lemma.push(self.root());
if lemma.contains(&Self::E::null()) { if lemma.contains(&Self::E::null()) {
bail!( bail!(
"Not enough data to generate proof, lemma={:?} path={:?}", "Not enough data to generate proof, lemma={:?} path={:?}",

View File

@ -6,6 +6,7 @@ use tracing::error;
pub struct NodeManager<E: HashElement> { pub struct NodeManager<E: HashElement> {
cache: HashMap<(usize, usize), E>, cache: HashMap<(usize, usize), E>,
layer_size: Vec<usize>,
db: Arc<dyn NodeDatabase<E>>, db: Arc<dyn NodeDatabase<E>>,
} }
@ -13,6 +14,7 @@ impl<E: HashElement> NodeManager<E> {
pub fn new(db: Arc<dyn NodeDatabase<E>>) -> Self { pub fn new(db: Arc<dyn NodeDatabase<E>>) -> Self {
Self { Self {
cache: HashMap::new(), cache: HashMap::new(),
layer_size: vec![],
db, db,
} }
} }
@ -32,6 +34,21 @@ impl<E: HashElement> NodeManager<E> {
error!("Failed to save node: {}", e); error!("Failed to save node: {}", e);
} }
self.cache.insert((layer, pos), node); self.cache.insert((layer, pos), node);
if pos + 1 > self.layer_size[layer] {
self.layer_size[layer] = pos + 1;
}
}
pub fn add_layer(&mut self) {
self.layer_size.push(0);
}
pub fn layer_size(&self, layer: usize) -> usize {
self.layer_size[layer]
}
pub fn num_layers(&self) -> usize {
self.layer_size.len()
} }
} }

View File

@ -436,7 +436,7 @@ impl FlowDBStore {
let mut expected_index = 0; let mut expected_index = 0;
let empty_data = vec![0; PORA_CHUNK_SIZE * ENTRY_SIZE]; let empty_data = vec![0; PORA_CHUNK_SIZE * ENTRY_SIZE];
let empty_root = *Merkle::new( let empty_root = Merkle::new(
Arc::new(EmptyNodeDatabase {}), Arc::new(EmptyNodeDatabase {}),
data_to_merkle_leaves(&empty_data)?, data_to_merkle_leaves(&empty_data)?,
0, 0,

View File

@ -206,7 +206,7 @@ impl EntryBatch {
} }
} }
Ok(Some( Ok(Some(
*try_option!(self.to_merkle_tree(is_first_chunk)?).root(), try_option!(self.to_merkle_tree(is_first_chunk)?).root(),
)) ))
} }

View File

@ -116,7 +116,7 @@ impl MerkleManager {
if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 { if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 {
self.last_chunk_merkle.append(H256::zero()); self.last_chunk_merkle.append(H256::zero());
self.pora_chunks_merkle self.pora_chunks_merkle
.update_last(*self.last_chunk_merkle.root()); .update_last(self.last_chunk_merkle.root());
} else if self.last_chunk_merkle.leaves() != 0 { } else if self.last_chunk_merkle.leaves() != 0 {
let last_chunk_start_index = self.last_chunk_start_index(); let last_chunk_start_index = self.last_chunk_start_index();
let last_chunk_data = flow_store.get_available_entries( let last_chunk_data = flow_store.get_available_entries(
@ -355,7 +355,7 @@ impl LogStoreWrite for LogManager {
merkle.revert_merkle_tree(tx_seq, &self.tx_store)?; merkle.revert_merkle_tree(tx_seq, &self.tx_store)?;
merkle.try_initialize(&self.flow_store)?; merkle.try_initialize(&self.flow_store)?;
assert_eq!( assert_eq!(
Some(*merkle.last_chunk_merkle.root()), Some(merkle.last_chunk_merkle.root()),
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.leaf_at(merkle.pora_chunks_merkle.leaves() - 1)? .leaf_at(merkle.pora_chunks_merkle.leaves() - 1)?
@ -577,7 +577,7 @@ impl LogStoreRead for LogManager {
fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> { fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> {
let merkle = self.merkle.read_recursive(); let merkle = self.merkle.read_recursive();
Ok(( Ok((
*merkle.pora_chunks_merkle.root(), merkle.pora_chunks_merkle.root(),
merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64, merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64,
)) ))
} }
@ -727,7 +727,7 @@ impl LogManager {
last_chunk_merkle.leaves(), last_chunk_merkle.leaves(),
); );
if last_chunk_merkle.leaves() != 0 { if last_chunk_merkle.leaves() != 0 {
pora_chunks_merkle.append(*last_chunk_merkle.root()); pora_chunks_merkle.append(last_chunk_merkle.root());
// update the merkle root // update the merkle root
pora_chunks_merkle.commit(start_tx_seq); pora_chunks_merkle.commit(start_tx_seq);
} }
@ -893,16 +893,16 @@ impl LogManager {
// `last_chunk_merkle` was empty, so this is a new leaf in the top_tree. // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.append_subtree(1, *merkle.last_chunk_merkle.root())?; .append_subtree(1, merkle.last_chunk_merkle.root())?;
} else { } else {
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(*merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
} }
if merkle.last_chunk_merkle.leaves() == PORA_CHUNK_SIZE { if merkle.last_chunk_merkle.leaves() == PORA_CHUNK_SIZE {
batch_root_map.insert( batch_root_map.insert(
merkle.pora_chunks_merkle.leaves() - 1, merkle.pora_chunks_merkle.leaves() - 1,
(*merkle.last_chunk_merkle.root(), 1), (merkle.last_chunk_merkle.root(), 1),
); );
self.complete_last_chunk_merkle( self.complete_last_chunk_merkle(
merkle.pora_chunks_merkle.leaves() - 1, merkle.pora_chunks_merkle.leaves() - 1,
@ -958,7 +958,7 @@ impl LogManager {
.append_list(data_to_merkle_leaves(&pad_data)?); .append_list(data_to_merkle_leaves(&pad_data)?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(*merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
} else { } else {
if last_chunk_pad != 0 { if last_chunk_pad != 0 {
is_full_empty = false; is_full_empty = false;
@ -968,10 +968,10 @@ impl LogManager {
.append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?); .append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?);
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(*merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
root_map.insert( root_map.insert(
merkle.pora_chunks_merkle.leaves() - 1, merkle.pora_chunks_merkle.leaves() - 1,
(*merkle.last_chunk_merkle.root(), 1), (merkle.last_chunk_merkle.root(), 1),
); );
completed_chunk_index = Some(merkle.pora_chunks_merkle.leaves() - 1); completed_chunk_index = Some(merkle.pora_chunks_merkle.leaves() - 1);
} }
@ -982,7 +982,7 @@ impl LogManager {
let data = pad_data[start_index * ENTRY_SIZE let data = pad_data[start_index * ENTRY_SIZE
..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE] ..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE]
.to_vec(); .to_vec();
let root = *Merkle::new( let root = Merkle::new(
Arc::new(EmptyNodeDatabase {}), Arc::new(EmptyNodeDatabase {}),
data_to_merkle_leaves(&data)?, data_to_merkle_leaves(&data)?,
0, 0,
@ -1068,7 +1068,7 @@ impl LogManager {
} }
merkle merkle
.pora_chunks_merkle .pora_chunks_merkle
.update_last(*merkle.last_chunk_merkle.root()); .update_last(merkle.last_chunk_merkle.root());
} }
let chunk_roots = self.flow_store.append_entries(flow_entry_array)?; let chunk_roots = self.flow_store.append_entries(flow_entry_array)?;
for (chunk_index, chunk_root) in chunk_roots { for (chunk_index, chunk_root) in chunk_roots {