Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-11-03 16:17:27 +00:00)

Commit b900b9c48d ("fix error"), parent f29f4f8b61.
@@ -259,8 +259,11 @@ impl<E: HashElement, A: Algorithm<E>> AppendMerkleTree<E, A> {
         }
         self.node_manager.start_transaction();
         let start_index = self.leaves();
+        println!("append_subtree_inner");
         self.append_subtree_inner(subtree_depth, subtree_root)?;
+        println!("recompute");
         self.recompute_after_append_subtree(start_index, subtree_depth - 1);
+        println!("commit");
         self.node_manager.commit();
         metrics::APPEND_SUBTREE.update_since(start_time);
 
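The three `println!` calls added above bracket the phases of `append_subtree`: open a node-manager transaction, append the subtree and recompute the parent nodes above it, then commit. Like the tracing added in the `LogManager` and test hunks below, they look like temporary debug output for the error this commit is chasing.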
@@ -40,6 +40,45 @@ impl OptionalHash {
     pub fn as_ref(&self) -> Option<&H256> {
         self.0.as_ref()
     }
+
+    /// Create OptionalHash from a byte slice
+    pub fn from_slice(bytes: &[u8]) -> Result<Self, &'static str> {
+        if bytes.len() != 32 {
+            return Err("Invalid byte length for H256");
+        }
+        let mut hash_bytes = [0u8; 32];
+        hash_bytes.copy_from_slice(bytes);
+        Ok(OptionalHash::some(H256(hash_bytes)))
+    }
+
+    /// Convert to bytes for storage (33 bytes: 1 flag + 32 hash)
+    pub fn as_bytes(&self) -> [u8; 33] {
+        let mut bytes = [0u8; 33];
+        match &self.0 {
+            Some(hash) => {
+                bytes[0] = 1; // Some flag
+                bytes[1..].copy_from_slice(hash.as_ref());
+            }
+            None => {
+                bytes[0] = 0; // None flag
+                // bytes[1..] remain zeros
+            }
+        }
+        bytes
+    }
+
+    /// Create OptionalHash from storage bytes (33 bytes)
+    pub fn from_bytes(bytes: &[u8; 33]) -> Result<Self, &'static str> {
+        match bytes[0] {
+            0 => Ok(OptionalHash::none()),
+            1 => {
+                let mut hash_bytes = [0u8; 32];
+                hash_bytes.copy_from_slice(&bytes[1..]);
+                Ok(OptionalHash::some(H256(hash_bytes)))
+            }
+            _ => Err("Invalid flag byte for OptionalHash"),
+        }
+    }
 }
 
 // Add From conversions for easier usage
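Together these helpers define a fixed 33-byte storage frame: one flag byte (1 = Some, 0 = None) followed by the 32 hash bytes. A minimal round-trip sketch, assuming `OptionalHash` derives `PartialEq`/`Debug` and `H256` is the `ethereum_types` type used elsewhere in this crate:

    // Round-trip through the new 33-byte storage encoding (sketch).
    let some = OptionalHash::some(H256([0x11u8; 32]));
    let bytes = some.as_bytes();
    assert_eq!(bytes[0], 1); // Some flag
    assert_eq!(bytes[1..], [0x11u8; 32]); // hash payload
    assert_eq!(OptionalHash::from_bytes(&bytes).unwrap(), some);

    let none = OptionalHash::none();
    let bytes = none.as_bytes();
    assert_eq!(bytes[0], 0); // None flag; the 32 payload bytes stay zero
    assert_eq!(OptionalHash::from_bytes(&bytes).unwrap(), none);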
@@ -82,18 +121,25 @@ impl Encode for OptionalHash {
     }
 
     fn ssz_fixed_len() -> usize {
-        32 // Same as H256 - just 32 bytes, no flag byte
+        33 // 1 byte for Some/None flag + 32 bytes for hash
     }
 
     fn ssz_bytes_len(&self) -> usize {
-        32
+        33
     }
 
     fn ssz_append(&self, buf: &mut Vec<u8>) {
-        // Use H256::zero() for None, actual hash for Some
-        let hash = self.0.unwrap();
+        match &self.0 {
+            Some(hash) => {
+                buf.push(1); // Some flag
                 hash.ssz_append(buf);
             }
+            None => {
+                buf.push(0); // None flag
+                buf.extend_from_slice(&[0u8; 32]); // Padding zeros
+            }
+        }
+    }
 }
 
 impl Decode for OptionalHash {
@@ -102,20 +148,28 @@ impl Decode for OptionalHash {
     }
 
     fn ssz_fixed_len() -> usize {
-        32 // Same as H256
+        33 // 1 byte for Some/None flag + 32 bytes for hash
     }
 
     fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
-        if bytes.len() != 32 {
+        if bytes.len() != 33 {
             return Err(ssz::DecodeError::InvalidByteLength {
                 len: bytes.len(),
-                expected: 32,
+                expected: 33,
             });
         }
 
-        let hash = H256::from_ssz_bytes(bytes)?;
+        match bytes[0] {
+            0 => Ok(OptionalHash::none()),
+            1 => {
+                let hash = H256::from_ssz_bytes(&bytes[1..])?;
                 Ok(OptionalHash::some(hash))
             }
+            _ => Err(ssz::DecodeError::BytesInvalid(
+                "Invalid flag byte for OptionalHash".to_string(),
+            )),
+        }
+    }
 }
 
 unsafe impl Send for OptionalHash {}
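The SSZ encoding now mirrors the storage frame: a leading flag byte plus 32 hash bytes, so `ssz_fixed_len` and `ssz_bytes_len` both report 33. The removed code claimed `None` would encode as `H256::zero()` yet called `self.0.unwrap()`, which panics on `None`; the flag byte fixes the panic and also makes an all-zero hash distinguishable from `None`. A round-trip sketch under the same assumptions as above:

    use ssz::{Decode, Encode};

    // SSZ round-trip with the flag-prefixed layout (sketch).
    let value = OptionalHash::some(H256([0xab; 32]));
    let mut buf = Vec::new();
    value.ssz_append(&mut buf);
    assert_eq!(buf.len(), 33);
    assert_eq!(buf[0], 1); // Some flag
    assert_eq!(OptionalHash::from_ssz_bytes(&buf).unwrap(), value);

    // None encodes as a zero flag followed by 32 padding zeros.
    let mut buf = Vec::new();
    OptionalHash::none().ssz_append(&mut buf);
    assert_eq!(buf, [0u8; 33]);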
@@ -580,12 +580,12 @@ fn layer_size_key(layer: usize) -> Vec<u8> {
 
 pub struct NodeDBTransaction(DBTransaction);
 
-impl NodeDatabase<DataRoot> for FlowDBStore {
-    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<DataRoot>> {
+impl NodeDatabase<OptionalHash> for FlowDBStore {
+    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
         Ok(self
             .kvdb
             .get(COL_FLOW_MPT_NODES, &encode_mpt_node_key(layer, pos))?
-            .map(|v| DataRoot::from_slice(&v)))
+            .map(|v| OptionalHash::from_bytes(v.as_slice().try_into().unwrap()).unwrap()))
     }
 
     fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
@@ -595,11 +595,11 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
         }
     }
 
-    fn start_transaction(&self) -> Box<dyn NodeTransaction<DataRoot>> {
+    fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
         Box::new(NodeDBTransaction(self.kvdb.transaction()))
     }
 
-    fn commit(&self, tx: Box<dyn NodeTransaction<DataRoot>>) -> Result<()> {
+    fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
         let db_tx: Box<NodeDBTransaction> = tx
             .into_any()
             .downcast()
@@ -608,21 +608,21 @@ impl NodeDatabase<DataRoot> for FlowDBStore {
     }
 }
 
-impl NodeTransaction<DataRoot> for NodeDBTransaction {
-    fn save_node(&mut self, layer: usize, pos: usize, node: &DataRoot) {
+impl NodeTransaction<OptionalHash> for NodeDBTransaction {
+    fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
         self.0.put(
             COL_FLOW_MPT_NODES,
             &encode_mpt_node_key(layer, pos),
-            node.as_bytes(),
+            &node.as_bytes(),
         );
     }
 
-    fn save_node_list(&mut self, nodes: &[(usize, usize, &DataRoot)]) {
+    fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
         for (layer_index, position, data) in nodes {
             self.0.put(
                 COL_FLOW_MPT_NODES,
                 &encode_mpt_node_key(*layer_index, *position),
-                data.as_bytes(),
+                &data.as_bytes(),
             );
         }
     }
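The added `&` on both `as_bytes()` calls is forced by the type change: `OptionalHash::as_bytes` returns a `[u8; 33]` array by value, whereas `H256::as_bytes` returned a `&[u8]` slice, and the transaction's `put` expects a byte slice, which the borrowed array coerces to.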
@@ -653,63 +653,63 @@ impl NodeTransaction<DataRoot> for NodeDBTransaction {
     }
 }
 
-// Adapter implementation for OptionalHash that delegates to the H256 implementation
-impl NodeDatabase<OptionalHash> for FlowDBStore {
-    fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
-        Ok(self.get_node(layer, pos)?.map(OptionalHash::some))
-    }
+// // Adapter implementation for OptionalHash that delegates to the H256 implementation
+// impl NodeDatabase<OptionalHash> for FlowDBStore {
+//     fn get_node(&self, layer: usize, pos: usize) -> Result<Option<OptionalHash>> {
+//         Ok(self.get_node(layer, pos)?.map(OptionalHash::some))
+//     }
 
-    fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
-        // Layer size is the same regardless of hash type
-        <Self as NodeDatabase<DataRoot>>::get_layer_size(self, layer)
-    }
+//     fn get_layer_size(&self, layer: usize) -> Result<Option<usize>> {
+//         // Layer size is the same regardless of hash type
+//         <Self as NodeDatabase<DataRoot>>::get_layer_size(self, layer)
+//     }
 
-    fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
-        Box::new(OptionalHashNodeDBTransaction(self.start_transaction()))
-    }
+//     fn start_transaction(&self) -> Box<dyn NodeTransaction<OptionalHash>> {
+//         Box::new(OptionalHashNodeDBTransaction(self.start_transaction()))
+//     }
 
-    fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
-        let h256_tx = tx
-            .into_any()
-            .downcast::<OptionalHashNodeDBTransaction>()
-            .map_err(|_| anyhow::anyhow!("Failed to downcast OptionalHashNodeDBTransaction"))?;
-        self.commit(h256_tx.0)
-    }
-}
+//     fn commit(&self, tx: Box<dyn NodeTransaction<OptionalHash>>) -> Result<()> {
+//         let h256_tx = tx
+//             .into_any()
+//             .downcast::<OptionalHashNodeDBTransaction>()
+//             .map_err(|_| anyhow::anyhow!("Failed to downcast OptionalHashNodeDBTransaction"))?;
+//         self.commit(h256_tx.0)
+//     }
+// }
 
-// Wrapper for NodeTransaction<OptionalHash> that delegates to NodeTransaction<H256>
-pub struct OptionalHashNodeDBTransaction(Box<dyn NodeTransaction<DataRoot>>);
+// // Wrapper for NodeTransaction<OptionalHash> that delegates to NodeTransaction<H256>
+// pub struct OptionalHashNodeDBTransaction(Box<dyn NodeTransaction<DataRoot>>);
 
-impl NodeTransaction<OptionalHash> for OptionalHashNodeDBTransaction {
-    fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
-        self.0.save_node(layer, pos, &node.unwrap());
-    }
+// impl NodeTransaction<OptionalHash> for OptionalHashNodeDBTransaction {
+//     fn save_node(&mut self, layer: usize, pos: usize, node: &OptionalHash) {
+//         self.0.save_node(layer, pos, &node.unwrap());
+//     }
 
-    fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
-        let h256_nodes: Vec<(usize, usize, DataRoot)> = nodes
-            .iter()
-            .map(|(layer, pos, oh)| (*layer, *pos, oh.unwrap()))
-            .collect();
-        let h256_node_refs: Vec<(usize, usize, &DataRoot)> = h256_nodes
-            .iter()
-            .map(|(layer, pos, h)| (*layer, *pos, h))
-            .collect();
-        self.0.save_node_list(&h256_node_refs);
-    }
+//     fn save_node_list(&mut self, nodes: &[(usize, usize, &OptionalHash)]) {
+//         let h256_nodes: Vec<(usize, usize, DataRoot)> = nodes
+//             .iter()
+//             .map(|(layer, pos, oh)| (*layer, *pos, oh.unwrap()))
+//             .collect();
+//         let h256_node_refs: Vec<(usize, usize, &DataRoot)> = h256_nodes
+//             .iter()
+//             .map(|(layer, pos, h)| (*layer, *pos, h))
+//             .collect();
+//         self.0.save_node_list(&h256_node_refs);
+//     }
 
-    fn remove_node_list(&mut self, nodes: &[(usize, usize)]) {
-        self.0.remove_node_list(nodes);
-    }
+//     fn remove_node_list(&mut self, nodes: &[(usize, usize)]) {
+//         self.0.remove_node_list(nodes);
+//     }
 
-    fn save_layer_size(&mut self, layer: usize, size: usize) {
-        self.0.save_layer_size(layer, size);
-    }
+//     fn save_layer_size(&mut self, layer: usize, size: usize) {
+//         self.0.save_layer_size(layer, size);
+//     }
 
-    fn remove_layer_size(&mut self, layer: usize) {
-        self.0.remove_layer_size(layer);
-    }
+//     fn remove_layer_size(&mut self, layer: usize) {
+//         self.0.remove_layer_size(layer);
+//     }
 
-    fn into_any(self: Box<Self>) -> Box<dyn Any> {
-        Box::new(self)
-    }
-}
+//     fn into_any(self: Box<Self>) -> Box<dyn Any> {
+//         Box::new(self)
+//     }
+// }
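Commenting this adapter out follows from the hunk at line 580 above: `FlowDBStore` now implements `NodeDatabase<OptionalHash>` directly, so keeping the delegating version as well would leave two conflicting `NodeDatabase<OptionalHash>` impls for the same type (and its `node.unwrap()` calls would panic on `None` nodes anyway). The wholesale `//` prefixes suggest the block is parked rather than deleted while the bug is tracked down.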
@@ -946,17 +946,24 @@ impl LogManager {
            return Ok(());
        }
        let start_time = Instant::now();
+       println!(
+           "append_subtree_list: tx_seq={} tx_start_index={} merkle_list={:?}",
+           tx_seq, tx_start_index, merkle_list
+       );
+
        self.pad_tx(tx_seq, tx_start_index, &mut *merkle)?;
 
        for (subtree_depth, subtree_root) in merkle_list {
            let subtree_size = 1 << (subtree_depth - 1);
+           println!("append new subtree to pora_chunks_merkle");
            if merkle.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE {
+               println!("append to last_chunk_");
                merkle
                    .last_chunk_merkle
                    .append_subtree(subtree_depth, OptionalHash::some(subtree_root))?;
                if merkle.last_chunk_merkle.leaves() == subtree_size {
                    // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree.
 
                    merkle
                        .pora_chunks_merkle
                        .append_subtree(1, merkle.last_chunk_merkle.root())?;
@@ -972,17 +979,23 @@ impl LogManager {
                    )?;
                }
            } else {
+               println!("append to pora_chunks_");
                // `last_chunk_merkle` has been padded here, so a subtree should not be across
                // the chunks boundary.
                assert_eq!(merkle.last_chunk_merkle.leaves(), 0);
                assert!(subtree_size >= PORA_CHUNK_SIZE);
                merkle.pora_chunks_merkle.append_subtree(
                    subtree_depth - log2_pow2(PORA_CHUNK_SIZE),
-                   subtree_root.into(),
+                   OptionalHash::some(subtree_root),
                )?;
            }
        }
-
+       println!(
+           "after append_subtree_list: tx_seq={} tx_start_index={} last_chunk={}",
+           tx_seq,
+           tx_start_index,
+           merkle.last_chunk_merkle.leaves()
+       );
        metrics::APPEND_SUBTREE_LIST.update_since(start_time);
        Ok(())
    }
@@ -168,6 +168,7 @@ fn test_revert() {
 #[test]
 fn test_put_tx() {
     for i in 0..12 {
+        println!("{}", i);
         let chunk_count = 0xF << i;
         let mut store = create_store();
         put_tx(&mut store, chunk_count, 0);
@@ -200,7 +201,9 @@ fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) {
         // TODO: This can come from `tx_merkle`.
         merkle_nodes,
     };
+    println!("put tx");
     store.put_tx(tx.clone()).unwrap();
+    println!("put tx done");
     for start_index in (0..chunk_count).step_by(PORA_CHUNK_SIZE) {
         let end = cmp::min((start_index + PORA_CHUNK_SIZE) * CHUNK_SIZE, data.len());
         let chunk_array = ChunkArray {
@@ -209,5 +212,6 @@ fn put_tx(store: &mut LogManager, chunk_count: usize, seq: u64) {
         };
         store.put_chunks(tx.seq, chunk_array.clone()).unwrap();
     }
+    println!("put chunks done");
     store.finalize_tx(tx.seq).unwrap();
 }