Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-01-18 11:05:18 +00:00)
Hardcode pad data segment root. (#250)
* Hardcode pad data segment root.

* fix deref

---------

Co-authored-by: Peter Zhang <peter@0g.ai>
parent 506d234562
commit 2f9960e8e7
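Every complete pad segment handled by the padding loop changed below is PORA_CHUNK_SIZE entries of ENTRY_SIZE zero bytes, so its segment (Merkle) root never changes. This commit therefore computes that root once, on first use, in a once_cell Lazy static (PAD_SEGMENT_ROOT, added below) instead of rebuilding the same Merkle tree for every padded chunk. A minimal, self-contained sketch of the compute-once pattern; segment_root() and SEGMENT_BYTES are placeholders for illustration, not the project's Merkle / data_to_merkle_leaves code:

use once_cell::sync::Lazy;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Placeholder size standing in for ENTRY_SIZE * PORA_CHUNK_SIZE (256 B * 1024 entries).
const SEGMENT_BYTES: usize = 256 * 1024;

// Placeholder for the real computation, `Merkle::new(data_to_merkle_leaves(..), 0, None).root()`.
fn segment_root(data: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    hasher.finish()
}

// The closure runs exactly once, on first access; later reads reuse the cached value.
static PAD_SEGMENT_ROOT: Lazy<u64> = Lazy::new(|| segment_root(&[0u8; SEGMENT_BYTES]));

fn main() {
    let first = *PAD_SEGMENT_ROOT; // computes and caches
    let second = *PAD_SEGMENT_ROOT; // reuses the cached root
    assert_eq!(first, second);
}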
Cargo.lock (generated, 1 line changed)
@@ -7302,6 +7302,7 @@ dependencies = [
  "kvdb-rocksdb",
  "merkle_light",
  "merkle_tree",
+ "once_cell",
  "parking_lot 0.12.3",
  "rand 0.8.5",
  "rayon",
@@ -31,6 +31,7 @@ parking_lot = "0.12.3"
 serde_json = "1.0.127"
 tokio = { version = "1.38.0", features = ["full"] }
 task_executor = { path = "../../common/task_executor" }
+once_cell = { version = "1.19.0", features = [] }
 
 [dev-dependencies]
 rand = "0.8.5"
@@ -1,3 +1,5 @@
+use super::tx_store::BlockHashAndSubmissionIndex;
+use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
 use crate::config::ShardConfig;
 use crate::log_store::flow_store::{batch_iter_sharded, FlowConfig, FlowDBStore, FlowStore};
 use crate::log_store::tx_store::TransactionStore;
@@ -11,6 +13,7 @@ use ethereum_types::H256;
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use merkle_light::merkle::{log2_pow2, MerkleTree};
 use merkle_tree::RawLeafSha3Algorithm;
+use once_cell::sync::Lazy;
 use parking_lot::RwLock;
 use rayon::iter::ParallelIterator;
 use rayon::prelude::ParallelSlice;
@@ -25,9 +28,6 @@ use std::sync::mpsc;
 use std::sync::Arc;
 use tracing::{debug, error, info, instrument, trace, warn};
 
-use super::tx_store::BlockHashAndSubmissionIndex;
-use super::{FlowSeal, MineLoadChunk, SealAnswer, SealTask};
-
 /// 256 Bytes
 pub const ENTRY_SIZE: usize = 256;
 /// 1024 Entries.
@@ -47,6 +47,14 @@ pub const COL_NUM: u32 = 9;
 // Process at most 1M entries (256MB) pad data at a time.
 const PAD_MAX_SIZE: usize = 1 << 20;
 
+static PAD_SEGMENT_ROOT: Lazy<H256> = Lazy::new(|| {
+    Merkle::new(
+        data_to_merkle_leaves(&[0; ENTRY_SIZE * PORA_CHUNK_SIZE]).unwrap(),
+        0,
+        None,
+    )
+    .root()
+});
 pub struct UpdateFlowMessage {
     pub root_map: BTreeMap<usize, (H256, usize)>,
     pub pad_data: usize,
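The "fix deref" part of the commit message presumably refers to reading the cached value back: Lazy<H256> dereferences to H256, and H256 is a small Copy fixed-hash type, so *PAD_SEGMENT_ROOT yields a copy of the root that can be passed to append() and stored in root_map. A tiny sketch of that deref behaviour (u64 stands in for H256 here):

use once_cell::sync::Lazy;

static ROOT: Lazy<u64> = Lazy::new(|| 0xdead_beef);

// Stand-in for `merkle.pora_chunks_merkle.append(..)` in the diff, which receives the root by value.
fn append(root: u64) {
    let _ = root;
}

fn main() {
    // Lazy<T> implements Deref<Target = T>; for Copy types, `*ROOT` copies the cached value out.
    append(*ROOT);
}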
@@ -967,12 +975,11 @@ impl LogManager {
             // Pad with more complete chunks.
             let mut start_index = last_chunk_pad / ENTRY_SIZE;
             while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE {
-                let data = pad_data[start_index * ENTRY_SIZE
-                    ..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE]
-                    .to_vec();
-                let root = Merkle::new(data_to_merkle_leaves(&data)?, 0, None).root();
-                merkle.pora_chunks_merkle.append(root);
-                root_map.insert(merkle.pora_chunks_merkle.leaves() - 1, (root, 1));
+                merkle.pora_chunks_merkle.append(*PAD_SEGMENT_ROOT);
+                root_map.insert(
+                    merkle.pora_chunks_merkle.leaves() - 1,
+                    (*PAD_SEGMENT_ROOT, 1),
+                );
                 start_index += PORA_CHUNK_SIZE;
             }
             assert_eq!(pad_data.len(), start_index * ENTRY_SIZE);
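With the precomputed root, the padding loop no longer slices pad_data, copies each 256 KiB segment into a Vec, or rebuilds a Merkle tree per iteration: it appends the same constant root and records it in root_map for every complete pad segment, which also drops the `?` error path from the loop body.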