Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-04-04 15:35:18 +00:00)

Compare commits: b76fa7be9b...b131dc532f (2 commits)
Commits:
- b131dc532f
- 4e2c5fa8a4
@@ -113,12 +113,16 @@ impl Transaction {
         1 << (depth - 1)
     }
 
-    pub fn num_entries(&self) -> usize {
-        self.merkle_nodes.iter().fold(0, |size, &(depth, _)| {
+    pub fn num_entries_of_list(merkle_nodes: &[(usize, DataRoot)]) -> usize {
+        merkle_nodes.iter().fold(0, |size, &(depth, _)| {
             size + Transaction::num_entries_of_node(depth)
         })
     }
 
+    pub fn num_entries(&self) -> usize {
+        Self::num_entries_of_list(&self.merkle_nodes)
+    }
+
     pub fn hash(&self) -> H256 {
         let bytes = self.as_ssz_bytes();
         let mut h = Keccak::v256();
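This refactor splits the entry-count arithmetic out of `num_entries` so it can be applied to any list of `(depth, root)` subtree pairs, not just a transaction's own `merkle_nodes`. A minimal Python sketch of the same arithmetic (assuming, as `num_entries_of_node` above shows, that a subtree of depth d covers 2^(d-1) flow entries):

```python
def num_entries_of_node(depth: int) -> int:
    # A subtree of depth d covers 2**(d - 1) flow entries.
    return 1 << (depth - 1)

def num_entries_of_list(merkle_nodes) -> int:
    # Total entries of a transaction = sum over its subtree roots.
    return sum(num_entries_of_node(depth) for depth, _ in merkle_nodes)

# Example: subtrees of depth 3, 2, 1 cover 4 + 2 + 1 = 7 entries.
assert num_entries_of_list([(3, "r0"), (2, "r1"), (1, "r2")]) == 7
```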
@@ -257,7 +257,7 @@ impl LogStoreWrite for LogManager {
         }
         let maybe_same_data_tx_seq = self.tx_store.put_tx(tx.clone())?.first().cloned();
         // TODO(zz): Should we validate received tx?
-        self.append_subtree_list(tx.merkle_nodes.clone(), &mut merkle)?;
+        self.append_subtree_list(tx.start_entry_index, tx.merkle_nodes.clone(), &mut merkle)?;
         merkle.commit_merkle(tx.seq)?;
         debug!(
             "commit flow root: root={:?}",
@@ -868,6 +868,7 @@ impl LogManager {
     #[instrument(skip(self, merkle))]
     fn append_subtree_list(
         &self,
+        tx_start_index: u64,
         merkle_list: Vec<(usize, DataRoot)>,
         merkle: &mut MerkleManager,
     ) -> Result<()> {
@@ -875,7 +876,7 @@ impl LogManager {
             return Ok(());
         }
 
-        self.pad_tx(1 << (merkle_list[0].0 - 1), &mut *merkle)?;
+        self.pad_tx(tx_start_index, &mut *merkle)?;
 
         let mut batch_root_map = BTreeMap::new();
         for (subtree_depth, subtree_root) in merkle_list {
@@ -923,18 +924,18 @@ impl LogManager {
     }
 
     #[instrument(skip(self, merkle))]
-    fn pad_tx(&self, first_subtree_size: u64, merkle: &mut MerkleManager) -> Result<()> {
+    fn pad_tx(&self, tx_start_index: u64, merkle: &mut MerkleManager) -> Result<()> {
         // Check if we need to pad the flow.
         let mut tx_start_flow_index =
             merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64;
-        let extra = tx_start_flow_index % first_subtree_size;
+        let pad_size = tx_start_index - tx_start_flow_index;
         trace!(
             "before pad_tx {} {}",
             merkle.pora_chunks_merkle.leaves(),
             merkle.last_chunk_merkle.leaves()
         );
-        if extra != 0 {
-            for pad_data in Self::padding((first_subtree_size - extra) as usize) {
+        if pad_size != 0 {
+            for pad_data in Self::padding(pad_size as usize) {
                 let mut is_full_empty = true;
                 let mut root_map = BTreeMap::new();
 
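The padding rule changes meaning here: the old `pad_tx` only aligned the current flow length up to a multiple of the first subtree's size, while the new one pads the flow exactly to the transaction's on-chain start index, so local flow indices always agree with the chain. A minimal sketch of the two computations (pure arithmetic; names taken from the diff):

```python
def old_pad_size(flow_len: int, first_subtree_size: int) -> int:
    # Old rule: align the flow to the first subtree's boundary.
    extra = flow_len % first_subtree_size
    return first_subtree_size - extra if extra != 0 else 0

def new_pad_size(flow_len: int, tx_start_index: int) -> int:
    # New rule: pad exactly up to the tx's on-chain start index.
    return tx_start_index - flow_len

# Example: the local flow holds 960 entries, the next tx starts on-chain
# at entry 2048, and its first subtree has size 1024. Alignment alone
# would pad only 64 entries (960 -> 1024); index-based padding inserts
# the full 1088 (960 -> 2048).
assert old_pad_size(960, 1024) == 64
assert new_pad_size(960, 2048) == 1088
```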
@@ -997,12 +998,10 @@ impl LogManager {
                 // Update the flow database.
                 // This should be called before `complete_last_chunk_merkle` so that we do not save
                 // subtrees with data known.
-                self.flow_store
-                    .append_entries(ChunkArray {
-                        data: pad_data.to_vec(),
-                        start_index: tx_start_flow_index,
-                    })
-                    .unwrap();
+                self.flow_store.append_entries(ChunkArray {
+                    data: pad_data.to_vec(),
+                    start_index: tx_start_flow_index,
+                })?;
             }
 
             tx_start_flow_index += data_size as u64;
@@ -1 +1 @@
-a0b536c6acff24b5d4bf20d9db4e95c399e61196
+75c251804a29ab22adced50d92478cf0baf834bc
File diff suppressed because one or more lines are too long
@@ -25,10 +25,23 @@
       "outputs": [],
       "stateMutability": "nonpayable",
       "type": "function"
     },
+    {
+      "inputs": [],
+      "name": "pricePerSector",
+      "outputs": [
+        {
+          "internalType": "uint256",
+          "name": "",
+          "type": "uint256"
+        }
+      ],
+      "stateMutability": "pure",
+      "type": "function"
+    }
   ],
-  "bytecode": "0x6080604052348015600f57600080fd5b5060a08061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063da6eb36a14602d575b600080fd5b603d6038366004603f565b505050565b005b600080600060608486031215605357600080fd5b50508135936020830135935060409092013591905056fea2646970667358221220fba54ab16c6496385cdd933e87b05b9e545a857b82ffa918f0d0e4a34ae41d7164736f6c63430008100033",
-  "deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063da6eb36a14602d575b600080fd5b603d6038366004603f565b505050565b005b600080600060608486031215605357600080fd5b50508135936020830135935060409092013591905056fea2646970667358221220fba54ab16c6496385cdd933e87b05b9e545a857b82ffa918f0d0e4a34ae41d7164736f6c63430008100033",
+  "bytecode": "0x608060405234801561001057600080fd5b5060be8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
+  "deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
   "linkReferences": {},
   "deployedLinkReferences": {}
 }
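The regenerated artifact adds a `pricePerSector()` function to the mock ABI (`pure`, returning a `uint256`) and rebuilds the bytecode to match. A sketch of how a client could read it, assuming web3.py and that `market_address`/`market_abi` (hypothetical names) point at this artifact:

```python
from web3 import Web3

def read_price_per_sector(w3: Web3, market_address: str, market_abi: list) -> int:
    # `pure` function: a plain eth_call suffices, no transaction or gas needed.
    market = w3.eth.contract(address=market_address, abi=market_abi)
    return market.functions.pricePerSector().call()

# Usage (assumed local dev endpoint):
# w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
# print(read_price_per_sector(w3, market_address, market_abi))
```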
@@ -70,8 +70,8 @@
       "type": "function"
     }
   ],
-  "bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ebb4f7274983bea96e7fd68a63e91f4ad67260ff76111312d8c8559b9b5b621064736f6c63430008100033",
-  "deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ebb4f7274983bea96e7fd68a63e91f4ad67260ff76111312d8c8559b9b5b621064736f6c63430008100033",
+  "bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
+  "deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
   "linkReferences": {},
   "deployedLinkReferences": {}
 }
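In this second artifact the ABI is untouched; the bytecode differs only in the 32-byte hash inside Solidity's CBOR metadata trailer. A sketch of comparing artifacts while ignoring that trailer (relying on the standard convention that the last two bytes of the code encode the metadata length):

```python
def strip_metadata(bytecode_hex: str) -> str:
    # Solidity appends CBOR-encoded metadata; its byte length is stored
    # in the final two bytes of the code.
    code = bytes.fromhex(bytecode_hex.removeprefix("0x"))
    meta_len = int.from_bytes(code[-2:], "big")
    return code[: -(meta_len + 2)].hex()

# With old_bytecode/new_bytecode bound to the two strings above:
# assert strip_metadata(old_bytecode) == strip_metadata(new_bytecode)
```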
File diff suppressed because one or more lines are too long (5 more files)
tests/shard_submission_test.py (new file, 81 lines)
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+import time
+
+import base64
+import random
+from test_framework.test_framework import TestFramework
+from utility.submission import ENTRY_SIZE, submit_data
+from utility.submission import create_submission
+from utility.utils import (
+    assert_equal,
+    wait_until,
+)
+
+
+class ShardSubmitTest(TestFramework):
+
+    def setup_params(self):
+        self.num_blockchain_nodes = 1
+        self.num_nodes = 4
+        self.zgs_node_configs[0] = {
+            "db_max_num_sectors": 2 ** 30,
+            "shard_position": "0/4"
+        }
+        self.zgs_node_configs[1] = {
+            "db_max_num_sectors": 2 ** 30,
+            "shard_position": "1/4"
+        }
+        self.zgs_node_configs[2] = {
+            "db_max_num_sectors": 2 ** 30,
+            "shard_position": "2/4"
+        }
+        self.zgs_node_configs[3] = {
+            "db_max_num_sectors": 2 ** 30,
+            "shard_position": "3/4"
+        }
+
+    def run_test(self):
+        data_size = [
+            256*960,
+            256*1024,
+            2,
+            255,
+            256*960,
+            256*120,
+            256,
+            257,
+            1023,
+            1024,
+            1025,
+            256 * 1023,
+            256 * 1023 + 1,
+            256 * 1024,
+            256 * 1024 + 1,
+            256 * 1025,
+            256 * 2048 - 1,
+            256 * 2048,
+            256 * 16385,
+            256 * 1024 * 256,
+        ]
+
+        for i, v in enumerate(data_size):
+            self.submission_data(v, i + 1, True)
+
+    def submission_data(self, size, submission_index, rand_data=True):
+        self.log.info("file size: %d", size)
+        chunk_data = random.randbytes(size) if rand_data else b"\x10" * size
+
+        submissions, data_root = create_submission(chunk_data)
+        self.log.info("data root: %s, submissions: %s", data_root, submissions)
+        self.contract.submit(submissions)
+
+        wait_until(lambda: self.contract.num_submissions() == submission_index)
+
+        for i in range(4):
+            client = self.nodes[i]
+            wait_until(lambda: client.zgs_get_file_info(data_root) is not None)
+            submit_data(client, chunk_data)
+            wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"])
+
+if __name__ == "__main__":
+    ShardSubmitTest().main()
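The new test runs four nodes holding pairwise-disjoint shards `0/4` through `3/4`, so each segment is stored by exactly one node and a file only reports `finalized` after every node has uploaded its share. A small sketch of the coverage invariant the test relies on (assumed semantics of `shard_position = id/num`):

```python
def stored_by(seg_index: int, shard_id: int, num_shard: int) -> bool:
    # shard_position "id/num": the node keeps the segments whose global
    # index is congruent to id modulo num.
    return seg_index % num_shard == shard_id

# With shards 0/4..3/4, every segment index has exactly one owner,
# so the four nodes together cover the whole file.
for seg in range(16):
    assert sum(stored_by(seg, s, 4) for s in range(4)) == 1
```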
@@ -194,13 +194,16 @@ def generate_merkle_tree_by_batch(data):
 
 
 def submit_data(client, data):
     # NOTE: we assume the data is unique in this function, otherwise zgs_getFileInfo will only get the information of the first data with same root
     shard_config = client.rpc.zgs_getShardConfig()
     shard_id = int(shard_config["shardId"])
     num_shard = int(shard_config["numShard"])
 
     segments = data_to_segments(data)
+    file_info = client.zgs_get_file_info(segments[0]["root"])
+    start_seg_index = file_info["tx"]["startEntryIndex"] // 1024
     for index, segment in enumerate(segments):
-        if index % num_shard == shard_id:
+        if (start_seg_index + index) % num_shard == shard_id:
             client.zgs_upload_segment(segment)
     return segments
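The fix in `submit_data`: shard membership must be computed from a segment's global flow index, not its file-local index, because a file's `startEntryIndex` is generally not a multiple of `num_shard * 1024`. A minimal sketch of the corrected mapping (1024 entries per segment, as in the diff; `segments_to_upload` is an illustrative name, not a function from the repo):

```python
def segments_to_upload(start_entry_index: int, num_segments: int,
                       shard_id: int, num_shard: int):
    # Global segment index = on-chain start index / 1024 + local offset.
    start_seg_index = start_entry_index // 1024
    return [i for i in range(num_segments)
            if (start_seg_index + i) % num_shard == shard_id]

# A file starting at entry 3072 (global segment 3) with 4 segments:
# shard 3/4 owns local segment 0, shard 0/4 owns local segment 1, and so on.
assert segments_to_upload(3072, 4, 3, 4) == [0]
assert segments_to_upload(3072, 4, 0, 4) == [1]
```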