Compare commits


No commits in common. "b131dc532fd0829d2353e07de8a77c3a2a1cc9c8" and "b76fa7be9b01d980cc9c88d52fc682ab5311a2f6" have entirely different histories.

13 changed files with 32 additions and 132 deletions

View File

@@ -113,16 +113,12 @@ impl Transaction {
         1 << (depth - 1)
     }
 
-    pub fn num_entries_of_list(merkle_nodes: &[(usize, DataRoot)]) -> usize {
-        merkle_nodes.iter().fold(0, |size, &(depth, _)| {
+    pub fn num_entries(&self) -> usize {
+        self.merkle_nodes.iter().fold(0, |size, &(depth, _)| {
             size + Transaction::num_entries_of_node(depth)
         })
     }
 
-    pub fn num_entries(&self) -> usize {
-        Self::num_entries_of_list(&self.merkle_nodes)
-    }
-
     pub fn hash(&self) -> H256 {
         let bytes = self.as_ssz_bytes();
         let mut h = Keccak::v256();
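Both sides compute the same fold: a subtree recorded at depth d covers num_entries_of_node(d) = 1 << (d - 1) flow entries, and num_entries sums this over the transaction's merkle_nodes. The change only inlines the standalone helper into the method. A self-contained sketch of that arithmetic, with a plain 32-byte array standing in for DataRoot (an assumption for illustration):

fn num_entries_of_node(depth: usize) -> usize {
    // A subtree of depth `d` has 2^(d-1) leaves, i.e. flow entries.
    1 << (depth - 1)
}

fn num_entries(merkle_nodes: &[(usize, [u8; 32])]) -> usize {
    merkle_nodes
        .iter()
        .fold(0, |size, &(depth, _)| size + num_entries_of_node(depth))
}

fn main() {
    // A file stored as subtrees of depth 4, 2, and 1 covers 8 + 2 + 1 = 11 entries.
    let nodes = [(4, [0u8; 32]), (2, [0u8; 32]), (1, [0u8; 32])];
    assert_eq!(num_entries(&nodes), 11);
}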

View File

@@ -257,7 +257,7 @@ impl LogStoreWrite for LogManager {
         }
         let maybe_same_data_tx_seq = self.tx_store.put_tx(tx.clone())?.first().cloned();
         // TODO(zz): Should we validate received tx?
-        self.append_subtree_list(tx.start_entry_index, tx.merkle_nodes.clone(), &mut merkle)?;
+        self.append_subtree_list(tx.merkle_nodes.clone(), &mut merkle)?;
         merkle.commit_merkle(tx.seq)?;
         debug!(
             "commit flow root: root={:?}",
@@ -868,7 +868,6 @@ impl LogManager {
     #[instrument(skip(self, merkle))]
     fn append_subtree_list(
         &self,
-        tx_start_index: u64,
         merkle_list: Vec<(usize, DataRoot)>,
         merkle: &mut MerkleManager,
     ) -> Result<()> {
@@ -876,7 +875,7 @@ impl LogManager {
             return Ok(());
         }
 
-        self.pad_tx(tx_start_index, &mut *merkle)?;
+        self.pad_tx(1 << (merkle_list[0].0 - 1), &mut *merkle)?;
 
         let mut batch_root_map = BTreeMap::new();
         for (subtree_depth, subtree_root) in merkle_list {
@@ -924,18 +923,18 @@ impl LogManager {
     }
 
     #[instrument(skip(self, merkle))]
-    fn pad_tx(&self, tx_start_index: u64, merkle: &mut MerkleManager) -> Result<()> {
+    fn pad_tx(&self, first_subtree_size: u64, merkle: &mut MerkleManager) -> Result<()> {
         // Check if we need to pad the flow.
         let mut tx_start_flow_index =
             merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64;
-        let pad_size = tx_start_index - tx_start_flow_index;
+        let extra = tx_start_flow_index % first_subtree_size;
         trace!(
             "before pad_tx {} {}",
             merkle.pora_chunks_merkle.leaves(),
             merkle.last_chunk_merkle.leaves()
         );
-        if pad_size != 0 {
-            for pad_data in Self::padding(pad_size as usize) {
+        if extra != 0 {
+            for pad_data in Self::padding((first_subtree_size - extra) as usize) {
                 let mut is_full_empty = true;
                 let mut root_map = BTreeMap::new();
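Together with the call site above, this rewrites the padding rule: instead of padding the flow up to a transaction start index supplied by the caller, pad_tx now aligns the flow's tail to the size of the transaction's first subtree, 1 << (merkle_list[0].0 - 1) entries. A minimal sketch of that alignment arithmetic, using hypothetical names:

/// Padding entries needed so `flow_tail` becomes a multiple of
/// `first_subtree_size`; zero if it is already aligned.
fn pad_size(flow_tail: u64, first_subtree_size: u64) -> u64 {
    let extra = flow_tail % first_subtree_size;
    if extra == 0 {
        0
    } else {
        first_subtree_size - extra
    }
}

fn main() {
    // A first subtree of depth 4 covers 1 << 3 = 8 entries.
    let first_subtree_size = 1u64 << 3;
    // With 21 entries already in the flow, 3 entries of padding
    // bring the tail to 24, the next multiple of 8.
    assert_eq!(pad_size(21, first_subtree_size), 3);
    // An already-aligned tail needs no padding.
    assert_eq!(pad_size(24, first_subtree_size), 0);
}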
@@ -998,10 +997,12 @@ impl LogManager {
                 // Update the flow database.
                 // This should be called before `complete_last_chunk_merkle` so that we do not save
                 // subtrees with data known.
-                self.flow_store.append_entries(ChunkArray {
-                    data: pad_data.to_vec(),
-                    start_index: tx_start_flow_index,
-                })?;
+                self.flow_store
+                    .append_entries(ChunkArray {
+                        data: pad_data.to_vec(),
+                        start_index: tx_start_flow_index,
+                    })
+                    .unwrap();
             }
             tx_start_flow_index += data_size as u64;
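Beyond the reformatting, the error handling changes here: the old side propagated a failed flow-store write to the caller with `?`, while the new side panics via `.unwrap()`. A minimal sketch of the difference, using a stand-in Result type rather than the crate's actual error type:

fn append(fail: bool) -> Result<(), String> {
    if fail {
        Err("flow store write failed".into())
    } else {
        Ok(())
    }
}

// `?` hands the error back to the caller, which can retry or log it.
fn propagate() -> Result<(), String> {
    append(true)?;
    Ok(())
}

fn main() {
    assert!(propagate().is_err()); // recoverable
    // `append(true).unwrap()` would instead panic and abort on failure.
}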

View File

@@ -1 +1 @@
-75c251804a29ab22adced50d92478cf0baf834bc
+a0b536c6acff24b5d4bf20d9db4e95c399e61196

File diff suppressed because one or more lines are too long

View File

@@ -25,23 +25,10 @@
       "outputs": [],
       "stateMutability": "nonpayable",
       "type": "function"
-    },
-    {
-      "inputs": [],
-      "name": "pricePerSector",
-      "outputs": [
-        {
-          "internalType": "uint256",
-          "name": "",
-          "type": "uint256"
-        }
-      ],
-      "stateMutability": "pure",
-      "type": "function"
     }
   ],
-  "bytecode": "0x608060405234801561001057600080fd5b5060be8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
-  "deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060325760003560e01c806361ec5082146037578063da6eb36a14604b575b600080fd5b600060405190815260200160405180910390f35b605b6056366004605d565b505050565b005b600080600060608486031215607157600080fd5b50508135936020830135935060409092013591905056fea264697066735822122044ebf96fcad90f0bbc521513843d64fbc182c5c913a8210a4d638393793be63064736f6c63430008100033",
+  "bytecode": "0x6080604052348015600f57600080fd5b5060a08061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063da6eb36a14602d575b600080fd5b603d6038366004603f565b505050565b005b600080600060608486031215605357600080fd5b50508135936020830135935060409092013591905056fea2646970667358221220fba54ab16c6496385cdd933e87b05b9e545a857b82ffa918f0d0e4a34ae41d7164736f6c63430008100033",
+  "deployedBytecode": "0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063da6eb36a14602d575b600080fd5b603d6038366004603f565b505050565b005b600080600060608486031215605357600080fd5b50508135936020830135935060409092013591905056fea2646970667358221220fba54ab16c6496385cdd933e87b05b9e545a857b82ffa918f0d0e4a34ae41d7164736f6c63430008100033",
   "linkReferences": {},
   "deployedLinkReferences": {}
 }

View File

@@ -70,8 +70,8 @@
       "type": "function"
     }
   ],
-  "bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
-  "deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ce57385afc7714a4000e530d1e1154d214fc1c0e2392abde201018635be1a2ab64736f6c63430008100033",
+  "bytecode": "0x608060405234801561001057600080fd5b5060f18061001f6000396000f3fe60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ebb4f7274983bea96e7fd68a63e91f4ad67260ff76111312d8c8559b9b5b621064736f6c63430008100033",
+  "deployedBytecode": "0x60806040526004361060265760003560e01c806359e9670014602b578063b7a3c04c14603c575b600080fd5b603a60363660046058565b5050565b005b348015604757600080fd5b50603a60533660046079565b505050565b60008060408385031215606a57600080fd5b50508035926020909101359150565b600080600060608486031215608d57600080fd5b8335925060208401356001600160a01b038116811460aa57600080fd5b92959294505050604091909101359056fea2646970667358221220ebb4f7274983bea96e7fd68a63e91f4ad67260ff76111312d8c8559b9b5b621064736f6c63430008100033",
   "linkReferences": {},
   "deployedLinkReferences": {}
 }

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,81 +0,0 @@
-#!/usr/bin/env python3
-
-import time
-import base64
-import random
-from test_framework.test_framework import TestFramework
-from utility.submission import ENTRY_SIZE, submit_data
-from utility.submission import create_submission
-from utility.utils import (
-    assert_equal,
-    wait_until,
-)
-
-
-class ShardSubmitTest(TestFramework):
-
-    def setup_params(self):
-        self.num_blockchain_nodes = 1
-        self.num_nodes = 4
-        self.zgs_node_configs[0] = {
-            "db_max_num_sectors": 2 ** 30,
-            "shard_position": "0/4"
-        }
-        self.zgs_node_configs[1] = {
-            "db_max_num_sectors": 2 ** 30,
-            "shard_position": "1/4"
-        }
-        self.zgs_node_configs[2] = {
-            "db_max_num_sectors": 2 ** 30,
-            "shard_position": "2/4"
-        }
-        self.zgs_node_configs[3] = {
-            "db_max_num_sectors": 2 ** 30,
-            "shard_position": "3/4"
-        }
-
-    def run_test(self):
-        data_size = [
-            256*960,
-            256*1024,
-            2,
-            255,
-            256*960,
-            256*120,
-            256,
-            257,
-            1023,
-            1024,
-            1025,
-            256 * 1023,
-            256 * 1023 + 1,
-            256 * 1024,
-            256 * 1024 + 1,
-            256 * 1025,
-            256 * 2048 - 1,
-            256 * 2048,
-            256 * 16385,
-            256 * 1024 * 256,
-        ]
-
-        for i, v in enumerate(data_size):
-            self.submission_data(v, i + 1, True)
-
-    def submission_data(self, size, submission_index, rand_data=True):
-        self.log.info("file size: %d", size)
-        chunk_data = random.randbytes(size) if rand_data else b"\x10" * size
-
-        submissions, data_root = create_submission(chunk_data)
-        self.log.info("data root: %s, submissions: %s", data_root, submissions)
-        self.contract.submit(submissions)
-        wait_until(lambda: self.contract.num_submissions() == submission_index)
-
-        for i in range(4):
-            client = self.nodes[i]
-            wait_until(lambda: client.zgs_get_file_info(data_root) is not None)
-            submit_data(client, chunk_data)
-            wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"])
-
-
-if __name__ == "__main__":
-    ShardSubmitTest().main()
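The deleted test ran four storage nodes with shard_position set to "0/4" through "3/4", i.e. node i of a 4-way shard, and expected every submitted file to finalize on all of them. A hedged sketch of the coverage rule implied by "i/n" (the parsing helper and names are illustrative, not the node's actual config API):

/// Parse a shard position like "2/4" into (shard_id, num_shard).
fn parse_shard_position(s: &str) -> Option<(u64, u64)> {
    let (id, num) = s.split_once('/')?;
    Some((id.parse().ok()?, num.parse().ok()?))
}

/// A node at `shard_id` of `num_shard` stores segment `seg_index`
/// iff the index falls in its residue class.
fn covers(shard_id: u64, num_shard: u64, seg_index: u64) -> bool {
    seg_index % num_shard == shard_id
}

fn main() {
    let (shard_id, num_shard) = parse_shard_position("2/4").unwrap();
    // Node "2/4" stores segments 2, 6, 10, ...
    assert!(covers(shard_id, num_shard, 6));
    assert!(!covers(shard_id, num_shard, 7));
}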

View File

@@ -194,16 +194,13 @@ def generate_merkle_tree_by_batch(data):
 
 def submit_data(client, data):
     # NOTE: we assume the data is unique in this function, otherwise zgs_getFileInfo will only get the information of the first data with same root
     shard_config = client.rpc.zgs_getShardConfig()
     shard_id = int(shard_config["shardId"])
     num_shard = int(shard_config["numShard"])
 
     segments = data_to_segments(data)
-    file_info = client.zgs_get_file_info(segments[0]["root"])
-    start_seg_index = file_info["tx"]["startEntryIndex"] // 1024
-
     for index, segment in enumerate(segments):
-        if (start_seg_index + index) % num_shard == shard_id:
+        if index % num_shard == shard_id:
             client.zgs_upload_segment(segment)
 
     return segments
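The selection rule changes from flow-relative to file-relative: the old code offset each segment by the file's startEntryIndex // 1024 (its first segment's position in the global flow) before taking the residue, while the new code shards on the segment's index within the file alone. A small sketch contrasting the two predicates, with hypothetical values:

/// Return the segment indices a node at (`shard_id`, `num_shard`) uploads.
/// `start_seg_index` is Some(_) under the old flow-relative rule and
/// None under the new file-relative rule.
fn upload_targets(
    num_segments: u64,
    num_shard: u64,
    shard_id: u64,
    start_seg_index: Option<u64>,
) -> Vec<u64> {
    (0..num_segments)
        .filter(|index| match start_seg_index {
            Some(start) => (start + index) % num_shard == shard_id,
            None => index % num_shard == shard_id,
        })
        .collect()
}

fn main() {
    // For a file whose first segment sits at flow segment 6, the two
    // rules pick different segments for shard 1 of 4.
    assert_eq!(upload_targets(8, 4, 1, Some(6)), vec![3, 7]); // old: (6 + i) % 4 == 1
    assert_eq!(upload_targets(8, 4, 1, None), vec![1, 5]); // new: i % 4 == 1
}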