Remove async lock for storage.

parent 38cbb8e182
commit 6aa11b7b70
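The pattern applied throughout this diff is the same: the shared storage handle changes from `Arc<RwLock<dyn Store>>` (an outer `tokio::sync::RwLock` that every caller had to `.read().await` or `.write().await`) to a plain `Arc<dyn Store>`, with trait methods taking `&self` and any remaining mutable state guarded by fine-grained interior locks. A minimal sketch of the idea, assuming `parking_lot` and stand-in `Store`/`LogStoreImpl` types (the real traits live in the storage crates):

```rust
use std::sync::Arc;
use parking_lot::RwLock;

// Before: mutating methods took `&mut self`, so callers needed an outer
// `tokio::sync::RwLock` around the whole store and an `.await` per access.
// After: the trait exposes `&self` methods, and each mutable field carries
// its own interior lock, so a plain `Arc<dyn Store>` is shared across tasks.
trait Store: Send + Sync {
    fn get_sync_progress(&self) -> Option<u64>;
    fn put_sync_progress(&self, block_number: u64);
}

struct LogStoreImpl {
    // Fine-grained interior lock replaces the single outer async lock.
    sync_progress: RwLock<Option<u64>>,
}

impl Store for LogStoreImpl {
    fn get_sync_progress(&self) -> Option<u64> {
        *self.sync_progress.read()
    }
    fn put_sync_progress(&self, block_number: u64) {
        *self.sync_progress.write() = Some(block_number);
    }
}

fn main() {
    let store: Arc<dyn Store> = Arc::new(LogStoreImpl {
        sync_progress: RwLock::new(None),
    });
    // Callers use the store directly; no `.read().await` / `.write().await`.
    store.put_sync_progress(42);
    assert_eq!(store.get_sync_progress(), Some(42));
}
```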
@@ -122,7 +122,7 @@ impl LogEntryFetcher {
     pub fn start_remove_finalized_block_task(
         &self,
         executor: &TaskExecutor,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
         block_hash_cache: Arc<RwLock<BTreeMap<u64, Option<BlockHashAndSubmissionIndex>>>>,
         default_finalized_block_count: u64,
         remove_finalized_block_interval_minutes: u64,
@@ -133,7 +133,7 @@ impl LogEntryFetcher {
             loop {
                 debug!("processing finalized block");

-                let processed_block_number = match store.read().await.get_sync_progress() {
+                let processed_block_number = match store.get_sync_progress() {
                     Ok(Some((processed_block_number, _))) => Some(processed_block_number),
                     Ok(None) => None,
                     Err(e) => {
@@ -176,9 +176,7 @@ impl LogEntryFetcher {
                     }

                     for key in pending_keys.into_iter() {
-                        if let Err(e) =
-                            store.write().await.delete_block_hash_by_number(key)
-                        {
+                        if let Err(e) = store.delete_block_hash_by_number(key) {
                             error!(
                                 "remove block tx for number {} error: e={:?}",
                                 key, e
@@ -33,7 +33,7 @@ pub enum LogSyncEvent {
 pub struct LogSyncManager {
     config: LogSyncConfig,
     log_fetcher: LogEntryFetcher,
-    store: Arc<RwLock<dyn Store>>,
+    store: Arc<dyn Store>,
     data_cache: DataCache,

     next_tx_seq: u64,
@@ -48,9 +48,9 @@ impl LogSyncManager {
     pub async fn spawn(
         config: LogSyncConfig,
         executor: TaskExecutor,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
     ) -> Result<broadcast::Sender<LogSyncEvent>> {
-        let next_tx_seq = store.read().await.next_tx_seq();
+        let next_tx_seq = store.next_tx_seq();

         let executor_clone = executor.clone();
         let mut shutdown_sender = executor.shutdown_sender();
@@ -81,8 +81,6 @@ impl LogSyncManager {

                 let block_hash_cache = Arc::new(RwLock::new(
                     store
-                        .read()
-                        .await
                         .get_block_hashes()?
                         .into_iter()
                         .map(|(x, y)| (x, Some(y)))
@@ -124,7 +122,7 @@ impl LogSyncManager {
                 // TODO(zz): Handle reorg instead of return.
                 let mut need_handle_reorg = false;
                 let (mut start_block_number, mut start_block_hash) =
-                    match log_sync_manager.store.read().await.get_sync_progress()? {
+                    match log_sync_manager.store.get_sync_progress()? {
                         // No previous progress, so just use config.
                         None => {
                             let block_number = log_sync_manager.config.start_block_number;
@@ -176,7 +174,7 @@ impl LogSyncManager {
                     log_sync_manager.handle_data(reorg_rx).await?;

                     if let Some((block_number, block_hash)) =
-                        log_sync_manager.store.read().await.get_sync_progress()?
+                        log_sync_manager.store.get_sync_progress()?
                     {
                         start_block_number = block_number;
                         start_block_hash = block_hash;
@@ -301,7 +299,7 @@ impl LogSyncManager {
     async fn process_reverted(&mut self, tx_seq: u64) {
         warn!("revert for chain reorg: seq={}", tx_seq);
         {
-            let store = self.store.read().await;
+            let store = self.store.clone();
             for seq in tx_seq..self.next_tx_seq {
                 if matches!(store.check_tx_completed(seq), Ok(true)) {
                     if let Ok(Some(tx)) = store.get_tx_by_seq_number(seq) {
@@ -325,7 +323,7 @@ impl LogSyncManager {
         let _ = self.event_send.send(LogSyncEvent::ReorgDetected { tx_seq });

         // TODO(zz): `wrapping_sub` here is a hack to handle the case of tx_seq=0.
-        if let Err(e) = self.store.write().await.revert_to(tx_seq.wrapping_sub(1)) {
+        if let Err(e) = self.store.revert_to(tx_seq.wrapping_sub(1)) {
             error!("revert_to fails: e={:?}", e);
             return;
         }
@@ -353,7 +351,7 @@ impl LogSyncManager {
             );
         }

-        self.store.write().await.put_sync_progress((
+        self.store.put_sync_progress((
             block_number,
             block_hash,
             first_submission_index,
@@ -396,12 +394,12 @@ impl LogSyncManager {
     }

     async fn put_tx_inner(&mut self, tx: Transaction) -> bool {
-        if let Err(e) = self.store.write().await.put_tx(tx.clone()) {
+        if let Err(e) = self.store.put_tx(tx.clone()) {
             error!("put_tx error: e={:?}", e);
             false
         } else {
             if let Some(data) = self.data_cache.pop_data(&tx.data_merkle_root) {
-                let store = self.store.write().await;
+                let store = self.store.clone();
                 // We are holding a mutable reference of LogSyncManager, so no chain reorg is
                 // possible after put_tx.
                 if let Err(e) = store
@@ -9,10 +9,9 @@ pub trait PoraLoader: Send + Sync {
 }

 #[async_trait]
-impl PoraLoader for Arc<RwLock<dyn Store>> {
+impl PoraLoader for Arc<dyn Store> {
     async fn load_sealed_data(&self, chunk_index: u64) -> Option<MineLoadChunk> {
-        let store = &*self.read().await;
-        match store.flow().load_sealed_data(chunk_index) {
+        match self.flow().load_sealed_data(chunk_index) {
             Ok(Some(chunk)) => Some(chunk),
             _ => None,
         }
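With the outer lock gone, `PoraLoader` is implemented for `Arc<dyn Store>` directly and the loader body becomes a plain synchronous call. A sketch of the shape, with `Store`, `Flow`, and `MineLoadChunk` as stand-ins for the real crate types, and assuming the `async-trait` crate (which the original code already uses):

```rust
use std::sync::Arc;
use async_trait::async_trait;

pub struct MineLoadChunk;

pub trait Flow: Send + Sync {
    fn load_sealed_data(&self, chunk_index: u64) -> anyhow::Result<Option<MineLoadChunk>>;
}

pub trait Store: Send + Sync {
    fn flow(&self) -> &dyn Flow;
}

#[async_trait]
pub trait PoraLoader: Send + Sync {
    async fn load_sealed_data(&self, chunk_index: u64) -> Option<MineLoadChunk>;
}

#[async_trait]
impl PoraLoader for Arc<dyn Store> {
    async fn load_sealed_data(&self, chunk_index: u64) -> Option<MineLoadChunk> {
        // No `self.read().await` step any more; the store is usable as-is.
        match self.flow().load_sealed_data(chunk_index) {
            Ok(Some(chunk)) => Some(chunk),
            _ => None,
        }
    }
}
```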
@@ -21,11 +21,11 @@ fn set_miner_id(store: &dyn Store, miner_id: &H256) -> storage::error::Result<()

 pub(crate) async fn check_and_request_miner_id(
     config: &MinerConfig,
-    store: &RwLock<dyn Store>,
+    store: &dyn Store,
     provider: &Arc<MineServiceMiddleware>,
 ) -> Result<H256, String> {
-    let db_miner_id = load_miner_id(&*store.read().await)
-        .map_err(|e| format!("miner_id on db corrupt: {:?}", e))?;
+    let db_miner_id =
+        load_miner_id(store).map_err(|e| format!("miner_id on db corrupt: {:?}", e))?;

     let mine_contract = PoraMine::new(config.mine_address, provider.clone());

@@ -42,7 +42,7 @@ pub(crate) async fn check_and_request_miner_id(
         }
         (None, Some(c_id)) => {
             check_miner_id(&mine_contract, c_id).await?;
-            set_miner_id(&*store.write().await, &c_id)
+            set_miner_id(store, &c_id)
                 .map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;
             Ok(c_id)
         }
@@ -53,8 +53,7 @@ pub(crate) async fn check_and_request_miner_id(
         (None, None) => {
             let beneficiary = provider.address();
             let id = request_miner_id(&mine_contract, beneficiary).await?;
-            set_miner_id(&*store.write().await, &id)
-                .map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;
+            set_miner_id(store, &id).map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;
             Ok(id)
         }
     }
@@ -22,7 +22,7 @@ const CHAIN_STATUS_QUERY_PERIOD: u64 = 5;

 pub struct Sealer {
     flow_contract: ZgsFlow<MineServiceMiddleware>,
-    store: Arc<RwLock<dyn Store>>,
+    store: Arc<dyn Store>,
     context_cache: BTreeMap<u128, EpochRangeWithContextDigest>,
     last_context_flow_length: u64,
     miner_id: H256,
@@ -32,7 +32,7 @@ impl Sealer {
     pub fn spawn(
         executor: TaskExecutor,
         provider: Arc<MineServiceMiddleware>,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
         config: &MinerConfig,
         miner_id: H256,
     ) {
@@ -152,19 +152,11 @@ impl Sealer {

     async fn fetch_task(&self) -> Result<Option<Vec<SealTask>>> {
         let seal_index_max = self.last_context_flow_length as usize / SECTORS_PER_SEAL;
-        self.store
-            .read()
-            .await
-            .flow()
-            .pull_seal_chunk(seal_index_max)
+        self.store.flow().pull_seal_chunk(seal_index_max)
     }

     async fn submit_answer(&self, answers: Vec<SealAnswer>) -> Result<()> {
-        self.store
-            .write()
-            .await
-            .flow_mut()
-            .submit_seal_result(answers)
+        self.store.flow().submit_seal_result(answers)
     }

     async fn seal_iteration(&mut self) -> Result<bool> {
@@ -29,13 +29,13 @@ impl MineService {
         executor: task_executor::TaskExecutor,
         _network_send: mpsc::UnboundedSender<NetworkMessage>,
         config: MinerConfig,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
     ) -> Result<broadcast::Sender<MinerMessage>, String> {
         let provider = Arc::new(config.make_provider().await?);

         let (msg_send, msg_recv) = broadcast::channel(1024);

-        let miner_id = check_and_request_miner_id(&config, &store, &provider).await?;
+        let miner_id = check_and_request_miner_id(&config, store.as_ref(), &provider).await?;
         debug!("miner id setting complete.");

         let mine_context_receiver = MineContextWatcher::spawn(
@@ -22,7 +22,7 @@ pub struct Submitter {
     mine_contract: PoraMine<MineServiceMiddleware>,
     flow_contract: ZgsFlow<MineServiceMiddleware>,
     default_gas_limit: Option<U256>,
-    store: Arc<RwLock<dyn Store>>,
+    store: Arc<dyn Store>,
 }

 impl Submitter {
@@ -30,7 +30,7 @@ impl Submitter {
         executor: TaskExecutor,
         mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
         provider: Arc<MineServiceMiddleware>,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
         config: &MinerConfig,
     ) {
         let mine_contract = PoraMine::new(config.mine_address, provider.clone());
@@ -80,8 +80,6 @@ impl Submitter {

         let flow_proof = self
             .store
-            .read()
-            .await
             .get_proof_at_root(
                 &mine_answer.context_flow_root,
                 mine_answer.recall_position,
@@ -32,7 +32,7 @@ impl PrunerConfig {

 pub struct Pruner {
     config: PrunerConfig,
-    store: Arc<RwLock<dyn Store>>,
+    store: Arc<dyn Store>,

     sender: mpsc::UnboundedSender<PrunerMessage>,
     miner_sender: Option<broadcast::Sender<MinerMessage>>,
@@ -42,10 +42,10 @@ impl Pruner {
     pub async fn spawn(
         executor: TaskExecutor,
         mut config: PrunerConfig,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
         miner_sender: Option<broadcast::Sender<MinerMessage>>,
     ) -> Result<mpsc::UnboundedReceiver<PrunerMessage>> {
-        if let Some(shard_config) = get_shard_config(&store).await? {
+        if let Some(shard_config) = get_shard_config(store.as_ref()).await? {
             config.shard_config = shard_config;
         }
         let (tx, rx) = mpsc::unbounded_channel();
@@ -76,7 +76,7 @@ impl Pruner {
                 batch.push(index);
                 if batch.len() == self.config.batch_size || iter.peek().is_none() {
                     debug!(start = batch.first(), end = batch.last(), "prune batch");
-                    self.store.write().await.remove_chunks_batch(&batch)?;
+                    self.store.remove_chunks_batch(&batch)?;
                     batch = Vec::with_capacity(self.config.batch_size);
                     tokio::time::sleep(self.config.batch_wait_time).await;
                 }
@@ -87,7 +87,7 @@ impl Pruner {
     }

     async fn maybe_update(&mut self) -> Result<Option<Box<dyn Send + Iterator<Item = u64>>>> {
-        let current_size = self.store.read().await.flow().get_num_entries()?;
+        let current_size = self.store.flow().get_num_entries()?;
         debug!(
             current_size = current_size,
             config = ?self.config.shard_config,
@@ -109,7 +109,7 @@ impl Pruner {
             config.num_shard *= 2;

             // Generate delete list
-            let flow_len = self.store.read().await.get_context()?.1;
+            let flow_len = self.store.get_context()?.1;
             let start_index = old_shard_id + (!rand_bit) as usize * old_num_shard;
             return Ok(Some(Box::new(
                 (start_index as u64..flow_len).step_by(config.num_shard),
@@ -124,16 +124,16 @@ impl Pruner {
         }
         self.sender
             .send(PrunerMessage::ChangeShardConfig(self.config.shard_config))?;
-        let mut store = self.store.write().await;
-        store
-            .flow_mut()
+        self.store
+            .flow()
             .update_shard_config(self.config.shard_config);
-        store.set_config_encoded(&SHARD_CONFIG_KEY, &self.config.shard_config)
+        self.store
+            .set_config_encoded(&SHARD_CONFIG_KEY, &self.config.shard_config)
     }
 }

-async fn get_shard_config(store: &RwLock<dyn Store>) -> Result<Option<ShardConfig>> {
-    store.read().await.get_config_decoded(&SHARD_CONFIG_KEY)
+async fn get_shard_config(store: &dyn Store) -> Result<Option<ShardConfig>> {
+    store.get_config_decoded(&SHARD_CONFIG_KEY)
 }

 #[derive(Debug)]
@@ -269,13 +269,7 @@ impl Libp2pEventHandler {
         };

         let timestamp = timestamp_now();
-        let shard_config = self
-            .store
-            .get_store()
-            .read()
-            .await
-            .flow()
-            .get_shard_config();
+        let shard_config = self.store.get_store().flow().get_shard_config();

         let msg = AnnounceFile {
             tx_id,
@@ -598,7 +592,7 @@ mod tests {
         sync_recv: SyncReceiver,
         chunk_pool_send: mpsc::UnboundedSender<ChunkPoolMessage>,
         chunk_pool_recv: mpsc::UnboundedReceiver<ChunkPoolMessage>,
-        store: Arc<RwLock<dyn Store>>,
+        store: Arc<dyn Store>,
         file_location_cache: Arc<FileLocationCache>,
         peers: Arc<RwLock<PeerManager>>,
     }
@@ -621,7 +615,7 @@ mod tests {
                 sync_recv,
                 chunk_pool_send,
                 chunk_pool_recv,
-                store: Arc::new(RwLock::new(store)),
+                store: Arc::new(store),
                 file_location_cache: Arc::new(FileLocationCache::default()),
                 peers: Arc::new(RwLock::new(PeerManager::new(Config::default()))),
             }
@@ -58,7 +58,7 @@ impl RouterService {
         _miner_send: Option<broadcast::Sender<MinerMessage>>,
         chunk_pool_send: UnboundedSender<ChunkPoolMessage>,
         pruner_recv: Option<mpsc::UnboundedReceiver<PrunerMessage>>,
-        store: Arc<RwLock<dyn LogStore>>,
+        store: Arc<dyn LogStore>,
         file_location_cache: Arc<FileLocationCache>,
         local_keypair: Keypair,
         config: Config,
@@ -23,8 +23,6 @@ impl RpcServer for RpcServerImpl {
             .ctx
             .log_store
             .get_store()
-            .read()
-            .await
             .get_sync_progress()?
             .unwrap_or_default();

@@ -156,14 +154,7 @@ impl RpcServer for RpcServerImpl {

     async fn get_shard_config(&self) -> RpcResult<ShardConfig> {
         debug!("zgs_getShardConfig");
-        let shard_config = self
-            .ctx
-            .log_store
-            .get_store()
-            .read()
-            .await
-            .flow()
-            .get_shard_config();
+        let shard_config = self.ctx.log_store.get_store().flow().get_shard_config();
         Ok(shard_config)
     }
 }
@@ -68,7 +68,7 @@ struct ChunkPoolComponents {
 #[derive(Default)]
 pub struct ClientBuilder {
     runtime_context: Option<RuntimeContext>,
-    store: Option<Arc<RwLock<dyn Store>>>,
+    store: Option<Arc<dyn Store>>,
     async_store: Option<storage_async::Store>,
     file_location_cache: Option<Arc<FileLocationCache>>,
     network: Option<NetworkComponents>,
@@ -89,10 +89,10 @@ impl ClientBuilder {
     /// Initializes in-memory storage.
     pub fn with_memory_store(mut self) -> Result<Self, String> {
         // TODO(zz): Set config.
-        let store = Arc::new(RwLock::new(
+        let store = Arc::new(
             LogManager::memorydb(LogConfig::default())
                 .map_err(|e| format!("Unable to start in-memory store: {:?}", e))?,
-        ));
+        );

         self.store = Some(store.clone());

@@ -105,10 +105,10 @@ impl ClientBuilder {

     /// Initializes RocksDB storage.
     pub fn with_rocksdb_store(mut self, config: &StorageConfig) -> Result<Self, String> {
-        let store = Arc::new(RwLock::new(
+        let store = Arc::new(
             LogManager::rocksdb(LogConfig::default(), &config.db_dir)
                 .map_err(|e| format!("Unable to start RocksDB store: {:?}", e))?,
-        ));
+        );

         self.store = Some(store.clone());

@@ -28,14 +28,14 @@ macro_rules! delegate {
 #[derive(Clone)]
 pub struct Store {
     /// Log and transaction storage.
-    store: Arc<RwLock<dyn LogStore>>,
+    store: Arc<dyn LogStore>,

     /// Tokio executor for spawning worker tasks.
     executor: TaskExecutor,
 }

 impl Store {
-    pub fn new(store: Arc<RwLock<dyn LogStore>>, executor: TaskExecutor) -> Self {
+    pub fn new(store: Arc<dyn LogStore>, executor: TaskExecutor) -> Self {
         Store { store, executor }
     }

@@ -64,7 +64,7 @@ impl Store {

     async fn spawn<T, F>(&self, f: F) -> Result<T>
     where
-        F: FnOnce(&mut dyn LogStore) -> Result<T> + Send + 'static,
+        F: FnOnce(&dyn LogStore) -> Result<T> + Send + 'static,
         T: Send + 'static,
     {
         let store = self.store.clone();
@@ -73,7 +73,7 @@ impl Store {
         self.executor.spawn(
             async move {
                 // FIXME(zz): Not all functions need `write`. Refactor store usage.
-                let res = f(&mut *store.write().await);
+                let res = f(&*store);

                 if tx.send(res).is_err() {
                     error!("Unable to complete async storage operation: the receiver dropped");
@@ -87,7 +87,7 @@ impl Store {
     }

     // FIXME(zz): Refactor the lock and async call here.
-    pub fn get_store(&self) -> &RwLock<dyn LogStore> {
+    pub fn get_store(&self) -> &dyn LogStore {
         self.store.as_ref()
     }
 }
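The `storage_async` wrapper keeps its API, but its worker closure now borrows the store immutably: the bound changes from `FnOnce(&mut dyn LogStore)` to `FnOnce(&dyn LogStore)`, and `spawn` just clones the `Arc` instead of taking a write lock. A simplified, self-contained sketch of that pattern, using `tokio::spawn` in place of the node's `TaskExecutor` and a stand-in `LogStore` trait (both assumptions here):

```rust
use std::sync::Arc;
use anyhow::Result;
use tokio::sync::oneshot;

trait LogStore: Send + Sync {
    fn next_tx_seq(&self) -> u64;
}

#[derive(Clone)]
struct Store {
    store: Arc<dyn LogStore>,
}

impl Store {
    // The closure takes `&dyn LogStore` rather than `&mut dyn LogStore`,
    // so no lock is acquired before invoking it on a worker task.
    async fn spawn<T, F>(&self, f: F) -> Result<T>
    where
        F: FnOnce(&dyn LogStore) -> Result<T> + Send + 'static,
        T: Send + 'static,
    {
        let store = self.store.clone();
        let (tx, rx) = oneshot::channel();
        tokio::spawn(async move {
            let res = f(&*store);
            if tx.send(res).is_err() {
                eprintln!("Unable to complete async storage operation: the receiver dropped");
            }
        });
        rx.await?
    }
}

struct MemStore;
impl LogStore for MemStore {
    fn next_tx_seq(&self) -> u64 {
        7
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let s = Store { store: Arc::new(MemStore) };
    let seq = s.spawn(|store: &dyn LogStore| Ok(store.next_tx_seq())).await?;
    assert_eq!(seq, 7);
    Ok(())
}
```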
@@ -87,7 +87,7 @@ impl FlowStore {
 #[derive(Clone, Debug)]
 pub struct FlowConfig {
     pub batch_size: usize,
-    pub shard_config: ShardConfig,
+    pub shard_config: Arc<RwLock<ShardConfig>>,
 }

 impl Default for FlowConfig {
@@ -208,7 +208,7 @@ impl FlowRead for FlowStore {
     }

     fn get_shard_config(&self) -> ShardConfig {
-        self.config.shard_config
+        self.config.shard_config.read().clone()
     }
 }

@@ -233,7 +233,7 @@ impl FlowWrite for FlowStore {
             .expect("in range");

         let chunk_index = chunk.start_index / self.config.batch_size as u64;
-        if !self.config.shard_config.in_range(chunk_index) {
+        if !self.config.shard_config.read().in_range(chunk_index) {
             // The data are in a shard range that we are not storing.
             continue;
         }
@@ -273,8 +273,8 @@ impl FlowWrite for FlowStore {
         Ok(())
     }

-    fn update_shard_config(&mut self, shard_config: ShardConfig) {
-        self.config.shard_config = shard_config;
+    fn update_shard_config(&self, shard_config: ShardConfig) {
+        *self.config.shard_config.write() = shard_config;
     }
 }

@@ -313,7 +313,7 @@ impl FlowSeal for FlowStore {
         Ok(Some(tasks))
     }

-    fn submit_seal_result(&mut self, answers: Vec<SealAnswer>) -> Result<()> {
+    fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()> {
         let mut to_seal_set = self.to_seal_set.write();
         let is_consistent = |answer: &SealAnswer| {
             to_seal_set
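`FlowConfig` is the one piece of state that still needs shared mutation after the refactor, so its lock moves inward: `shard_config` becomes `Arc<RwLock<ShardConfig>>` and `update_shard_config` can take `&self`. A sketch of this interior-mutability move, assuming `parking_lot` (consistent with the non-async `.read()`/`.write()` calls visible in this hunk) and stand-in struct definitions:

```rust
use std::sync::Arc;
use parking_lot::RwLock;

#[derive(Clone, Copy, Debug, Default)]
pub struct ShardConfig {
    pub shard_id: usize,
    pub num_shard: usize,
}

#[derive(Clone, Debug)]
pub struct FlowConfig {
    pub batch_size: usize,
    // The lock lives inside the config, so the containing store can be
    // shared as `Arc<FlowStore>` without an outer async lock.
    pub shard_config: Arc<RwLock<ShardConfig>>,
}

pub struct FlowStore {
    config: FlowConfig,
}

impl FlowStore {
    pub fn get_shard_config(&self) -> ShardConfig {
        self.config.shard_config.read().clone()
    }

    // Previously `&mut self`; `&self` now suffices because mutation goes
    // through the interior write lock.
    pub fn update_shard_config(&self, shard_config: ShardConfig) {
        *self.config.shard_config.write() = shard_config;
    }
}
```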
@@ -213,7 +213,7 @@ pub trait FlowWrite {
     fn truncate(&self, start_index: u64) -> Result<()>;

     /// Update the shard config.
-    fn update_shard_config(&mut self, shard_config: ShardConfig);
+    fn update_shard_config(&self, shard_config: ShardConfig);
 }

 pub struct SealTask {
@@ -247,7 +247,7 @@ pub trait FlowSeal {

     /// Submit sealing result

-    fn submit_seal_result(&mut self, answers: Vec<SealAnswer>) -> Result<()>;
+    fn submit_seal_result(&self, answers: Vec<SealAnswer>) -> Result<()>;
 }

 pub trait Flow: FlowRead + FlowWrite + FlowSeal {}
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_tx_seq_range(&self) -> Result<(Option<u64>, Option<u64>)> {
|
pub async fn get_tx_seq_range(&self) -> Result<(Option<u64>, Option<u64>)> {
|
||||||
let store = self.store.get_store().read().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
// load next_tx_seq
|
// load next_tx_seq
|
||||||
let next_tx_seq = store.get_config_decoded(&KEY_NEXT_TX_SEQ)?;
|
let next_tx_seq = store.get_config_decoded(&KEY_NEXT_TX_SEQ)?;
|
||||||
@ -43,8 +43,6 @@ impl SyncStore {
|
|||||||
pub async fn set_next_tx_seq(&self, tx_seq: u64) -> Result<()> {
|
pub async fn set_next_tx_seq(&self, tx_seq: u64) -> Result<()> {
|
||||||
self.store
|
self.store
|
||||||
.get_store()
|
.get_store()
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.set_config_encoded(&KEY_NEXT_TX_SEQ, &tx_seq)
|
.set_config_encoded(&KEY_NEXT_TX_SEQ, &tx_seq)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -52,13 +50,11 @@ impl SyncStore {
|
|||||||
debug!(%tx_seq, "set_max_tx_seq");
|
debug!(%tx_seq, "set_max_tx_seq");
|
||||||
self.store
|
self.store
|
||||||
.get_store()
|
.get_store()
|
||||||
.write()
|
|
||||||
.await
|
|
||||||
.set_config_encoded(&KEY_MAX_TX_SEQ, &tx_seq)
|
.set_config_encoded(&KEY_MAX_TX_SEQ, &tx_seq)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn add_pending_tx(&self, tx_seq: u64) -> Result<bool> {
|
pub async fn add_pending_tx(&self, tx_seq: u64) -> Result<bool> {
|
||||||
let store = self.store.get_store().write().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
// already in ready queue
|
// already in ready queue
|
||||||
if self.ready_txs.has(store.deref(), tx_seq)? {
|
if self.ready_txs.has(store.deref(), tx_seq)? {
|
||||||
@ -70,7 +66,7 @@ impl SyncStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upgrade_tx_to_ready(&self, tx_seq: u64) -> Result<bool> {
|
pub async fn upgrade_tx_to_ready(&self, tx_seq: u64) -> Result<bool> {
|
||||||
let store = self.store.get_store().write().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
let mut tx = ConfigTx::default();
|
let mut tx = ConfigTx::default();
|
||||||
|
|
||||||
@ -91,7 +87,7 @@ impl SyncStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn downgrade_tx_to_pending(&self, tx_seq: u64) -> Result<bool> {
|
pub async fn downgrade_tx_to_pending(&self, tx_seq: u64) -> Result<bool> {
|
||||||
let store = self.store.get_store().write().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
let mut tx = ConfigTx::default();
|
let mut tx = ConfigTx::default();
|
||||||
|
|
||||||
@ -112,7 +108,7 @@ impl SyncStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn random_tx(&self) -> Result<Option<u64>> {
|
pub async fn random_tx(&self) -> Result<Option<u64>> {
|
||||||
let store = self.store.get_store().read().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
// try to find a tx in ready queue with high priority
|
// try to find a tx in ready queue with high priority
|
||||||
if let Some(val) = self.ready_txs.random(store.deref())? {
|
if let Some(val) = self.ready_txs.random(store.deref())? {
|
||||||
@ -124,7 +120,7 @@ impl SyncStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn remove_tx(&self, tx_seq: u64) -> Result<bool> {
|
pub async fn remove_tx(&self, tx_seq: u64) -> Result<bool> {
|
||||||
let store = self.store.get_store().write().await;
|
let store = self.store.get_store();
|
||||||
|
|
||||||
// removed in ready queue
|
// removed in ready queue
|
||||||
if self.ready_txs.remove(store.deref(), None, tx_seq)? {
|
if self.ready_txs.remove(store.deref(), None, tx_seq)? {
|
||||||
|
@@ -424,8 +424,6 @@ impl SerialSyncController {
         let validation_result = self
             .store
             .get_store()
-            .write()
-            .await
             .validate_and_insert_range_proof(self.tx_seq, &response);

         match validation_result {
@@ -447,13 +445,7 @@ impl SerialSyncController {

                 self.failures = 0;

-                let shard_config = self
-                    .store
-                    .get_store()
-                    .read()
-                    .await
-                    .flow()
-                    .get_shard_config();
+                let shard_config = self.store.get_store().flow().get_shard_config();
                 let next_chunk = shard_config.next_segment_index(
                     (from_chunk / PORA_CHUNK_SIZE as u64) as usize,
                     (self.tx_start_chunk_in_flow / PORA_CHUNK_SIZE as u64) as usize,
@@ -1107,8 +1099,6 @@ mod tests {
         );

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, chunk_count)
             .unwrap()
             .unwrap();
@@ -1141,8 +1131,6 @@ mod tests {
         );

         let mut chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, chunk_count)
             .unwrap()
             .unwrap();
@@ -1210,8 +1198,6 @@ mod tests {
         );

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, chunk_count)
             .unwrap()
             .unwrap();
@@ -1276,8 +1262,6 @@ mod tests {
         );

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, chunk_count)
             .unwrap()
             .unwrap();
@@ -1350,8 +1334,6 @@ mod tests {
         );

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, chunk_count)
             .unwrap()
             .unwrap();
@@ -1395,8 +1377,6 @@ mod tests {
         );

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(tx_seq, 0, 1024)
             .unwrap()
             .unwrap();
@@ -1575,7 +1555,7 @@ mod tests {
     fn create_controller(
         task_executor: TaskExecutor,
         peer_id: Option<PeerId>,
-        store: Arc<RwLock<LogManager>>,
+        store: Arc<LogManager>,
         tx_id: TxID,
         num_chunks: usize,
     ) -> (SerialSyncController, UnboundedReceiver<NetworkMessage>) {
@@ -129,7 +129,7 @@ impl SyncService {
     pub async fn spawn(
         executor: task_executor::TaskExecutor,
         network_send: mpsc::UnboundedSender<NetworkMessage>,
-        store: Arc<RwLock<dyn LogStore>>,
+        store: Arc<dyn LogStore>,
         file_location_cache: Arc<FileLocationCache>,
         event_recv: broadcast::Receiver<LogSyncEvent>,
     ) -> Result<SyncSender> {
@@ -148,7 +148,7 @@ impl SyncService {
         config: Config,
         executor: task_executor::TaskExecutor,
         network_send: mpsc::UnboundedSender<NetworkMessage>,
-        store: Arc<RwLock<dyn LogStore>>,
+        store: Arc<dyn LogStore>,
         file_location_cache: Arc<FileLocationCache>,
         event_recv: broadcast::Receiver<LogSyncEvent>,
     ) -> Result<SyncSender> {
@@ -723,8 +723,8 @@ mod tests {
         runtime: TestRuntime,

         chunk_count: usize,
-        store: Arc<RwLock<LogManager>>,
-        peer_store: Arc<RwLock<LogManager>>,
+        store: Arc<LogManager>,
+        peer_store: Arc<LogManager>,
         txs: Vec<Transaction>,
         init_data: Vec<u8>,

@@ -913,8 +913,6 @@ mod tests {

         runtime
             .peer_store
-            .read()
-            .await
             .validate_range_proof(0, &response)
             .expect("validate proof");
     }
@@ -1186,10 +1184,7 @@ mod tests {
             .unwrap();

         thread::sleep(Duration::from_millis(1000));
-        assert_eq!(
-            store.read().await.get_tx_by_seq_number(tx_seq).unwrap(),
-            None
-        );
+        assert_eq!(store.get_tx_by_seq_number(tx_seq).unwrap(), None);
         assert!(network_recv.try_recv().is_err());
     }

@@ -1205,18 +1200,13 @@ mod tests {
             .unwrap();

         thread::sleep(Duration::from_millis(1000));
-        assert!(runtime
-            .peer_store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(runtime.peer_store.check_tx_completed(tx_seq).unwrap());
         assert!(runtime.network_recv.try_recv().is_err());
     }

-    async fn wait_for_tx_finalized(store: Arc<RwLock<LogManager>>, tx_seq: u64) {
+    async fn wait_for_tx_finalized(store: Arc<LogManager>, tx_seq: u64) {
         let deadline = Instant::now() + Duration::from_millis(5000);
-        while !store.read().await.check_tx_completed(tx_seq).unwrap() {
+        while !store.check_tx_completed(tx_seq).unwrap() {
             if Instant::now() >= deadline {
                 panic!("Failed to wait tx completed");
             }
@@ -1238,12 +1228,7 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());

         assert!(!matches!(
             sync_send
@@ -1313,12 +1298,7 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());

         receive_chunk_request(
             &mut runtime.network_recv,
@@ -1368,13 +1348,8 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
-        assert!(!runtime.store.read().await.check_tx_completed(0).unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());
+        assert!(!runtime.store.check_tx_completed(0).unwrap());

         receive_chunk_request(
             &mut runtime.network_recv,
@@ -1460,12 +1435,7 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());

         receive_chunk_request(
             &mut runtime.network_recv,
@@ -1503,12 +1473,7 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());

         receive_chunk_request(
             &mut runtime.network_recv,
@@ -1589,12 +1554,7 @@ mod tests {

         receive_dial(&mut runtime, &sync_send).await;

-        assert!(!runtime
-            .store
-            .read()
-            .await
-            .check_tx_completed(tx_seq)
-            .unwrap());
+        assert!(!runtime.store.check_tx_completed(tx_seq).unwrap());

         assert!(!matches!(
             sync_send
@@ -1620,7 +1580,7 @@ mod tests {
     async fn receive_chunk_request(
         network_recv: &mut UnboundedReceiver<NetworkMessage>,
         sync_send: &SyncSender,
-        peer_store: Arc<RwLock<LogManager>>,
+        peer_store: Arc<LogManager>,
         init_peer_id: PeerId,
         tx_seq: u64,
         index_start: u64,
@@ -1654,8 +1614,6 @@ mod tests {
         };

         let chunks = peer_store
-            .read()
-            .await
             .get_chunks_with_proof_by_tx_and_index_range(
                 tx_seq,
                 req.index_start as usize,
@@ -17,8 +17,8 @@ use tokio::sync::RwLock;
 pub fn create_2_store(
     chunk_count: Vec<usize>,
 ) -> (
-    Arc<RwLock<LogManager>>,
-    Arc<RwLock<LogManager>>,
+    Arc<LogManager>,
+    Arc<LogManager>,
     Vec<Transaction>,
     Vec<Vec<u8>>,
 ) {
@@ -37,12 +37,7 @@ pub fn create_2_store(
         offset = ret.2;
     }

-    (
-        Arc::new(RwLock::new(store)),
-        Arc::new(RwLock::new(peer_store)),
-        txs,
-        data,
-    )
+    (Arc::new(store), Arc::new(peer_store), txs, data)
 }

 fn generate_data(
@@ -115,7 +110,7 @@ pub mod tests {

     impl Default for TestStoreRuntime {
         fn default() -> Self {
-            let store = Arc::new(RwLock::new(Self::new_store()));
+            let store = Arc::new(Self::new_store());
             Self::new(store)
         }
     }
@@ -125,7 +120,7 @@ pub mod tests {
         LogManager::memorydb(LogConfig::default()).unwrap()
     }

-        pub fn new(store: Arc<RwLock<dyn LogStore>>) -> TestStoreRuntime {
+        pub fn new(store: Arc<dyn LogStore>) -> TestStoreRuntime {
         let runtime = TestRuntime::default();
         let executor = runtime.task_executor.clone();
         Self {