Mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-01-14 17:15:18 +00:00)
Sync recent announced files with priority (#300)
Some checks are pending
abi-consistent-check / build-and-compare (push) Waiting to run
code-coverage / unittest-cov (push) Waiting to run
rust / check (push) Waiting to run
rust / test (push) Waiting to run
rust / lints (push) Waiting to run
functional-test / test (push) Waiting to run
* Upgrade the Rust toolchain to avoid a proc-macro issue with the latest rust-analyzer
* Update random sync metrics
* Adjust default config for the sync layer
* Cache recently announced files for random sync with priority (a conceptual sketch follows the commit metadata below)
* Fix clippy warnings
* Fix the auto sync failure-reason issue
* Add cached ready txs info in RPC
* Fix lint
This commit is contained in:
parent 910b5af1c7
commit 8790fe1d66
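The core idea of this change: keep the tx sequence numbers of recently announced files in a bounded in-memory cache and try that cache first when the random batcher picks the next file to sync, falling back to the persistent ready queue otherwise. Below is a conceptual, self-contained sketch of that policy; the RecentAnnouncements type is hypothetical (the node's actual implementation is the CachedTxStore added later in this diff), and it assumes the rand crate for the random pick, as the real code does.

// Conceptual sketch only, not the node's actual types.
use rand::seq::IteratorRandom;
use std::collections::HashSet;

struct RecentAnnouncements {
    cap: usize,
    cache: HashSet<u64>,
}

impl RecentAnnouncements {
    fn new(cap: usize) -> Self {
        Self { cap, cache: HashSet::new() }
    }

    fn announce(&mut self, tx_seq: u64) {
        self.cache.insert(tx_seq);
        // Evict an arbitrary entry once the cap is exceeded, to bound memory.
        if self.cache.len() > self.cap {
            let evict = self.cache.iter().choose(&mut rand::thread_rng()).cloned();
            if let Some(evict) = evict {
                self.cache.remove(&evict);
            }
        }
    }

    // Prefer a recently announced file; otherwise fall back to the full queue.
    fn pick(&self, fallback: impl Fn() -> Option<u64>) -> Option<u64> {
        self.cache
            .iter()
            .choose(&mut rand::thread_rng())
            .cloned()
            .or_else(fallback)
    }
}

fn main() {
    let mut recent = RecentAnnouncements::new(2);
    for tx_seq in [10, 11, 12] {
        recent.announce(tx_seq);
    }
    // At most 2 entries remain cached; picking prefers one of them.
    println!("picked: {:?}", recent.pick(|| Some(1)));
}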
.github/actions/setup-rust/action.yml (vendored, 4 changed lines)
@@ -2,11 +2,11 @@ name: Setup Rust (cache & toolchain)
 runs:
   using: composite
   steps:
-    - name: Install toolchain 1.75.0
+    - name: Install toolchain 1.78.0
       uses: actions-rs/toolchain@v1
       with:
         profile: minimal
-        toolchain: 1.75.0
+        toolchain: 1.78.0
         components: rustfmt, clippy

     - uses: Swatinem/rust-cache@v2
@@ -18,7 +18,6 @@ pub struct RpcServerImpl {

 #[async_trait]
 impl RpcServer for RpcServerImpl {
     #[tracing::instrument(skip(self), err)]
     async fn find_file(&self, tx_seq: u64) -> RpcResult<()> {
         info!("admin_findFile({tx_seq})");

@@ -39,7 +38,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn shutdown(&self) -> RpcResult<()> {
         info!("admin_shutdown()");

@@ -51,7 +49,6 @@ impl RpcServer for RpcServerImpl {
             .map_err(|e| error::internal_error(format!("Failed to send shutdown command: {:?}", e)))
     }

     #[tracing::instrument(skip(self), err)]
     async fn start_sync_file(&self, tx_seq: u64) -> RpcResult<()> {
         info!("admin_startSyncFile({tx_seq})");

@@ -72,7 +69,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn start_sync_chunks(
         &self,
         tx_seq: u64,

@@ -102,7 +98,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn terminate_sync(&self, tx_seq: u64) -> RpcResult<bool> {
         info!("admin_terminateSync({tx_seq})");

@@ -131,7 +126,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn get_sync_status(&self, tx_seq: u64) -> RpcResult<String> {
         info!("admin_getSyncStatus({tx_seq})");

@@ -148,7 +142,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn get_sync_info(&self, tx_seq: Option<u64>) -> RpcResult<HashMap<u64, FileSyncInfo>> {
         info!(?tx_seq, "admin_getSyncInfo()");

@@ -163,7 +156,6 @@ impl RpcServer for RpcServerImpl {
         }
     }

     #[tracing::instrument(skip(self), err)]
     async fn get_network_info(&self) -> RpcResult<NetworkInfo> {
         info!("admin_getNetworkInfo()");

@@ -17,7 +17,6 @@ pub struct RpcServerImpl {

 #[async_trait]
 impl RpcServer for RpcServerImpl {
     #[tracing::instrument(skip(self), err)]
     async fn get_status(&self) -> RpcResult<Status> {
         info!("zgs_getStatus()");
         let sync_progress = self
@@ -1,4 +1,7 @@
-use crate::{controllers::SyncState, SyncRequest, SyncResponse, SyncSender};
+use crate::{
+    controllers::{FailureReason, SyncState},
+    SyncRequest, SyncResponse, SyncSender,
+};
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
 use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};

@@ -126,7 +129,10 @@ impl Batcher {
                     "Failed to sync file and terminate the failed file sync"
                 );
                 self.terminate_file_sync(tx_seq, false).await;
-                Ok(Some(SyncResult::Failed))
+                match reason {
+                    FailureReason::TimeoutFindFile => Ok(Some(SyncResult::Timeout)),
+                    _ => Ok(Some(SyncResult::Failed)),
+                }
             }

             // finding peers timeout
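The hunk above changes how a failed file sync is classified: a failure caused by a find-file timeout now yields SyncResult::Timeout instead of SyncResult::Failed, so it is counted in the timeout metric rather than the failure metric. A minimal, self-contained sketch of that classification, using local stand-in enums rather than the sync crate's own types:

// Stand-in types; the real FailureReason and SyncResult live in the sync crate.
#[derive(Debug, PartialEq)]
enum FailureReason {
    TimeoutFindFile,
    TxReverted,
}

#[derive(Debug, PartialEq)]
enum SyncResult {
    Failed,
    Timeout,
}

// Classify a terminal failure: find-file timeouts are reported as timeouts,
// everything else stays a plain failure.
fn classify(reason: &FailureReason) -> SyncResult {
    match reason {
        FailureReason::TimeoutFindFile => SyncResult::Timeout,
        _ => SyncResult::Failed,
    }
}

fn main() {
    assert_eq!(classify(&FailureReason::TimeoutFindFile), SyncResult::Timeout);
    assert_eq!(classify(&FailureReason::TxReverted), SyncResult::Failed);
    println!("timeout vs failure classification ok");
}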
@@ -1,6 +1,6 @@
-use super::{batcher::Batcher, sync_store::SyncStore};
+use super::{batcher::Batcher, metrics::RandomBatcherMetrics, sync_store::SyncStore};
 use crate::{
-    auto_sync::{batcher::SyncResult, metrics, sync_store::Queue},
+    auto_sync::{batcher::SyncResult, sync_store::Queue},
     Config, SyncSender,
 };
 use anyhow::Result;

@@ -19,6 +19,7 @@ pub struct RandomBatcherState {
     pub tasks: Vec<u64>,
     pub pending_txs: usize,
     pub ready_txs: usize,
+    pub cached_ready_txs: usize,
 }

 #[derive(Clone)]

@@ -27,6 +28,7 @@ pub struct RandomBatcher {
     config: Config,
     batcher: Batcher,
     sync_store: Arc<SyncStore>,
+    metrics: Arc<RandomBatcherMetrics>,
 }

 impl RandomBatcher {

@@ -36,6 +38,7 @@ impl RandomBatcher {
         store: Store,
         sync_send: SyncSender,
         sync_store: Arc<SyncStore>,
+        metrics: Arc<RandomBatcherMetrics>,
     ) -> Self {
         Self {
             name,

@@ -47,17 +50,19 @@ impl RandomBatcher {
                 sync_send,
             ),
             sync_store,
+            metrics,
         }
     }

     pub async fn get_state(&self) -> Result<RandomBatcherState> {
-        let (pending_txs, ready_txs) = self.sync_store.stat().await?;
+        let (pending_txs, ready_txs, cached_ready_txs) = self.sync_store.stat().await?;

         Ok(RandomBatcherState {
             name: self.name.clone(),
             tasks: self.batcher.tasks().await,
             pending_txs,
             ready_txs,
+            cached_ready_txs,
         })
     }

@@ -71,11 +76,10 @@ impl RandomBatcher {
         }

         loop {
-            // if let Ok(state) = self.get_state().await {
-            //     metrics::RANDOM_STATE_TXS_SYNCING.update(state.tasks.len() as u64);
-            //     metrics::RANDOM_STATE_TXS_READY.update(state.ready_txs as u64);
-            //     metrics::RANDOM_STATE_TXS_PENDING.update(state.pending_txs as u64);
-            // }
+            if let Ok(state) = self.get_state().await {
+                self.metrics
+                    .update_state(state.ready_txs, state.pending_txs);
+            }

             match self.sync_once().await {
                 Ok(true) => {}

@@ -106,11 +110,7 @@ impl RandomBatcher {
         };

         debug!(%tx_seq, ?sync_result, "Completed to sync file, state = {:?}", self.get_state().await);
-        match sync_result {
-            SyncResult::Completed => metrics::RANDOM_SYNC_RESULT_COMPLETED.mark(1),
-            SyncResult::Failed => metrics::RANDOM_SYNC_RESULT_FAILED.inc(1),
-            SyncResult::Timeout => metrics::RANDOM_SYNC_RESULT_TIMEOUT.inc(1),
-        }
+        self.metrics.update_result(sync_result);

         if matches!(sync_result, SyncResult::Completed) {
             self.sync_store.remove(tx_seq).await?;
@@ -45,7 +45,7 @@ impl HistoricalTxWriter {
     }

     pub async fn get_state(&self) -> Result<HistoricalTxWriterState> {
-        let (pending_txs, ready_txs) = self.sync_store.stat().await?;
+        let (pending_txs, ready_txs, _) = self.sync_store.stat().await?;

         Ok(HistoricalTxWriterState {
             next_tx_seq: self.next_tx_seq.load(Ordering::Relaxed),
@@ -19,6 +19,7 @@ use super::{
     batcher_random::RandomBatcher,
     batcher_serial::SerialBatcher,
     historical_tx_writer::HistoricalTxWriter,
+    metrics,
     sync_store::{Queue, SyncStore},
 };

@@ -45,11 +46,12 @@ impl AutoSyncManager {
             // use v2 db to avoid reading v1 files that announced from the whole network instead of neighbors
             Arc::new(SyncStore::new_with_name(
                 store.clone(),
+                config.ready_txs_cache_cap,
                 "pendingv2",
                 "readyv2",
             ))
         } else {
-            Arc::new(SyncStore::new(store.clone()))
+            Arc::new(SyncStore::new(store.clone(), 0))
         };
         let catched_up = Arc::new(AtomicBool::new(false));

@@ -83,6 +85,7 @@ impl AutoSyncManager {
             store.clone(),
             sync_send.clone(),
             sync_store,
+            metrics::RANDOM_ANNOUNCED.clone(),
         );
         executor.spawn(random.clone().start(catched_up.clone()), "auto_sync_random");

@@ -96,6 +99,7 @@ impl AutoSyncManager {
         if config.neighbors_only {
             let historical_sync_store = Arc::new(SyncStore::new_with_name(
                 store.clone(),
+                0,
                 "pendingv2_historical",
                 "readyv2_historical",
             ));

@@ -111,6 +115,7 @@ impl AutoSyncManager {
             store,
             sync_send,
             historical_sync_store,
+            metrics::RANDOM_HISTORICAL.clone(),
         );
         executor.spawn(
             random_historical.start(catched_up.clone()),
@@ -1,6 +1,46 @@
 use std::sync::Arc;

-use metrics::{register_meter, Counter, CounterUsize, Gauge, GaugeUsize, Histogram, Meter, Sample};
+use metrics::{
+    register_meter, register_meter_with_group, Counter, CounterUsize, Gauge, GaugeUsize, Histogram,
+    Meter, Sample,
+};
+
+use super::batcher::SyncResult;
+
+#[derive(Clone)]
+pub struct RandomBatcherMetrics {
+    pub ready_txs: Arc<dyn Gauge<usize>>,
+    pub pending_txs: Arc<dyn Gauge<usize>>,
+
+    pub completed_qps: Arc<dyn Meter>,
+    pub failed_qps: Arc<dyn Meter>,
+    pub timeout_qps: Arc<dyn Meter>,
+}
+
+impl RandomBatcherMetrics {
+    pub fn new(group_name: &str) -> Self {
+        Self {
+            ready_txs: GaugeUsize::register_with_group(group_name, "ready_txs"),
+            pending_txs: GaugeUsize::register_with_group(group_name, "pending_txs"),
+            completed_qps: register_meter_with_group(group_name, "completed_qps"),
+            failed_qps: register_meter_with_group(group_name, "failed_qps"),
+            timeout_qps: register_meter_with_group(group_name, "timeout_qps"),
+        }
+    }
+
+    pub fn update_state(&self, ready_txs: usize, pending_txs: usize) {
+        self.ready_txs.update(ready_txs);
+        self.pending_txs.update(pending_txs);
+    }
+
+    pub fn update_result(&self, result: SyncResult) {
+        match result {
+            SyncResult::Completed => self.completed_qps.mark(1),
+            SyncResult::Failed => self.failed_qps.mark(1),
+            SyncResult::Timeout => self.timeout_qps.mark(1),
+        }
+    }
+}
+
 lazy_static::lazy_static! {
     // sequential auto sync

@@ -14,11 +54,6 @@ lazy_static::lazy_static! {
     pub static ref SEQUENTIAL_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_sequential_sync_result_timeout");

     // random auto sync
-    // pub static ref RANDOM_STATE_TXS_SYNCING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_syncing", 1024);
-    // pub static ref RANDOM_STATE_TXS_READY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_ready", 1024);
-    // pub static ref RANDOM_STATE_TXS_PENDING: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register("sync_auto_random_state_txs_pending", 1024);
-
-    pub static ref RANDOM_SYNC_RESULT_COMPLETED: Arc<dyn Meter> = register_meter("sync_auto_random_sync_result_completed");
-    pub static ref RANDOM_SYNC_RESULT_FAILED: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_failed");
-    pub static ref RANDOM_SYNC_RESULT_TIMEOUT: Arc<dyn Counter<usize>> = CounterUsize::register("sync_auto_random_sync_result_timeout");
+    pub static ref RANDOM_ANNOUNCED: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_announced"));
+    pub static ref RANDOM_HISTORICAL: Arc<RandomBatcherMetrics> = Arc::new(RandomBatcherMetrics::new("sync_auto_random_historical"));
 }
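The metrics change above replaces the global random-sync counters with an instance-based RandomBatcherMetrics, so the announced and historical random batchers each report under their own group name ("sync_auto_random_announced" and "sync_auto_random_historical"). A self-contained sketch of that pattern, with a hypothetical GroupMetrics built on plain std atomics standing in for the node's metrics crate:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Stand-in for RandomBatcherMetrics: one instance per batcher, keyed by group name.
struct GroupMetrics {
    group: &'static str,
    completed: AtomicU64,
    failed: AtomicU64,
    timeout: AtomicU64,
}

impl GroupMetrics {
    fn new(group: &'static str) -> Arc<Self> {
        Arc::new(Self {
            group,
            completed: AtomicU64::new(0),
            failed: AtomicU64::new(0),
            timeout: AtomicU64::new(0),
        })
    }

    fn mark_completed(&self) {
        self.completed.fetch_add(1, Ordering::Relaxed);
    }

    fn report(&self) {
        println!(
            "{}: completed={} failed={} timeout={}",
            self.group,
            self.completed.load(Ordering::Relaxed),
            self.failed.load(Ordering::Relaxed),
            self.timeout.load(Ordering::Relaxed),
        );
    }
}

fn main() {
    // Mirrors RANDOM_ANNOUNCED and RANDOM_HISTORICAL: same shape, separate groups.
    let announced = GroupMetrics::new("sync_auto_random_announced");
    let historical = GroupMetrics::new("sync_auto_random_historical");

    announced.mark_completed();
    announced.mark_completed();
    historical.mark_completed();

    announced.report();
    historical.report();
}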
@@ -1,4 +1,4 @@
-use super::tx_store::TxStore;
+use super::tx_store::{CachedTxStore, TxStore};
 use anyhow::Result;
 use std::sync::Arc;
 use storage::log_store::{

@@ -33,35 +33,36 @@ pub struct SyncStore {

     /// Ready transactions to sync with high priority since announcement
     /// already received from other peers.
-    ready_txs: TxStore,
+    ready_txs: CachedTxStore,
 }

 impl SyncStore {
-    pub fn new(store: Store) -> Self {
-        Self {
-            store: Arc::new(RwLock::new(store)),
-            pending_txs: TxStore::new("pending"),
-            ready_txs: TxStore::new("ready"),
-        }
+    pub fn new(store: Store, ready_txs_cache_cap: usize) -> Self {
+        Self::new_with_name(store, ready_txs_cache_cap, "pending", "ready")
     }

-    pub fn new_with_name(store: Store, pending: &'static str, ready: &'static str) -> Self {
+    pub fn new_with_name(
+        store: Store,
+        ready_txs_cache_cap: usize,
+        pending: &'static str,
+        ready: &'static str,
+    ) -> Self {
         Self {
             store: Arc::new(RwLock::new(store)),
             pending_txs: TxStore::new(pending),
-            ready_txs: TxStore::new(ready),
+            ready_txs: CachedTxStore::new(ready, ready_txs_cache_cap),
         }
     }

     /// Returns the number of pending txs and ready txs.
-    pub async fn stat(&self) -> Result<(usize, usize)> {
+    pub async fn stat(&self) -> Result<(usize, usize, usize)> {
         let async_store = self.store.read().await;
         let store = async_store.get_store();

         let num_pending_txs = self.pending_txs.count(store)?;
-        let num_ready_txs = self.ready_txs.count(store)?;
+        let (num_ready_txs, num_cached_ready_txs) = self.ready_txs.count(store).await?;

-        Ok((num_pending_txs, num_ready_txs))
+        Ok((num_pending_txs, num_ready_txs, num_cached_ready_txs))
     }

     pub async fn get_tx_seq_range(&self) -> Result<(Option<u64>, Option<u64>)> {

@@ -112,7 +113,7 @@ impl SyncStore {

         match queue {
             Queue::Ready => {
-                if !self.ready_txs.add(store, Some(&mut tx), tx_seq)? {
+                if !self.ready_txs.add(store, Some(&mut tx), tx_seq).await? {
                     return Ok(InsertResult::AlreadyExists);
                 }

@@ -130,7 +131,7 @@ impl SyncStore {
                     return Ok(InsertResult::AlreadyExists);
                 }

-                let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq)?;
+                let removed = self.ready_txs.remove(store, Some(&mut tx), tx_seq).await?;
                 store.exec_configs(tx, DATA_DB_KEY)?;

                 if removed {

@@ -152,7 +153,7 @@ impl SyncStore {
             return Ok(false);
         }

-        let added = self.ready_txs.add(store, Some(&mut tx), tx_seq)?;
+        let added = self.ready_txs.add(store, Some(&mut tx), tx_seq).await?;

         store.exec_configs(tx, DATA_DB_KEY)?;

@@ -164,7 +165,7 @@ impl SyncStore {
         let store = async_store.get_store();

         // try to find a tx in ready queue with high priority
-        if let Some(val) = self.ready_txs.random(store)? {
+        if let Some(val) = self.ready_txs.random(store).await? {
            return Ok(Some(val));
         }

@@ -177,7 +178,7 @@ impl SyncStore {
         let store = async_store.get_store();

         // removed in ready queue
-        if self.ready_txs.remove(store, None, tx_seq)? {
+        if self.ready_txs.remove(store, None, tx_seq).await? {
             return Ok(Some(Queue::Ready));
         }

@@ -199,7 +200,7 @@ mod tests {
     #[tokio::test]
     async fn test_tx_seq_range() {
         let runtime = TestStoreRuntime::default();
-        let store = SyncStore::new(runtime.store.clone());
+        let store = SyncStore::new(runtime.store.clone(), 0);

         // check values by default
         assert_eq!(store.get_tx_seq_range().await.unwrap(), (None, None));

@@ -215,7 +216,7 @@ mod tests {
     #[tokio::test]
     async fn test_insert() {
         let runtime = TestStoreRuntime::default();
-        let store = SyncStore::new(runtime.store.clone());
+        let store = SyncStore::new(runtime.store.clone(), 0);

         assert_eq!(store.contains(1).await.unwrap(), None);
         assert_eq!(store.insert(1, Pending).await.unwrap(), NewAdded);

@@ -234,7 +235,7 @@ mod tests {
     #[tokio::test]
     async fn test_upgrade() {
         let runtime = TestStoreRuntime::default();
-        let store = SyncStore::new(runtime.store.clone());
+        let store = SyncStore::new(runtime.store.clone(), 0);

         // cannot upgrade by default
         assert!(!store.upgrade(3).await.unwrap());

@@ -253,7 +254,7 @@ mod tests {
     #[tokio::test]
     async fn test_random() {
         let runtime = TestStoreRuntime::default();
-        let store = SyncStore::new(runtime.store.clone());
+        let store = SyncStore::new(runtime.store.clone(), 0);

         // no tx by default
         assert_eq!(store.random().await.unwrap(), None);

@@ -273,7 +274,7 @@ mod tests {
     #[tokio::test]
     async fn test_remove() {
         let runtime = TestStoreRuntime::default();
-        let store = SyncStore::new(runtime.store.clone());
+        let store = SyncStore::new(runtime.store.clone(), 0);

         // cannot remove by default
         assert_eq!(store.remove(1).await.unwrap(), None);
@@ -1,8 +1,12 @@
+use std::collections::HashSet;
+
 use anyhow::Result;
+use rand::seq::IteratorRandom;
 use rand::Rng;
 use storage::log_store::config::{ConfigTx, ConfigurableExt};
 use storage::log_store::log_manager::DATA_DB_KEY;
 use storage::log_store::Store;
+use tokio::sync::RwLock;

 /// TxStore is used to store pending transactions that to be synchronized in advance.
 ///

@@ -138,6 +142,99 @@ impl TxStore {
     }
 }

+/// Cache the recent inserted tx in memory for random pick with priority.
+pub struct CachedTxStore {
+    tx_store: TxStore,
+    cache_cap: usize,
+    cache: RwLock<HashSet<u64>>,
+}
+
+impl CachedTxStore {
+    pub fn new(name: &'static str, cache_cap: usize) -> Self {
+        Self {
+            tx_store: TxStore::new(name),
+            cache_cap,
+            cache: Default::default(),
+        }
+    }
+
+    pub fn has(&self, store: &dyn Store, tx_seq: u64) -> Result<bool> {
+        self.tx_store.has(store, tx_seq)
+    }
+
+    pub async fn count(&self, store: &dyn Store) -> Result<(usize, usize)> {
+        if self.cache_cap == 0 {
+            return Ok((self.tx_store.count(store)?, 0));
+        }
+
+        let cache = self.cache.read().await;
+
+        Ok((self.tx_store.count(store)?, cache.len()))
+    }
+
+    pub async fn add(
+        &self,
+        store: &dyn Store,
+        db_tx: Option<&mut ConfigTx>,
+        tx_seq: u64,
+    ) -> Result<bool> {
+        if self.cache_cap == 0 {
+            return self.tx_store.add(store, db_tx, tx_seq);
+        }
+
+        let mut cache = self.cache.write().await;
+
+        let added = self.tx_store.add(store, db_tx, tx_seq)?;
+
+        if added {
+            cache.insert(tx_seq);
+
+            if cache.len() > self.cache_cap {
+                if let Some(popped) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
+                    cache.remove(&popped);
+                }
+            }
+        }
+
+        Ok(added)
+    }
+
+    pub async fn random(&self, store: &dyn Store) -> Result<Option<u64>> {
+        if self.cache_cap == 0 {
+            return self.tx_store.random(store);
+        }
+
+        let cache = self.cache.read().await;
+
+        if let Some(v) = cache.iter().choose(&mut rand::thread_rng()).cloned() {
+            return Ok(Some(v));
+        }
+
+        self.tx_store.random(store)
+    }
+
+    pub async fn remove(
+        &self,
+        store: &dyn Store,
+        db_tx: Option<&mut ConfigTx>,
+        tx_seq: u64,
+    ) -> Result<bool> {
+        if self.cache_cap == 0 {
+            return self.tx_store.remove(store, db_tx, tx_seq);
+        }
+
+        let mut cache: tokio::sync::RwLockWriteGuard<'_, HashSet<u64>> = self.cache.write().await;
+
+        let removed = self.tx_store.remove(store, db_tx, tx_seq)?;
+
+        if removed {
+            cache.remove(&tx_seq);
+        }
+
+        Ok(removed)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::test_util::tests::TestStoreRuntime;
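One detail worth noting in the CachedTxStore added above: a cache capacity of 0 turns the cache off entirely, which is how the non-neighbors path (SyncStore::new(store, 0)) and the historical sync store (the added 0 argument in the manager) opt out of it, while count() reports both the persistent total and the cached subset. Below is a self-contained toy model of that contract; CachedQueue is hypothetical, with plain HashSets standing in for both the cache and the on-disk TxStore, and an arbitrary (rather than random) eviction to keep it dependency-free.

use std::collections::HashSet;

// Stand-in for CachedTxStore's cap-0 behaviour: with zero capacity the cache
// is never consulted, so the wrapper degrades to the plain persistent queue.
struct CachedQueue {
    cache_cap: usize,
    cache: HashSet<u64>,
    persistent: HashSet<u64>, // stands in for the on-disk TxStore
}

impl CachedQueue {
    fn new(cache_cap: usize) -> Self {
        Self { cache_cap, cache: HashSet::new(), persistent: HashSet::new() }
    }

    fn add(&mut self, tx_seq: u64) {
        self.persistent.insert(tx_seq);
        if self.cache_cap == 0 {
            return; // cache disabled, e.g. SyncStore::new(store, 0)
        }
        self.cache.insert(tx_seq);
        if self.cache.len() > self.cache_cap {
            // Evict an arbitrary entry (the real CachedTxStore picks one at random).
            let evict = self.cache.iter().next().copied();
            if let Some(evict) = evict {
                self.cache.remove(&evict);
            }
        }
    }

    // Mirrors count(): (entries in the persistent queue, entries currently cached).
    fn counts(&self) -> (usize, usize) {
        (self.persistent.len(), self.cache.len())
    }
}

fn main() {
    let mut enabled = CachedQueue::new(2);
    let mut disabled = CachedQueue::new(0);
    for tx_seq in [1, 2, 3] {
        enabled.add(tx_seq);
        disabled.add(tx_seq);
    }
    assert_eq!(enabled.counts(), (3, 2)); // cache bounded at 2
    assert_eq!(disabled.counts(), (3, 0)); // cache bypassed entirely
    println!("cache cap behaviour ok");
}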
@@ -62,6 +62,7 @@ pub struct Config {
     pub sequential_find_peer_timeout: Duration,
     #[serde(deserialize_with = "deserialize_duration")]
     pub random_find_peer_timeout: Duration,
+    pub ready_txs_cache_cap: usize,
 }

 impl Default for Config {

@@ -69,18 +70,18 @@ impl Default for Config {
         Self {
             // sync service config
             neighbors_only: true,
-            heartbeat_interval: Duration::from_secs(5),
+            heartbeat_interval: Duration::from_secs(3),
             auto_sync_enabled: false,
-            max_sync_files: 8,
+            max_sync_files: 16,
             sync_file_by_rpc_enabled: true,
             sync_file_on_announcement_enabled: false,

             // serial sync config
             max_chunks_to_request: 2 * 1024,
-            max_request_failures: 5,
+            max_request_failures: 3,
             peer_connect_timeout: Duration::from_secs(15),
             peer_disconnect_timeout: Duration::from_secs(15),
-            peer_find_timeout: Duration::from_secs(120),
+            peer_find_timeout: Duration::from_secs(5),
             peer_chunks_download_timeout: Duration::from_secs(15),
             peer_wait_outgoing_connection_timeout: Duration::from_secs(10),
             peer_next_chunks_request_wait_timeout: Duration::from_secs(3),

@@ -91,9 +92,10 @@ impl Default for Config {
             auto_sync_idle_interval: Duration::from_secs(3),
             auto_sync_error_interval: Duration::from_secs(10),
             max_sequential_workers: 0,
-            max_random_workers: 2,
-            sequential_find_peer_timeout: Duration::from_secs(60),
-            random_find_peer_timeout: Duration::from_secs(500),
+            max_random_workers: 8,
+            sequential_find_peer_timeout: Duration::from_secs(5),
+            random_find_peer_timeout: Duration::from_secs(5),
+            ready_txs_cache_cap: 1_000_000,
         }
     }
 }
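The new ready_txs_cache_cap defaults to 1,000,000 entries. Since the cache holds only tx sequence numbers (u64) in a HashSet, even the full cap is cheap in memory. A rough, back-of-the-envelope footprint check; the 2x to 4x HashSet overhead factor is an assumption (actual overhead depends on the hash table implementation and load factor):

fn main() {
    let cap: usize = 1_000_000; // default ready_txs_cache_cap
    let payload = cap * std::mem::size_of::<u64>(); // 8 bytes per cached tx seq
    println!("raw payload: ~{} MiB", payload / (1024 * 1024));
    // Hash-table bookkeeping (hashes, control bytes, spare capacity) typically
    // adds a small constant factor; 2-4x is a common rule of thumb.
    println!(
        "with overhead: roughly {}-{} MiB",
        2 * payload / (1024 * 1024),
        4 * payload / (1024 * 1024)
    );
}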
@@ -233,13 +233,13 @@ batcher_announcement_capacity = 100
 auto_sync_enabled = true

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 8
+# max_sync_files = 16

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true

 # Maximum number of continous failures to terminate a file sync.
-# max_request_failures = 5
+# max_request_failures = 3

 # Timeout to dial peers.
 # peer_connect_timeout = "15s"

@@ -248,7 +248,7 @@ auto_sync_enabled = true
 # peer_disconnect_timeout = "15s"

 # Timeout to find peers via FIND_FILE P2P pubsub message.
-# peer_find_timeout = "120s"
+# peer_find_timeout = "5s"

 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

@@ -261,13 +261,13 @@ auto_sync_enabled = true
 # max_sequential_workers = 0

 # Maximum threads to sync files randomly.
-# max_random_workers = 2
+# max_random_workers = 8

 # Timeout to terminate a file sync in sequence.
-# sequential_find_peer_timeout = "60s"
+# sequential_find_peer_timeout = "5s"

 # Timeout to terminate a file sync randomly.
-# random_find_peer_timeout = "500s"
+# random_find_peer_timeout = "5s"

 #######################################################################
 ### File Location Cache Options ###
@@ -245,13 +245,13 @@ batcher_announcement_capacity = 100
 auto_sync_enabled = true

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 8
+# max_sync_files = 16

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true

 # Maximum number of continous failures to terminate a file sync.
-# max_request_failures = 5
+# max_request_failures = 3

 # Timeout to dial peers.
 # peer_connect_timeout = "15s"

@@ -260,7 +260,7 @@ auto_sync_enabled = true
 # peer_disconnect_timeout = "15s"

 # Timeout to find peers via FIND_FILE P2P pubsub message.
-# peer_find_timeout = "120s"
+# peer_find_timeout = "5s"

 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

@@ -273,13 +273,13 @@ auto_sync_enabled = true
 # max_sequential_workers = 0

 # Maximum threads to sync files randomly.
-# max_random_workers = 2
+# max_random_workers = 8

 # Timeout to terminate a file sync in sequence.
-# sequential_find_peer_timeout = "60s"
+# sequential_find_peer_timeout = "5s"

 # Timeout to terminate a file sync randomly.
-# random_find_peer_timeout = "500s"
+# random_find_peer_timeout = "5s"

 #######################################################################
 ### File Location Cache Options ###
@@ -247,13 +247,13 @@
 # auto_sync_enabled = false

 # Maximum number of files in sync from other peers simultaneously.
-# max_sync_files = 8
+# max_sync_files = 16

 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true

 # Maximum number of continous failures to terminate a file sync.
-# max_request_failures = 5
+# max_request_failures = 3

 # Timeout to dial peers.
 # peer_connect_timeout = "15s"

@@ -262,7 +262,7 @@
 # peer_disconnect_timeout = "15s"

 # Timeout to find peers via FIND_FILE P2P pubsub message.
-# peer_find_timeout = "120s"
+# peer_find_timeout = "5s"

 # Timeout to download data from remote peer.
 # peer_chunks_download_timeout = "15s"

@@ -275,13 +275,13 @@
 # max_sequential_workers = 0

 # Maximum threads to sync files randomly.
-# max_random_workers = 2
+# max_random_workers = 8

 # Timeout to terminate a file sync in sequence.
-# sequential_find_peer_timeout = "60s"
+# sequential_find_peer_timeout = "5s"

 # Timeout to terminate a file sync randomly.
-# random_find_peer_timeout = "500s"
+# random_find_peer_timeout = "5s"

 #######################################################################
 ### File Location Cache Options ###
@@ -1 +1 @@
-1.75.0
+1.78.0