mirror of https://github.com/0glabs/0g-storage-node.git (synced 2025-04-04 15:35:18 +00:00)
add detailed metrics in slow operations
This commit is contained in: parent 3cd3a336c2, commit d5541abdd8
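Every hunk below applies the same instrumentation shape: take an Instant before a slow store operation, run the operation, then report the elapsed time to a timer that was registered once in the log_store metrics module. The sketch below illustrates that shape in isolation; SlowOpTimer and instrumented_put are hypothetical stand-ins, not code from the repository, whose real metrics crate is only assumed to expose register_timer and Timer::update_since as the diff shows.

// Minimal, self-contained sketch of the timing pattern added in this commit.
// `SlowOpTimer` is a hypothetical stand-in for the project's metrics timer
// (in the real code, `register_timer(name)` returns an `Arc<dyn Timer>` and
// elapsed time is reported with `update_since`).
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;

#[derive(Default)]
struct SlowOpTimer {
    total_micros: AtomicU64,
    calls: AtomicU64,
}

impl SlowOpTimer {
    // Mirrors the `Timer::update_since(start_time)` calls in the diff.
    fn update_since(&self, start: Instant) {
        self.total_micros
            .fetch_add(start.elapsed().as_micros() as u64, Ordering::Relaxed);
        self.calls.fetch_add(1, Ordering::Relaxed);
    }
}

// Shape of each instrumented method: timestamp, run the slow call, report,
// then return the call's result unchanged.
fn instrumented_put(timer: &SlowOpTimer) -> Result<(), String> {
    let start_time = Instant::now();
    let res = Ok(()); // placeholder for the slow database write
    timer.update_since(start_time);
    res
}

fn main() {
    let timer = SlowOpTimer::default();
    instrumented_put(&timer).unwrap();
    println!(
        "calls: {}, total elapsed: {} µs",
        timer.calls.load(Ordering::Relaxed),
        timer.total_micros.load(Ordering::Relaxed)
    );
}

The instrumented methods in the diff keep the operation's Result in a local, update the timer, and return that Result untouched, so the timing never changes behavior.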
@@ -1,13 +1,14 @@
-use super::load_chunk::EntryBatch;
-use super::seal_task_manager::SealTaskManager;
-use super::{MineLoadChunk, SealAnswer, SealTask};
 use crate::config::ShardConfig;
 use crate::error::Error;
+use crate::log_store::load_chunk::EntryBatch;
 use crate::log_store::log_manager::{
     bytes_to_entries, data_to_merkle_leaves, COL_ENTRY_BATCH, COL_ENTRY_BATCH_ROOT,
     COL_FLOW_MPT_NODES, ENTRY_SIZE, PORA_CHUNK_SIZE,
 };
-use crate::log_store::{FlowRead, FlowSeal, FlowWrite};
+use crate::log_store::seal_task_manager::SealTaskManager;
+use crate::log_store::{
+    metrics, FlowRead, FlowSeal, FlowWrite, MineLoadChunk, SealAnswer, SealTask,
+};
 use crate::{try_option, ZgsKeyValueDB};
 use anyhow::{anyhow, bail, Result};
 use append_merkle::{MerkleTreeInitialData, MerkleTreeRead};
@@ -20,6 +21,7 @@ use std::cmp::Ordering;
 use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::sync::Arc;
+use std::time::Instant;
 use std::{cmp, mem};
 use tracing::{debug, error, trace};
 use zgs_spec::{BYTES_PER_SECTOR, SEALS_PER_LOAD, SECTORS_PER_LOAD, SECTORS_PER_SEAL};
@@ -40,7 +42,11 @@ impl FlowStore {
     }
 
     pub fn put_batch_root_list(&self, root_map: BTreeMap<usize, (DataRoot, usize)>) -> Result<()> {
-        self.db.put_batch_root_list(root_map)
+        let start_time = Instant::now();
+        let res = self.db.put_batch_root_list(root_map);
+
+        metrics::PUT_BATCH_ROOT_LIST.update_since(start_time);
+        res
     }
 
     pub fn insert_subtree_list_for_batch(
@@ -48,6 +54,7 @@ impl FlowStore {
         batch_index: usize,
         subtree_list: Vec<(usize, usize, DataRoot)>,
     ) -> Result<()> {
+        let start_time = Instant::now();
         let mut batch = self
             .db
             .get_entry_batch(batch_index as u64)?
@@ -55,6 +62,8 @@ impl FlowStore {
         batch.set_subtree_list(subtree_list);
         self.db.put_entry_raw(vec![(batch_index as u64, batch)])?;
+
+        metrics::INSERT_SUBTREE_LIST.update_since(start_time);
 
         Ok(())
     }
 
@@ -73,7 +82,10 @@ impl FlowStore {
     }
 
     pub fn put_mpt_node_list(&self, node_list: Vec<(usize, usize, DataRoot)>) -> Result<()> {
-        self.db.put_mpt_node_list(node_list)
+        let start_time = Instant::now();
+        let res = self.db.put_mpt_node_list(node_list);
+        metrics::PUT_MPT_NODE.update_since(start_time);
+        res
     }
 
     pub fn delete_batch_list(&self, batch_list: &[u64]) -> Result<()> {
@@ -936,6 +936,7 @@ impl LogManager {
     #[instrument(skip(self, merkle))]
     fn pad_tx(&self, tx_start_index: u64, merkle: &mut MerkleManager) -> Result<()> {
         // Check if we need to pad the flow.
+        let start_time = Instant::now();
         let mut tx_start_flow_index =
             merkle.last_chunk_start_index() + merkle.last_chunk_merkle.leaves() as u64;
         let pad_size = tx_start_index - tx_start_flow_index;
@@ -1025,6 +1026,8 @@ impl LogManager {
             merkle.pora_chunks_merkle.leaves(),
             merkle.last_chunk_merkle.leaves()
         );
+
+        metrics::PAD_TX.update_since(start_time);
         Ok(())
     }
 
@@ -5,10 +5,21 @@ use metrics::{register_timer, Timer};
 lazy_static::lazy_static! {
     pub static ref TX_STORE_PUT: Arc<dyn Timer> = register_timer("log_store_tx_store_put_tx");
 
+    pub static ref CHECK_TX_COMPLETED: Arc<dyn Timer> =
+        register_timer("log_store_log_manager_check_tx_completed");
+
     pub static ref APPEND_SUBTREE_LIST: Arc<dyn Timer> =
         register_timer("log_store_log_manager_append_subtree_list");
 
     pub static ref COPY_TX_AND_FINALIZE: Arc<dyn Timer> =
         register_timer("log_store_log_manager_copy_tx_and_finalize");
 
+    pub static ref PAD_TX: Arc<dyn Timer> = register_timer("log_store_log_manager_pad_tx");
+
+    pub static ref PUT_BATCH_ROOT_LIST: Arc<dyn Timer> = register_timer("log_store_flow_store_put_batch_root_list");
+
+    pub static ref INSERT_SUBTREE_LIST: Arc<dyn Timer> =
+        register_timer("log_store_log_manager_insert_subtree_list");
+
+    pub static ref PUT_MPT_NODE: Arc<dyn Timer> = register_timer("log_store_log_manager_put_mpt_node");
 }
@@ -180,8 +180,12 @@ impl TransactionStore {
     }
 
     pub fn check_tx_completed(&self, tx_seq: u64) -> Result<bool> {
-        Ok(self.kvdb.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?
-            == Some(vec![TX_STATUS_FINALIZED]))
+        let start_time = Instant::now();
+        let res = self.kvdb.get(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?
+            == Some(vec![TX_STATUS_FINALIZED]);
+
+        metrics::CHECK_TX_COMPLETED.update_since(start_time);
+        Ok(res)
     }
 
     pub fn check_tx_pruned(&self, tx_seq: u64) -> Result<bool> {