Mirror of https://github.com/0glabs/0g-storage-node.git, synced 2024-12-24 07:15:17 +00:00
Refactor find file (#294)
Some checks failed
abi-consistent-check / build-and-compare (push) Has been cancelled
code-coverage / unittest-cov (push) Has been cancelled
rust / check (push) Has been cancelled
rust / test (push) Has been cancelled
rust / lints (push) Has been cancelled
functional-test / test (push) Has been cancelled
* refactor find file gossip
* refactor find chunks gossip
* refactor announce file gossip
* fix announce file issue
* refactor find chunks gossip
This commit is contained in:
parent afa471e9ae
commit 6b2420cac4
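Note on the shape of the change before the hunks below: the refactor removes the per-message timestamp (and the num_shard/shard_id pair) from each gossip payload and wraps FindFile, FindChunks, AnnounceFile and AnnounceChunks in a generic TimedMessage, with signed announcements nesting one level deeper. The following is a minimal, hypothetical sketch of that nesting with placeholder field types, not the crate's real definitions in network::types:

// Hypothetical, simplified sketch of the wrapper nesting introduced by this refactor.
// The real AnnounceFile/TimedMessage/SignedMessage derive ssz Encode/Decode; the
// field types below are placeholders for illustration only.
#[derive(Debug)]
struct AnnounceFile {
    tx_ids: Vec<u64>, // placeholder for Vec<TxID>
    at: String,       // placeholder for WrappedMultiaddr
}

#[derive(Debug)]
struct TimedMessage<T> {
    inner: T,       // the gossip payload
    timestamp: u32, // the expiry check now lives in the wrapper, not in each payload
}

#[derive(Debug)]
struct SignedMessage<T> {
    inner: T, // in the new code this is TimedMessage<AnnounceFile>
    signature: Vec<u8>,
    resend_timestamp: u32,
}

// New alias shape from the diff: SignedAnnounceFile = SignedMessage<TimedMessage<AnnounceFile>>.
type SignedAnnounceFile = SignedMessage<TimedMessage<AnnounceFile>>;

fn main() {
    let file = SignedAnnounceFile {
        inner: TimedMessage {
            inner: AnnounceFile {
                tx_ids: vec![1],
                at: "/ip4/127.0.0.1/tcp/10000".to_string(),
            },
            timestamp: 0,
        },
        signature: vec![],
        resend_timestamp: 0,
    };
    // The extra TimedMessage layer is why the updated test writes
    // `file.inner.inner.at` instead of `file.inner.at`.
    println!("{:?}", file.inner.inner.at);
}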
@@ -297,10 +297,9 @@ impl FileLocationCache {
         INSERT_BATCH.update(announcement.tx_ids.len() as u64);
 
         let peer_id = *announcement.peer_id;
-        // FIXME: Check validity.
-        let shard_config = ShardConfig {
-            shard_id: announcement.shard_id,
-            num_shard: announcement.num_shard,
+        let shard_config = match ShardConfig::try_from(announcement.shard_config) {
+            Ok(v) => v,
+            Err(_) => return,
         };
         self.insert_peer_config(peer_id, shard_config);
 

@@ -1,6 +1,6 @@
 use network::{
     libp2p::identity,
-    types::{AnnounceFile, SignedAnnounceFile, SignedMessage},
+    types::{AnnounceFile, SignedAnnounceFile, SignedMessage, TimedMessage},
     Multiaddr, PeerId,
 };
 use shared_types::{timestamp_now, TxID};

@@ -34,12 +34,13 @@ impl AnnounceFileBuilder {
         let at: Multiaddr = "/ip4/127.0.0.1/tcp/10000".parse().unwrap();
         let timestamp = self.timestamp.unwrap_or_else(timestamp_now);
 
-        let msg = AnnounceFile {
-            tx_ids: vec![tx_id],
-            num_shard: 1,
-            shard_id: 0,
-            peer_id: peer_id.into(),
-            at: at.into(),
+        let msg = TimedMessage {
+            inner: AnnounceFile {
+                tx_ids: vec![tx_id],
+                shard_config: Default::default(),
+                peer_id: peer_id.into(),
+                at: at.into(),
+            },
             timestamp,
         };
 

@@ -8,6 +8,6 @@ pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
 pub use globals::NetworkGlobals;
 pub use pubsub::{
     AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, PubsubMessage,
-    SignedAnnounceChunks, SignedAnnounceFile, SignedMessage, SnappyTransform, TimedMessage,
+    SignedAnnounceFile, SignedMessage, SnappyTransform, TimedMessage,
 };
 pub use topics::{GossipEncoding, GossipKind, GossipTopic};

@@ -117,11 +117,7 @@ impl ssz::Decode for WrappedPeerId {
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
 pub struct FindFile {
     pub tx_id: TxID,
-    pub num_shard: usize,
-    pub shard_id: usize,
-    /// Indicates whether publish to neighboar nodes only.
-    pub neighbors_only: bool,
-    pub timestamp: u32,
+    pub maybe_shard_config: Option<ShardConfig>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]

@@ -129,17 +125,14 @@ pub struct FindChunks {
     pub tx_id: TxID,
     pub index_start: u64, // inclusive
     pub index_end: u64, // exclusive
-    pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]
 pub struct AnnounceFile {
     pub tx_ids: Vec<TxID>,
-    pub num_shard: usize,
-    pub shard_id: usize,
+    pub shard_config: ShardConfig,
     pub peer_id: WrappedPeerId,
     pub at: WrappedMultiaddr,
-    pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]

@@ -149,7 +142,6 @@ pub struct AnnounceChunks {
     pub index_end: u64, // exclusive
     pub peer_id: WrappedPeerId,
     pub at: WrappedMultiaddr,
-    pub timestamp: u32,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Encode, Decode)]

@@ -214,9 +206,7 @@ impl<T: Encode + Decode> HasSignature for SignedMessage<T> {
     }
 }
 
-pub type SignedAnnounceFile = SignedMessage<AnnounceFile>;
-pub type SignedAnnounceChunks = SignedMessage<AnnounceChunks>;
+pub type SignedAnnounceFile = SignedMessage<TimedMessage<AnnounceFile>>;
 
 type SignedAnnounceFiles = Vec<SignedAnnounceFile>;
 
 #[derive(Debug, Clone, PartialEq, Eq)]

@@ -226,11 +216,16 @@ pub enum PubsubMessage {
     NewFile(TimedMessage<ShardedFile>),
     /// Published to neighbors for file sync, and answered by `AnswerFile` RPC.
     AskFile(TimedMessage<ShardedFile>),
-    FindFile(FindFile),
-    FindChunks(FindChunks),
+    /// Published to network to find specified file.
+    FindFile(TimedMessage<FindFile>),
+    /// Published to network to find specified chunks.
+    FindChunks(TimedMessage<FindChunks>),
+    /// Published to network to announce file.
     AnnounceFile(Vec<SignedAnnounceFile>),
+    /// Published to network to announce shard config.
     AnnounceShardConfig(TimedMessage<ShardConfig>),
-    AnnounceChunks(SignedAnnounceChunks),
+    /// Published to network to announce chunks.
+    AnnounceChunks(TimedMessage<AnnounceChunks>),
 }
 
 // Implements the `DataTransform` trait of gossipsub to employ snappy compression

@@ -341,17 +336,19 @@ impl PubsubMessage {
                         .map_err(|e| format!("{:?}", e))?,
                 )),
                 GossipKind::FindFile => Ok(PubsubMessage::FindFile(
-                    FindFile::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
+                    TimedMessage::<FindFile>::from_ssz_bytes(data)
+                        .map_err(|e| format!("{:?}", e))?,
                 )),
                 GossipKind::FindChunks => Ok(PubsubMessage::FindChunks(
-                    FindChunks::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?,
+                    TimedMessage::<FindChunks>::from_ssz_bytes(data)
+                        .map_err(|e| format!("{:?}", e))?,
                 )),
                 GossipKind::AnnounceFile => Ok(PubsubMessage::AnnounceFile(
                     SignedAnnounceFiles::from_ssz_bytes(data)
                         .map_err(|e| format!("{:?}", e))?,
                 )),
                 GossipKind::AnnounceChunks => Ok(PubsubMessage::AnnounceChunks(
-                    SignedAnnounceChunks::from_ssz_bytes(data)
+                    TimedMessage::<AnnounceChunks>::from_ssz_bytes(data)
                         .map_err(|e| format!("{:?}", e))?,
                 )),
                 GossipKind::AnnounceShardConfig => Ok(PubsubMessage::AnnounceShardConfig(

@@ -10,10 +10,10 @@ pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy";
 pub const EXAMPLE_TOPIC: &str = "example";
 pub const NEW_FILE_TOPIC: &str = "new_file_v2";
 pub const ASK_FILE_TOPIC: &str = "ask_file";
-pub const FIND_FILE_TOPIC: &str = "find_file";
-pub const FIND_CHUNKS_TOPIC: &str = "find_chunks";
-pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file";
-pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks";
+pub const FIND_FILE_TOPIC: &str = "find_file_v2";
+pub const FIND_CHUNKS_TOPIC: &str = "find_chunks_v2";
+pub const ANNOUNCE_FILE_TOPIC: &str = "announce_file_v2";
+pub const ANNOUNCE_CHUNKS_TOPIC: &str = "announce_chunks_v2";
 pub const ANNOUNCE_SHARD_CONFIG_TOPIC: &str = "announce_shard_config_v2";
 
 /// A gossipsub topic which encapsulates the type of messages that should be sent and received over
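The _v2 suffixes above keep the old and new wire formats apart: since the FindFile, FindChunks, AnnounceFile and AnnounceChunks payloads change shape in this commit, bumping the topic names means upgraded nodes gossip on fresh topics instead of handing undecodable bytes to un-upgraded peers. A small, hypothetical illustration of that partitioning follows (the constants mirror the diff; the helper function is not the crate's API):

// Hypothetical sketch: v1 and v2 nodes subscribe to different topic strings, so a
// payload-format change never reaches a decoder that cannot parse it.
const FIND_FILE_TOPIC_V1: &str = "find_file";
const FIND_FILE_TOPIC_V2: &str = "find_file_v2";

fn subscribed_topic(upgraded: bool) -> &'static str {
    if upgraded {
        FIND_FILE_TOPIC_V2
    } else {
        FIND_FILE_TOPIC_V1
    }
}

fn main() {
    // An upgraded node and a legacy node never share the FindFile topic.
    assert_ne!(subscribed_topic(true), subscribed_topic(false));
    println!("{} vs {}", subscribed_topic(true), subscribed_topic(false));
}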
@@ -9,8 +9,8 @@ use network::types::TimedMessage;
 use network::{
     rpc::StatusMessage,
     types::{
-        AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, SignedAnnounceChunks,
-        SignedAnnounceFile, SignedMessage,
+        AnnounceChunks, AnnounceFile, FindChunks, FindFile, HasSignature, SignedAnnounceFile,
+        SignedMessage,
     },
     Keypair, MessageAcceptance, MessageId, NetworkGlobals, NetworkMessage, PeerId, PeerRequestId,
     PublicKey, PubsubMessage, Request, RequestId, Response,

@@ -375,10 +375,7 @@ impl Libp2pEventHandler {
             PubsubMessage::ExampleMessage(_) => MessageAcceptance::Ignore,
             PubsubMessage::NewFile(msg) => self.on_new_file(propagation_source, msg).await,
             PubsubMessage::AskFile(msg) => self.on_ask_file(propagation_source, msg).await,
-            PubsubMessage::FindFile(msg) => {
-                metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.mark(1);
-                self.on_find_file(propagation_source, msg).await
-            }
+            PubsubMessage::FindFile(msg) => self.on_find_file(propagation_source, msg).await,
             PubsubMessage::FindChunks(msg) => self.on_find_chunks(propagation_source, msg).await,
             PubsubMessage::AnnounceFile(msgs) => {
                 metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE.mark(1);

@@ -573,12 +570,13 @@ impl Libp2pEventHandler {
         let timestamp = timestamp_now();
         let shard_config = self.store.get_store().get_shard_config();
 
-        let msg = AnnounceFile {
-            tx_ids,
-            num_shard: shard_config.num_shard,
-            shard_id: shard_config.shard_id,
-            peer_id: peer_id.into(),
-            at: addr.into(),
+        let msg = TimedMessage {
+            inner: AnnounceFile {
+                tx_ids,
+                shard_config: shard_config.into(),
+                peer_id: peer_id.into(),
+                at: addr.into(),
+            },
             timestamp,
         };
 

@@ -595,80 +593,44 @@ impl Libp2pEventHandler {
         Some(signed)
     }
 
-    async fn on_find_file(&self, from: PeerId, msg: FindFile) -> MessageAcceptance {
-        let FindFile {
-            tx_id, timestamp, ..
-        } = msg;
-
+    async fn on_find_file(&self, from: PeerId, msg: TimedMessage<FindFile>) -> MessageAcceptance {
         // verify timestamp
-        let d = duration_since(
-            timestamp,
-            metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY.clone(),
-        );
-        let timeout = if msg.neighbors_only {
-            *PUBSUB_TIMEOUT_NEIGHBORS
-        } else {
-            *PUBSUB_TIMEOUT_NETWORK
-        };
-        if d < TOLERABLE_DRIFT.neg() || d > timeout {
-            debug!(%timestamp, ?d, "Invalid timestamp, ignoring FindFile message");
-            metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT.mark(1);
-            if msg.neighbors_only {
-                self.send_to_network(NetworkMessage::ReportPeer {
-                    peer_id: from,
-                    action: PeerAction::LowToleranceError,
-                    source: ReportSource::Gossipsub,
-                    msg: "Received out of date FindFile message",
-                });
-            }
+        if !metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE.verify_timestamp(
+            from,
+            msg.timestamp,
+            *PUBSUB_TIMEOUT_NETWORK,
+            None,
+        ) {
             return MessageAcceptance::Ignore;
         }
 
-        // verify announced shard config
-        let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
-            Ok(v) => v,
-            Err(_) => return MessageAcceptance::Reject,
-        };
-
-        // handle on shard config mismatch
-        let my_shard_config = self.store.get_store().get_shard_config();
-        if !my_shard_config.intersect(&announced_shard_config) {
-            return if msg.neighbors_only {
-                MessageAcceptance::Ignore
-            } else {
-                MessageAcceptance::Accept
-            };
+        // verify announced shard config if specified
+        if let Some(shard_config) = msg.maybe_shard_config {
+            let announced_shard_config = match ShardConfig::try_from(shard_config) {
+                Ok(v) => v,
+                Err(_) => return MessageAcceptance::Reject,
+            };
+
+            // forward FIND_FILE to the network if shard config mismatch
+            let my_shard_config = self.store.get_store().get_shard_config();
+            if !my_shard_config.intersect(&announced_shard_config) {
+                return MessageAcceptance::Accept;
+            }
         }
 
         // check if we have it
+        let tx_id = msg.tx_id;
         if matches!(self.store.check_tx_completed(tx_id.seq).await, Ok(true)) {
            if let Ok(Some(tx)) = self.store.get_tx_by_seq_number(tx_id.seq).await {
                 if tx.id() == tx_id {
                     trace!(?tx_id, "Found file locally, responding to FindFile query");
-                    if msg.neighbors_only {
-                        // announce file via RPC to avoid flooding pubsub message
-                        self.send_to_network(NetworkMessage::SendRequest {
-                            peer_id: from,
-                            request: Request::AnswerFile(ShardedFile {
-                                tx_id,
-                                shard_config: my_shard_config.into(),
-                            }),
-                            request_id: RequestId::Router(Instant::now()),
-                        });
-                    } else if self.publish_file(tx_id).await.is_some() {
-                        metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1);
-                        return MessageAcceptance::Ignore;
-                    }
+                    self.publish_file(tx_id).await;
+                    metrics::LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE.mark(1);
+                    return MessageAcceptance::Ignore;
                 }
             }
         }
 
-        // do not forward to whole network if only find file from neighbor nodes
-        if msg.neighbors_only {
-            return MessageAcceptance::Ignore;
-        }
-
         // try from cache
         if let Some(mut msg) = self.file_location_cache.get_one(tx_id) {
             trace!(?tx_id, "Found file in cache, responding to FindFile query");
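The hand-rolled timestamp check removed above (duration_since, TOLERABLE_DRIFT, a per-message timeout) is what the new PubsubMsgHandleMetrics::verify_timestamp call consolidates for FindFile, FindChunks and AnnounceChunks alike. Below is a hedged restatement of the window it enforces, with the delta modeled as signed seconds instead of the crate's Duration types:

// Hypothetical restatement of the timestamp window from the removed code. Here
// d_secs = now - msg.timestamp in seconds; a negative value means the message
// claims a future time. A message is ignored when it is further in the future
// than the tolerable clock drift, or older than the pubsub timeout.
fn timestamp_ok(d_secs: i64, tolerable_drift_secs: i64, timeout_secs: i64) -> bool {
    d_secs >= -tolerable_drift_secs && d_secs <= timeout_secs
}

fn main() {
    // 1s of tolerated drift and a 30s timeout are illustrative numbers,
    // not the node's actual configuration.
    assert!(timestamp_ok(5, 1, 30)); // 5s old: accepted
    assert!(!timestamp_ok(-3, 1, 30)); // 3s in the future: rejected as drift
    assert!(!timestamp_ok(45, 1, 30)); // 45s old: rejected as expired
    println!("timestamp window checks passed");
}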
@@ -694,7 +656,6 @@ impl Libp2pEventHandler {
     ) -> Option<PubsubMessage> {
         let peer_id = *self.network_globals.peer_id.read();
         let addr = self.construct_announced_ip().await?;
-        let timestamp = timestamp_now();
 
         let msg = AnnounceChunks {
             tx_id,

@@ -702,26 +663,15 @@ impl Libp2pEventHandler {
             index_end,
             peer_id: peer_id.into(),
             at: addr.into(),
-            timestamp,
         };
 
-        let mut signed = match SignedMessage::sign_message(msg, &self.local_keypair) {
-            Ok(signed) => signed,
-            Err(e) => {
-                error!(%tx_id.seq, %e, "Failed to sign AnnounceChunks message");
-                return None;
-            }
-        };
-
-        signed.resend_timestamp = timestamp;
-
-        Some(PubsubMessage::AnnounceChunks(signed))
+        Some(PubsubMessage::AnnounceChunks(msg.into()))
     }
 
     async fn on_find_chunks(
         &self,
         propagation_source: PeerId,
-        msg: FindChunks,
+        msg: TimedMessage<FindChunks>,
     ) -> MessageAcceptance {
         // verify timestamp
         if !metrics::LIBP2P_HANDLE_PUBSUB_FIND_CHUNKS.verify_timestamp(

@@ -851,7 +801,7 @@ impl Libp2pEventHandler {
         }
 
         // verify announced shard config
-        let announced_shard_config = match ShardConfig::new(msg.shard_id, msg.num_shard) {
+        let announced_shard_config = match ShardConfig::try_from(msg.shard_config) {
             Ok(v) => v,
             Err(_) => return MessageAcceptance::Reject,
         };

@@ -862,7 +812,7 @@ impl Libp2pEventHandler {
             metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_LATENCY.clone(),
         );
         if d < TOLERABLE_DRIFT.neg() || d > *PUBSUB_TIMEOUT_NETWORK {
-            debug!(%msg.resend_timestamp, ?d, "Invalid resend timestamp, ignoring AnnounceFile message");
+            debug!(?d, %propagation_source, "Invalid resend timestamp, ignoring AnnounceFile message");
             metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_TIMEOUT.mark(1);
             return MessageAcceptance::Ignore;
         }

@@ -922,7 +872,7 @@ impl Libp2pEventHandler {
     fn on_announce_chunks(
         &self,
         propagation_source: PeerId,
-        msg: SignedAnnounceChunks,
+        msg: TimedMessage<AnnounceChunks>,
     ) -> MessageAcceptance {
         // verify timestamp
         if !metrics::LIBP2P_HANDLE_PUBSUB_ANNOUNCE_CHUNKS.verify_timestamp(

@@ -934,11 +884,6 @@ impl Libp2pEventHandler {
             return MessageAcceptance::Ignore;
         }
 
-        // verify message signature
-        if !verify_signature(&msg, &msg.peer_id, propagation_source) {
-            return MessageAcceptance::Reject;
-        }
-
         // verify public ip address if required
         let addr = msg.at.clone().into();
         if !self.config.private_ip_enabled && !Self::contains_public_ip(&addr) {

@@ -1009,23 +954,17 @@ impl Libp2pEventHandler {
         true
     }
 
-    async fn publish_file(&self, tx_id: TxID) -> Option<bool> {
-        match self.file_batcher.write().await.add(tx_id) {
-            Some(batch) => {
-                let announcement = self.construct_announce_file_message(batch).await?;
-                Some(self.publish_announcement(announcement).await)
+    async fn publish_file(&self, tx_id: TxID) {
+        if let Some(batch) = self.file_batcher.write().await.add(tx_id) {
+            if let Some(announcement) = self.construct_announce_file_message(batch).await {
+                self.publish_announcement(announcement).await;
             }
-            None => Some(false),
         }
     }
 
-    async fn publish_announcement(&self, announcement: SignedAnnounceFile) -> bool {
-        match self.announcement_batcher.write().await.add(announcement) {
-            Some(batch) => {
-                self.publish(PubsubMessage::AnnounceFile(batch));
-                true
-            }
-            None => false,
+    async fn publish_announcement(&self, announcement: SignedAnnounceFile) {
+        if let Some(batch) = self.announcement_batcher.write().await.add(announcement) {
+            self.publish(PubsubMessage::AnnounceFile(batch));
         }
     }
 

@@ -1378,11 +1317,11 @@ mod tests {
     ) -> MessageAcceptance {
         let (alice, bob) = (PeerId::random(), PeerId::random());
         let id = MessageId::new(b"dummy message");
-        let message = PubsubMessage::FindFile(FindFile {
-            tx_id,
-            num_shard: 1,
-            shard_id: 0,
-            neighbors_only: false,
+        let message = PubsubMessage::FindFile(TimedMessage {
+            inner: FindFile {
+                tx_id,
+                maybe_shard_config: None,
+            },
             timestamp,
         });
         handler.on_pubsub_message(alice, bob, &id, message).await

@@ -1471,7 +1410,7 @@ mod tests {
             .await
             .unwrap();
         let malicious_addr: Multiaddr = "/ip4/127.0.0.38/tcp/30000".parse().unwrap();
-        file.inner.at = malicious_addr.into();
+        file.inner.inner.at = malicious_addr.into();
         let message = PubsubMessage::AnnounceFile(vec![file]);
 
         // failed to verify signature

@@ -74,12 +74,11 @@ lazy_static::lazy_static! {
     pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_SHARD: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("announce_shard");
 
     // libp2p_event_handler: find & announce file
-    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "qps");
-    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_find_file", "latency", 1024);
-    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "timeout");
+    pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE: PubsubMsgHandleMetrics = PubsubMsgHandleMetrics::new("find_file");
     pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_STORE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "store");
     pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_CACHE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "cache");
     pub static ref LIBP2P_HANDLE_PUBSUB_FIND_FILE_FORWARD: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_find_file", "forward");
 
     pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "qps");
     pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_LATENCY: Arc<dyn Histogram> = Sample::ExpDecay(0.015).register_with_group("router_libp2p_handle_pubsub_announce_file", "latency", 1024);
     pub static ref LIBP2P_HANDLE_PUBSUB_ANNOUNCE_FILE_TIMEOUT: Arc<dyn Meter> = register_meter_with_group("router_libp2p_handle_pubsub_announce_file", "timeout");

@@ -409,6 +409,15 @@ pub struct ShardConfig {
     pub shard_id: usize,
 }
 
+impl Default for ShardConfig {
+    fn default() -> Self {
+        ShardConfig {
+            num_shard: 1,
+            shard_id: 0,
+        }
+    }
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, DeriveEncode, DeriveDecode)]
 pub struct ShardedFile {
     pub tx_id: TxID,
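With num_shard/shard_id folded into a single shard_config field, receivers in the hunks above validate announcements uniformly through ShardConfig::try_from, dropping the message on failure, while tests and senders fall back on the new Default (one shard, id 0). A hedged sketch of that pattern follows; the stand-in validation rule (shard_id must be less than num_shard) is illustrative only and is not taken from this diff:

// Hypothetical stand-in for the wire-level shard config and its validated conversion.
#[derive(Debug, Clone, Copy)]
struct ShardConfigMessage {
    num_shard: usize,
    shard_id: usize,
}

#[derive(Debug, Clone, Copy)]
struct ShardConfig {
    num_shard: usize,
    shard_id: usize,
}

impl Default for ShardConfig {
    // Mirrors the Default impl added in the diff: a single shard, id 0.
    fn default() -> Self {
        ShardConfig { num_shard: 1, shard_id: 0 }
    }
}

impl TryFrom<ShardConfigMessage> for ShardConfig {
    type Error = String;

    // Illustrative rule only: reject configs whose shard_id is out of range.
    fn try_from(msg: ShardConfigMessage) -> Result<Self, Self::Error> {
        if msg.shard_id >= msg.num_shard {
            return Err(format!("invalid shard config: {:?}", msg));
        }
        Ok(ShardConfig { num_shard: msg.num_shard, shard_id: msg.shard_id })
    }
}

fn main() {
    // Receivers follow the diff's pattern: Ok(v) => use it, Err(_) => drop the message.
    let announced = ShardConfigMessage { num_shard: 4, shard_id: 7 };
    match ShardConfig::try_from(announced) {
        Ok(v) => println!("accepted {:?}", v),
        Err(_) => println!("rejected announcement, as on_announce_file would"),
    }
    println!("test default: {:?}", ShardConfig::default());
}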
@@ -10,7 +10,7 @@ use network::{
     PeerAction, PeerId, PubsubMessage, SyncId as RequestId,
 };
 use rand::Rng;
-use shared_types::{timestamp_now, ChunkArrayWithProof, ShardedFile, TxID, CHUNK_SIZE};
+use shared_types::{ChunkArrayWithProof, ShardedFile, TxID, CHUNK_SIZE};
 use ssz::Encode;
 use std::{sync::Arc, time::Instant};
 use storage::log_store::log_manager::{sector_to_segment, segment_to_sector, PORA_CHUNK_SIZE};

@@ -220,25 +220,27 @@ impl SerialSyncController {
                 .into(),
             )
         } else {
-            PubsubMessage::FindFile(FindFile {
-                tx_id: self.tx_id,
-                num_shard: shard_config.num_shard,
-                shard_id: shard_config.shard_id,
-                neighbors_only: self.config.neighbors_only,
-                timestamp: timestamp_now(),
-            })
+            PubsubMessage::FindFile(
+                FindFile {
+                    tx_id: self.tx_id,
+                    maybe_shard_config: Some(shard_config.into()),
+                }
+                .into(),
+            )
         };
 
         self.ctx.publish(msg);
     }
 
     fn publish_find_chunks(&self) {
-        self.ctx.publish(PubsubMessage::FindChunks(FindChunks {
-            tx_id: self.tx_id,
-            index_start: self.goal.index_start,
-            index_end: self.goal.index_end,
-            timestamp: timestamp_now(),
-        }));
+        self.ctx.publish(PubsubMessage::FindChunks(
+            FindChunks {
+                tx_id: self.tx_id,
+                index_start: self.goal.index_start,
+                index_end: self.goal.index_end,
+            }
+            .into(),
+        ));
     }
 
     /// Dial to peers in `Found` state, so that `Connecting` or `Connected` peers cover

@@ -13,9 +13,7 @@ use network::{
     rpc::GetChunksRequest, rpc::RPCResponseErrorCode, Multiaddr, NetworkMessage, NetworkSender,
     PeerId, PeerRequestId, PubsubMessage, SyncId as RequestId,
 };
-use shared_types::{
-    bytes_to_chunks, timestamp_now, ChunkArrayWithProof, ShardedFile, Transaction, TxID,
-};
+use shared_types::{bytes_to_chunks, ChunkArrayWithProof, ShardedFile, Transaction, TxID};
 use std::sync::atomic::Ordering;
 use std::{
     cmp,

@@ -580,9 +578,7 @@ impl SyncService {
 
     async fn on_find_file(&mut self, tx_seq: u64) -> Result<()> {
         // file already exists
-        if self.store.check_tx_completed(tx_seq).await?
-            || self.store.check_tx_pruned(tx_seq).await?
-        {
+        if self.store.get_store().get_tx_status(tx_seq)?.is_some() {
             return Ok(());
         }
         // broadcast find file

@@ -590,14 +586,13 @@ impl SyncService {
             Some(tx) => tx,
             None => bail!("Transaction not found"),
         };
-        let shard_config = self.store.get_store().get_shard_config();
-        self.ctx.publish(PubsubMessage::FindFile(FindFile {
-            tx_id: tx.id(),
-            num_shard: shard_config.num_shard,
-            shard_id: shard_config.shard_id,
-            neighbors_only: false,
-            timestamp: timestamp_now(),
-        }));
+        self.ctx.publish(PubsubMessage::FindFile(
+            FindFile {
+                tx_id: tx.id(),
+                maybe_shard_config: None,
+            }
+            .into(),
+        ));
         Ok(())
     }
 

@@ -646,10 +641,8 @@ impl SyncService {
         };
 
         // file already exists
-        if self.store.check_tx_completed(tx_seq).await?
-            || self.store.check_tx_pruned(tx_seq).await?
-        {
-            bail!("File already exists");
+        if let Some(status) = self.store.get_store().get_tx_status(tx_seq)? {
+            bail!("File already exists [{:?}]", status);
         }
 
         let (index_start, index_end, all_chunks) = match maybe_range {

@@ -724,21 +717,12 @@ impl SyncService {
             return;
         }
 
-        // File already exists and ignore the AnnounceFile message
-        match self.store.check_tx_completed(tx_seq).await {
-            Ok(true) => return,
-            Ok(false) => {}
-            Err(err) => {
-                error!(%tx_seq, %err, "Failed to check if file finalized");
-                return;
-            }
-        }
-
-        match self.store.check_tx_pruned(tx_seq).await {
-            Ok(true) => return,
-            Ok(false) => {}
+        // File already exists or pruned, just ignore the AnnounceFile message
+        match self.store.get_store().get_tx_status(tx_seq) {
+            Ok(Some(_)) => return,
+            Ok(None) => {}
             Err(err) => {
-                error!(%tx_seq, %err, "Failed to check if file pruned");
+                error!(%tx_seq, %err, "Failed to get tx status");
                 return;
             }
         }