Mirror of https://github.com/0glabs/0g-storage-node.git
Synced 2025-04-04 15:35:18 +00:00

Compare commits: ebe3aec901...84988ea561

7 commits:
- 84988ea561
- d43a616b56
- 6ace0f7040
- 5f08e3ed41
- 95f897d355
- 0f47e44bf7
- 1f47fdca10
.gitbook/assets/zg-proof-of-random-access.png (new binary file, 591 KiB, not shown)
README.md (21 changed lines)
@@ -1,8 +1,8 @@
-# 0G Storage
+# 0G Storage: Decentralized AI-Optimized Storage
 
-## Overview
+## **Overview**
 
-0G Storage is a decentralized data storage system designed to address the challenges of high-throughput and low-latency data storage and retrieval, particularly for areas such as AI.
+0G Storage is a decentralized storage system designed for massive data workloads, particularly AI and Web3 applications. Unlike traditional centralized storage, 0G distributes data across a network, improving security, availability, and scalability.
 
 ## System Architecture
 
@@ -11,10 +11,16 @@
 1. **Data Publishing Lane**: Ensures fast Merkle tree data root commitment and verification through 0G Chain.
 2. **Data Storage Lane**: Manages large data transfers and storage using an erasure-coding mechanism for redundancy and sharding for parallel processing.
 
-Across the two lanes, 0G Storage supports the following features:
+The system is powered by **Proof of Random Access (PoRA)**, a consensus mechanism that incentivizes miners to store and verify data efficiently.
 
-* **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types.
-* **Validated Incentivization**: Utilizes the PoRA (Proof of Random Access) mining algorithm to mitigate the data outsourcing issue and to ensure rewards are distributed to nodes who contribute to the storage network.
+## **Key Features**
+- **Layered Storage Architecture:**
+  - **Log Layer:** Stores unstructured, append-only data for archival use.
+  - **Key-Value Layer:** Enables fast, structured data retrieval for dynamic applications.
+- **Decentralized Incentives:** Storage nodes earn rewards in 0G tokens by participating in the network.
+- **Scalability:** Optimized for AI workloads, with seamless integration into decentralized AI ecosystems.
+
+
 
 For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/0g-storage).
 
@@ -25,8 +31,9 @@ For in-depth technical details about 0G Storage, please read our [Intro to 0G St
 - If you want to build a project using 0G storage, please refer to the [0G Storage SDK](https://docs.0g.ai/build-with-0g/storage-sdk) guide.
 
 ## Support and Additional Resources
-We want to do everything we can to help you be successful while working on your contribution and projects. Here you'll find various resources and communities that may help you complete a project or contribute to 0G.
+We want to do everything we can to help you be successful while working on your contribution and projects. Here, you'll find various resources and communities that may help you complete a project or contribute to 0G.
 
 ### Communities
 - [0G Telegram](https://t.me/web3_0glabs)
 - [0G Discord](https://discord.com/invite/0glabs)
+- [OG X](https://x.com/0G_labs)
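The README rewrite above centers on Proof of Random Access (PoRA): a miner can only answer a challenge if it actually holds the challenged data. The following is a toy, self-contained sketch of that idea (random chunk selection plus a hash puzzle); it is illustrative only and is not the repository's actual mining code.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Toy PoRA-style round: the prover must hold the data, because the challenged
/// chunk index is derived from the challenge seed and its bytes feed the puzzle.
fn toy_pora_round(stored_chunks: &[Vec<u8>], seed: u64, difficulty: u64) -> Option<(usize, u64)> {
    if stored_chunks.is_empty() {
        return None;
    }
    // 1. Derive a pseudo-random chunk index from the challenge seed.
    let index = (seed as usize) % stored_chunks.len();
    // 2. Search for a nonce such that hash(seed, chunk bytes, nonce) meets the target.
    for nonce in 0..1_000_000u64 {
        let mut hasher = DefaultHasher::new();
        seed.hash(&mut hasher);
        stored_chunks[index].hash(&mut hasher);
        nonce.hash(&mut hasher);
        if hasher.finish() < difficulty {
            // A verifier holding the same chunk can re-check this cheaply.
            return Some((index, nonce));
        }
    }
    None
}

fn main() {
    let chunks = vec![vec![1u8; 256], vec![2u8; 256], vec![3u8; 256]];
    if let Some((index, nonce)) = toy_pora_round(&chunks, 42, u64::MAX / 1_000) {
        println!("answered challenge with chunk {index}, nonce {nonce}");
    }
}
```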
@@ -276,7 +276,7 @@ impl Pruner {
     }
 }
 
-async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
+pub async fn get_shard_config(store: &Store) -> Result<Option<ShardConfig>> {
     store
         .get_config_decoded(&SHARD_CONFIG_KEY, DATA_DB_KEY)
         .await
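Making `get_shard_config` public lets other crates (the client builder in the hunks below) read the shard config persisted under a well-known key in the store. The dependency-free sketch below restates that "config under a fixed key, decoded on read, missing key means None" pattern; the key value, field names, and byte layout are hypothetical, not the repository's actual encoding or API.

```rust
use std::collections::HashMap;

// Hypothetical key value; the repo only tells us such a constant exists.
const SHARD_CONFIG_KEY: &str = "shard_config";

#[derive(Debug, PartialEq)]
struct ShardConfig {
    num_shard: u32,
    shard_id: u32,
}

// Stand-in for a keyed config read: a missing key is Ok(None), not an error.
fn get_config_decoded(db: &HashMap<String, Vec<u8>>, key: &str) -> Result<Option<ShardConfig>, String> {
    match db.get(key) {
        None => Ok(None),
        Some(bytes) if bytes.len() == 8 => Ok(Some(ShardConfig {
            num_shard: u32::from_le_bytes(bytes[0..4].try_into().unwrap()),
            shard_id: u32::from_le_bytes(bytes[4..8].try_into().unwrap()),
        })),
        Some(_) => Err("malformed shard config".into()),
    }
}

fn main() {
    let mut db = HashMap::new();
    // Nothing stored yet: reads back as None.
    assert_eq!(get_config_decoded(&db, SHARD_CONFIG_KEY).unwrap(), None);

    // Persist a config, then read and decode it.
    let mut bytes = 4u32.to_le_bytes().to_vec();
    bytes.extend_from_slice(&1u32.to_le_bytes());
    db.insert(SHARD_CONFIG_KEY.to_string(), bytes);
    println!("{:?}", get_config_decoded(&db, SHARD_CONFIG_KEY).unwrap());
}
```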
@@ -7,7 +7,7 @@ use network::{
     self, new_network_channel, Keypair, NetworkConfig, NetworkGlobals, NetworkReceiver,
     NetworkSender, RequestId, Service as LibP2PService,
 };
-use pruner::{Pruner, PrunerConfig, PrunerMessage};
+use pruner::{get_shard_config, Pruner, PrunerConfig, PrunerMessage};
 use router::RouterService;
 use rpc::RPCConfig;
 use std::sync::Arc;
@@ -203,7 +203,7 @@ impl ClientBuilder {
         if let Some(config) = config {
             let executor = require!("miner", self, runtime_context).clone().executor;
             let network_send = require!("miner", self, network).send.clone();
-            let store = self.async_store.as_ref().unwrap().clone();
+            let store = require!("miner", self, async_store).clone();
 
             let send = MineService::spawn(executor, network_send, config, store).await?;
             self.miner = Some(MinerComponents { send });
@@ -225,7 +225,11 @@ impl ClientBuilder {
         Ok(self)
     }
 
-    pub async fn with_shard(self, config: ShardConfig) -> Result<Self, String> {
+    pub async fn with_shard(self, mut config: ShardConfig) -> Result<Self, String> {
+        let store = require!("shard", self, async_store).clone();
+        if let Some(stored_config) = get_shard_config(store.as_ref()).await.unwrap_or(None) {
+            config = stored_config;
+        }
         self.async_store
             .as_ref()
             .unwrap()
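Read together, the added lines give `with_shard` a precedence rule: a shard config already persisted in the node's store overrides the one passed in from configuration, which is only used when nothing has been persisted yet. The sketch below restates that rule in a self-contained form; the field names are illustrative, not taken from the repository.

```rust
#[derive(Clone, Debug, PartialEq)]
struct ShardConfig {
    num_shard: usize,
    shard_id: usize,
}

// Mirrors: if let Some(stored_config) = get_shard_config(...).await.unwrap_or(None) { config = stored_config; }
fn effective_shard_config(passed_in: ShardConfig, stored: Option<ShardConfig>) -> ShardConfig {
    stored.unwrap_or(passed_in)
}

fn main() {
    let from_config_file = ShardConfig { num_shard: 4, shard_id: 1 };
    let persisted = ShardConfig { num_shard: 8, shard_id: 3 };

    // First run: nothing persisted yet, so the configured value is used.
    assert_eq!(
        effective_shard_config(from_config_file.clone(), None),
        from_config_file
    );
    // Later runs: the persisted value wins, even if the config file changed.
    assert_eq!(
        effective_shard_config(from_config_file, Some(persisted.clone())),
        persisted
    );
    println!("precedence checks passed");
}
```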
@@ -23,6 +23,8 @@ async fn start_node(context: RuntimeContext, config: ZgsConfig) -> Result<Client
     ClientBuilder::default()
         .with_runtime_context(context)
         .with_rocksdb_store(&storage_config)?
+        .with_shard(shard_config)
+        .await?
         .with_log_sync(log_sync_config)
         .await?
         .with_file_location_cache(config.file_location_cache)
@@ -34,8 +36,6 @@
         .await?
         .with_miner(miner_config)
         .await?
-        .with_shard(shard_config)
-        .await?
         .with_pruner(pruner_config)
         .await?
         .with_rpc(config.rpc)
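These two hunks move `.with_shard(shard_config)` up the builder chain, so it now runs right after the store is attached and before log sync, the miner, and the pruner are built. A plausible reading is that the later steps should see the final (possibly store-overridden) shard config. The toy builder below is a hypothetical illustration of that ordering constraint, not the repository's ClientBuilder.

```rust
#[derive(Default)]
struct ToyBuilder {
    store: Option<String>,
    shard: Option<u32>,
    pruner_shard_seen: Option<u32>,
}

impl ToyBuilder {
    fn with_store(mut self, name: &str) -> Self {
        self.store = Some(name.to_string());
        self
    }
    fn with_shard(mut self, shard_id: u32) -> Result<Self, String> {
        // Needs the store, so it must come after with_store.
        self.store.as_ref().ok_or("store must be attached first")?;
        self.shard = Some(shard_id);
        Ok(self)
    }
    fn with_pruner(mut self) -> Result<Self, String> {
        // The pruner only makes sense once the shard layout is known.
        self.pruner_shard_seen = Some(self.shard.ok_or("with_shard must run before with_pruner")?);
        Ok(self)
    }
}

fn main() -> Result<(), String> {
    // New order from the hunks above: store -> shard -> ... -> pruner.
    let node = ToyBuilder::default()
        .with_store("rocksdb")
        .with_shard(3)?
        .with_pruner()?;
    assert_eq!(node.pruner_shard_seen, Some(3));
    Ok(())
}
```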
@@ -1157,6 +1157,7 @@ impl LogManager {
             .get_tx_by_seq_number(from_tx_seq)?
             .ok_or_else(|| anyhow!("from tx missing"))?;
+        let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len());
 
         for seq in to_tx_seq_list {
             // No need to copy data for completed tx.
             if self.check_tx_completed(seq)? {
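The only change in this hunk is pre-allocating `to_tx_offset_list` to the number of target sequence numbers before the loop fills it. A small standalone illustration of the pattern (standard library behaviour, nothing repo-specific; the pushed values are placeholders):

```rust
fn main() {
    let to_tx_seq_list: Vec<u64> = vec![3, 5, 8];

    // Reserving the final length up front avoids repeated re-allocations while
    // the loop pushes one entry per target tx.
    let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len());
    for seq in &to_tx_seq_list {
        to_tx_offset_list.push(seq + 100); // placeholder for the real offset lookup
    }

    assert_eq!(to_tx_offset_list.len(), to_tx_seq_list.len());
    assert!(to_tx_offset_list.capacity() >= to_tx_seq_list.len());
}
```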