* v1.4.20 base

* add inmemory dev mock for hypergraph

* add simple rdf + tr

* Update config.go (#234)

Two of the bootstrap nodes are going to be shut down due to low performance. We will consider replacing them with better-spec machines.

* go mod tidy

* go mod tidy

* bump name in readme

---------

Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
This commit is contained in:
Cassandra Heart 2024-06-21 12:46:36 -05:00 committed by GitHub
parent 35561a9e41
commit 6c567a04c1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 3134 additions and 234 deletions

View File

@ -1,4 +1,4 @@
# Quilibrium - Betelgeuse # Quilibrium - Solstice
Quilibrium is a decentralized alternative to platform as a service providers. Quilibrium is a decentralized alternative to platform as a service providers.
This release is part of the phases of the Dusk release, which finalizes with This release is part of the phases of the Dusk release, which finalizes with

View File

@ -35,7 +35,7 @@ require (
github.com/cloudflare/circl v1.3.8 github.com/cloudflare/circl v1.3.8
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-libp2p v0.35.1
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/shopspring/decimal v1.4.0 github.com/shopspring/decimal v1.4.0
github.com/spf13/cobra v1.8.0 github.com/spf13/cobra v1.8.0

View File

@ -21,8 +21,8 @@ import (
) )
const ( const (
// BlossomSubID_v12 is the protocol ID for version 1.2.0 of the BlossomSub protocol. // BlossomSubID_v12 is the protocol ID for version 1.2.1 of the BlossomSub protocol.
BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.0") BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.1")
) )
// Defines the default BlossomSub parameters. // Defines the default BlossomSub parameters.
@ -52,7 +52,7 @@ var (
BlossomSubOpportunisticGraftPeers = 2 BlossomSubOpportunisticGraftPeers = 2
BlossomSubGraftFloodThreshold = 10 * time.Second BlossomSubGraftFloodThreshold = 10 * time.Second
BlossomSubMaxIHaveLength = 5000 BlossomSubMaxIHaveLength = 5000
BlossomSubMaxIHaveMessages = 100 BlossomSubMaxIHaveMessages = 10
BlossomSubIWantFollowupTime = 3 * time.Second BlossomSubIWantFollowupTime = 3 * time.Second
) )

View File

@ -1,5 +1,2 @@
// Deprecated: The database-backed peerstore will be removed from go-libp2p in the future. // libp2p deprecated this, we disagree
// Use the memory peerstore (pstoremem) instead.
// For more details see https://github.com/libp2p/go-libp2p/issues/2329
// and https://github.com/libp2p/go-libp2p/issues/2355.
package pstoreds package pstoreds

View File

@ -725,11 +725,11 @@ func logoVersion(width int) string {
out += " ''---.. ...---'' ##\n" out += " ''---.. ...---'' ##\n"
out += " ''----------''\n" out += " ''----------''\n"
out += " \n" out += " \n"
out += " Quilibrium Node - v" + config.GetVersionString() + " Betelgeuse\n" out += " Quilibrium Node - v" + config.GetVersionString() + " Solstice\n"
out += " \n" out += " \n"
out += " DB Console\n" out += " DB Console\n"
} else { } else {
out = "Quilibrium Node - v" + config.GetVersionString() + " Betelgeuse - DB Console\n" out = "Quilibrium Node - v" + config.GetVersionString() + " Solstice - DB Console\n"
} }
return out return out
} }

View File

@ -89,7 +89,7 @@ func (n *Node) VerifyProofIntegrity() {
} }
if !v { if !v {
panic("bad kzg proof") panic(fmt.Sprintf("bad kzg proof at increment %d", i))
} }
wp := []byte{} wp := []byte{}
wp = append(wp, n.pubSub.GetPeerID()...) wp = append(wp, n.pubSub.GetPeerID()...)
@ -97,7 +97,7 @@ func (n *Node) VerifyProofIntegrity() {
fmt.Printf("%x\n", wp) fmt.Printf("%x\n", wp)
v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof) v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
if !v { if !v {
panic("bad weso proof") panic(fmt.Sprintf("bad weso proof at increment %d", i))
} }
} }
} }

View File

@ -56,9 +56,11 @@ var storeSet = wire.NewSet(
store.NewPebbleClockStore, store.NewPebbleClockStore,
store.NewPebbleKeyStore, store.NewPebbleKeyStore,
store.NewPebbleDataProofStore, store.NewPebbleDataProofStore,
store.NewPeerstoreDatastore,
wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)),
wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)),
) )
var pubSubSet = wire.NewSet( var pubSubSet = wire.NewSet(
@ -89,6 +91,7 @@ var consensusSet = wire.NewSet(
func NewDHTNode(*config.Config) (*DHTNode, error) { func NewDHTNode(*config.Config) (*DHTNode, error) {
panic(wire.Build( panic(wire.Build(
debugLoggerSet, debugLoggerSet,
storeSet,
pubSubSet, pubSubSet,
newDHTNode, newDHTNode,
)) ))

View File

@ -24,8 +24,14 @@ import (
func NewDHTNode(configConfig *config.Config) (*DHTNode, error) { func NewDHTNode(configConfig *config.Config) (*DHTNode, error) {
p2PConfig := configConfig.P2P p2PConfig := configConfig.P2P
dbConfig := configConfig.DB
pebbleDB := store.NewPebbleDB(dbConfig)
peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
zapLogger := debugLogger() zapLogger := debugLogger()
blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger) blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
dhtNode, err := newDHTNode(blossomSub) dhtNode, err := newDHTNode(blossomSub)
if err != nil { if err != nil {
return nil, err return nil, err
@ -42,7 +48,11 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes
keyConfig := configConfig.Key keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger) fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
p2PConfig := configConfig.P2P p2PConfig := configConfig.P2P
blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger) peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
engineConfig := configConfig.Engine engineConfig := configConfig.Engine
kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger) wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@ -65,7 +75,11 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo
keyConfig := configConfig.Key keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger) fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
p2PConfig := configConfig.P2P p2PConfig := configConfig.P2P
blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger) peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
engineConfig := configConfig.Engine engineConfig := configConfig.Engine
kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger) wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@ -125,7 +139,7 @@ var debugLoggerSet = wire.NewSet(
var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager))) var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore))) var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))
var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager))) var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))

View File

@ -61,8 +61,6 @@ var BootstrapPeers = []string{
"/ip4/65.108.194.84/udp/8336/quic-v1/p2p/QmP8C7g9ZRiWzhqN2AgFu5onS6HwHzR6Vv1TCHxAhnCSnq", "/ip4/65.108.194.84/udp/8336/quic-v1/p2p/QmP8C7g9ZRiWzhqN2AgFu5onS6HwHzR6Vv1TCHxAhnCSnq",
"/dns/quil.dfcnodes.eu/udp/8336/quic-v1/p2p/QmQaFmbYVrKSwoen5UQdaqyDq4QhXfSSLDVnYpYD4SF9tX", "/dns/quil.dfcnodes.eu/udp/8336/quic-v1/p2p/QmQaFmbYVrKSwoen5UQdaqyDq4QhXfSSLDVnYpYD4SF9tX",
"/ip4/87.98.167.207/udp/8336/quic-v1/p2p/QmafiAXLu1JWktyfzDtD67i78GRBYCfQ4doTfq7pp7wfQ1", "/ip4/87.98.167.207/udp/8336/quic-v1/p2p/QmafiAXLu1JWktyfzDtD67i78GRBYCfQ4doTfq7pp7wfQ1",
"/ip4/216.244.76.122/udp/8336/quic-v1/p2p/QmUSbMytVBUYiiGE266aZHrHrP17vLx5UJFd7o74HkDoaV",
"/ip4/216.244.79.194/udp/8336/quic-v1/p2p/QmQn3bWk5aqaNSv9dwPjBg4qdeGBGNEB72tvuhgEc64Ki5",
// purged peers (keep your node online to return to this list) // purged peers (keep your node online to return to this list)
// "/ip4/204.186.74.47/udp/8317/quic-v1/p2p/Qmd233pLUDvcDW3ama27usfbG1HxKNh1V9dmWVW1SXp1pd", // "/ip4/204.186.74.47/udp/8317/quic-v1/p2p/Qmd233pLUDvcDW3ama27usfbG1HxKNh1V9dmWVW1SXp1pd",
// "/ip4/186.233.184.181/udp/8336/quic-v1/p2p/QmW6QDvKuYqJYYMP5tMZSp12X3nexywK28tZNgqtqNpEDL", // "/ip4/186.233.184.181/udp/8336/quic-v1/p2p/QmW6QDvKuYqJYYMP5tMZSp12X3nexywK28tZNgqtqNpEDL",

View File

@ -14,7 +14,7 @@ func GetMinimumVersion() []byte {
} }
func GetVersion() []byte { func GetVersion() []byte {
return []byte{0x01, 0x04, 0x13} return []byte{0x01, 0x04, 0x14}
} }
func GetVersionString() string { func GetVersionString() string {
@ -22,12 +22,19 @@ func GetVersionString() string {
} }
func FormatVersion(version []byte) string { func FormatVersion(version []byte) string {
if len(version) == 3 {
return fmt.Sprintf( return fmt.Sprintf(
"%d.%d.%d", "%d.%d.%d",
version[0], version[1], version[2], version[0], version[1], version[2],
) )
} else {
return fmt.Sprintf(
"%d.%d.%d-p%d",
version[0], version[1], version[2], version[3],
)
}
} }
func GetPatchNumber() byte { func GetPatchNumber() byte {
return 0x01 return 0x00
} }

View File

@ -37,14 +37,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
} }
switch any.TypeUrl { switch any.TypeUrl {
case protobufs.ClockFrameType:
if err := e.handleClockFrameData(
message.From,
any,
); err != nil {
return errors.Wrap(err, "handle message")
}
return nil
case protobufs.SelfTestReportType: case protobufs.SelfTestReportType:
if err := e.handleSelfTestReport( if err := e.handleSelfTestReport(
message.From, message.From,
@ -58,60 +50,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
return errors.Wrap(errors.New("invalid message"), "handle message") return errors.Wrap(errors.New("invalid message"), "handle message")
} }
func (e *MasterClockConsensusEngine) handleClockFrameData(
peerID []byte,
any *anypb.Any,
) error {
frame := &protobufs.ClockFrame{}
if err := any.UnmarshalTo(frame); err != nil {
return errors.Wrap(err, "handle clock frame data")
}
head, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
if frame.FrameNumber < head.FrameNumber {
return nil
}
if e.difficulty != frame.Difficulty {
e.logger.Debug(
"frame difficulty mismatched",
zap.Uint32("difficulty", frame.Difficulty),
)
return errors.Wrap(
errors.New("frame difficulty"),
"handle clock frame data",
)
}
e.logger.Debug(
"got clock frame",
zap.Binary("sender", peerID),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("proof_count", len(frame.AggregateProofs)),
)
go func() {
select {
case e.frameValidationCh <- frame:
default:
e.logger.Debug(
"dropped frame due to overwhelmed queue",
zap.Binary("sender", peerID),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("proof_count", len(frame.AggregateProofs)),
)
}
}()
return nil
}
func (e *MasterClockConsensusEngine) handleSelfTestReport( func (e *MasterClockConsensusEngine) handleSelfTestReport(
peerID []byte, peerID []byte,
any *anypb.Any, any *anypb.Any,
@ -252,7 +190,6 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
return nil return nil
} }
// This does not publish any longer, frames strictly are picked up from sync
func (e *MasterClockConsensusEngine) publishProof( func (e *MasterClockConsensusEngine) publishProof(
frame *protobufs.ClockFrame, frame *protobufs.ClockFrame,
) error { ) error {

View File

@ -1,12 +1,9 @@
package master package master
import ( import (
"context"
"time" "time"
"github.com/mr-tron/base58"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/consensus" "source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs" "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -65,87 +62,10 @@ func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
func (e *MasterClockConsensusEngine) collect( func (e *MasterClockConsensusEngine) collect(
currentFramePublished *protobufs.ClockFrame, currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) { ) (*protobufs.ClockFrame, error) {
e.logger.Debug("collecting vdf proofs")
latest, err := e.masterTimeReel.Head() latest, err := e.masterTimeReel.Head()
if err != nil { if err != nil {
panic(err) panic(err)
} }
// With the increase of network size, constrain down to top thirty
peers, err := e.GetMostAheadPeers()
if err != nil {
return latest, nil
}
for i := 0; i < len(peers); i++ {
peer := peers[i]
e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
cc, err := e.pubSub.GetDirectChannel(peer, "validation")
if err != nil {
e.logger.Error(
"could not connect for sync",
zap.String("peer_id", base58.Encode(peer)),
)
continue
}
client := protobufs.NewValidationServiceClient(cc)
syncClient, err := client.Sync(
context.Background(),
&protobufs.SyncRequest{
FramesRequest: &protobufs.ClockFramesRequest{
Filter: e.filter,
FromFrameNumber: latest.FrameNumber,
ToFrameNumber: 0,
},
},
)
if err != nil {
cc.Close()
continue
}
for msg, err := syncClient.Recv(); msg != nil &&
err == nil; msg, err = syncClient.Recv() {
if msg.FramesResponse == nil {
break
}
for _, frame := range msg.FramesResponse.ClockFrames {
frame := frame
if frame.FrameNumber < latest.FrameNumber {
continue
}
if e.difficulty != frame.Difficulty {
e.logger.Debug(
"frame difficulty mismatched",
zap.Uint32("difficulty", frame.Difficulty),
)
break
}
if err := e.frameProver.VerifyMasterClockFrame(frame); err != nil {
e.logger.Error(
"peer returned invalid frame",
zap.String("peer_id", base58.Encode(peer)))
e.pubSub.SetPeerScore(peer, -1000)
break
}
e.masterTimeReel.Insert(frame, false)
latest = frame
}
}
if err != nil {
cc.Close()
break
}
cc.Close()
break
}
return latest, nil return latest, nil
} }

View File

@ -421,27 +421,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
}() }()
go func() { go func() {
newFrameCh := e.masterTimeReel.NewFrameCh()
for e.state < consensus.EngineStateStopping { for e.state < consensus.EngineStateStopping {
var err error
select {
case frame := <-newFrameCh:
currentFrame := frame
latestFrame := frame
if latestFrame, err = e.collect(currentFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
latestFrame = currentFrame
continue
}
if latestFrame, err = e.prove(latestFrame); err != nil {
e.logger.Error("could not prove", zap.Error(err))
latestFrame = currentFrame
}
if err = e.publishProof(latestFrame); err != nil {
e.logger.Error("could not publish", zap.Error(err))
}
case <-time.After(20 * time.Second):
frame, err := e.masterTimeReel.Head() frame, err := e.masterTimeReel.Head()
if err != nil { if err != nil {
panic(err) panic(err)
@ -455,7 +435,6 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
e.logger.Error("could not publish", zap.Error(err)) e.logger.Error("could not publish", zap.Error(err))
} }
} }
}
}() }()
go func() { go func() {
@ -511,7 +490,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
for i := uint32(0); i < parallelism; i++ { for i := uint32(0); i < parallelism; i++ {
i := i i := i
go func() { go func() {
for j := 3; j > 0; j-- { for j := 3; j >= 0; j-- {
resp, err := resp, err :=
clients[i].CalculateChallengeProof( clients[i].CalculateChallengeProof(
context.Background(), context.Background(),
@ -522,7 +501,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
}, },
) )
if err != nil { if err != nil {
if j == 1 || len(e.engineConfig.DataWorkerMultiaddrs) == 0 { if j == 0 {
panic(err) panic(err)
} }
if len(e.engineConfig.DataWorkerMultiaddrs) != 0 { if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
@ -533,7 +512,18 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
clients[i], err = e.createParallelDataClientsFromListAndIndex(i) clients[i], err = e.createParallelDataClientsFromListAndIndex(i)
if err != nil { if err != nil {
panic(err) e.logger.Error("failed to reconnect", zap.Error(err))
}
} else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
zap.Uint32("client", i),
)
time.Sleep(50 * time.Millisecond)
clients[i], err =
e.createParallelDataClientsFromBaseMultiaddrAndIndex(i)
if err != nil {
e.logger.Error("failed to reconnect", zap.Error(err))
} }
} }
continue continue
@ -593,12 +583,12 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
) { ) {
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index]) ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
if err != nil { if err != nil {
panic(err) return nil, errors.Wrap(err, "create parallel data client")
} }
_, addr, err := mn.DialArgs(ma) _, addr, err := mn.DialArgs(ma)
if err != nil { if err != nil {
panic(err) return nil, errors.Wrap(err, "create parallel data client")
} }
conn, err := grpc.Dial( conn, err := grpc.Dial(
@ -612,7 +602,66 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
), ),
) )
if err != nil { if err != nil {
panic(err) return nil, errors.Wrap(err, "create parallel data client")
}
client := protobufs.NewDataIPCServiceClient(conn)
e.logger.Info(
"connected to data worker process",
zap.Uint32("client", index),
)
return client, nil
}
func (
e *MasterClockConsensusEngine,
) createParallelDataClientsFromBaseMultiaddrAndIndex(
index uint32,
) (
protobufs.DataIPCServiceClient,
error,
) {
e.logger.Info(
"re-connecting to data worker process",
zap.Uint32("client", index),
)
if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.engineConfig.DataWorkerBaseListenPort == 0 {
e.engineConfig.DataWorkerBaseListenPort = 40000
}
ma, err := multiaddr.NewMultiaddr(
fmt.Sprintf(
e.engineConfig.DataWorkerBaseListenMultiaddr,
int(e.engineConfig.DataWorkerBaseListenPort)+int(index),
),
)
if err != nil {
return nil, errors.Wrap(err, "create parallel data client")
}
_, addr, err := mn.DialArgs(ma)
if err != nil {
return nil, errors.Wrap(err, "create parallel data client")
}
conn, err := grpc.Dial(
addr,
grpc.WithTransportCredentials(
insecure.NewCredentials(),
),
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(10*1024*1024),
grpc.MaxCallRecvMsgSize(10*1024*1024),
),
)
if err != nil {
return nil, errors.Wrap(err, "create parallel data client")
} }
client := protobufs.NewDataIPCServiceClient(conn) client := protobufs.NewDataIPCServiceClient(conn)

View File

@ -0,0 +1,337 @@
package channel
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"fmt"
"math/big"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
// Feldman holds the state of one participant in a Feldman verifiable
// secret sharing (VSS) session: each of `total` parties contributes a
// secret, polynomial fragments are exchanged pairwise, and a
// commit/reveal round with Schnorr-style proofs establishes a combined
// public key that any `threshold` parties can reconstruct.
//
// NOTE(review): field semantics are inferred from how this file uses
// them; confirm against callers.
type Feldman struct {
	threshold int // minimum number of shares needed to reconstruct
	total     int // total number of participating parties
	id        int // this party's 1-based identifier
	// fragsForCounterparties: serialized polynomial evaluations we
	// produced for each other party id (filled by SamplePolynomial).
	fragsForCounterparties map[int][]byte
	// fragsFromCounterparties: deserialized fragments received from each
	// other party (filled by SetPolyFragForParty).
	fragsFromCounterparties map[int]curves.Scalar
	zkpok                   curves.Scalar // Schnorr proof-of-knowledge response z = x*c + r
	secret                  curves.Scalar // this party's own secret contribution
	scalar                  curves.Scalar // this party's share of the combined secret
	generator               curves.Point  // group generator used for commitments
	publicKey               curves.Point  // reconstructed combined public key (generator sentinel until Recombine)
	point                   curves.Point  // generator * scalar: our public share
	randomCommitmentPoint   curves.Point  // R = generator * r, commitment to the ZKPoK nonce
	round                   FeldmanRound  // protocol state machine position
	// zkcommitsFromCounterparties: each party's SHA-256 commitment to its
	// (R, z) pair, received before reveals.
	zkcommitsFromCounterparties map[int][]byte
	// pointsFromCounterparties: each party's revealed, verified public share.
	pointsFromCounterparties map[int]curves.Point
	curve                    curves.Curve
}
// FeldmanReveal is the message a party publishes after all commitments
// have been collected: its public share, its Schnorr nonce commitment,
// and the proof-of-knowledge response, all in serialized form.
type FeldmanReveal struct {
	Point                 []byte // compressed public share P = g^x
	RandomCommitmentPoint []byte // compressed nonce commitment R = g^r
	ZKPoK                 []byte // proof response z = x*c + r
}

// ErrWrongRound is returned when a protocol method is invoked outside
// the round it belongs to.
var ErrWrongRound = errors.New("wrong round for feldman")

// FeldmanRound enumerates the protocol's sequential rounds.
type FeldmanRound int

const (
	FELDMAN_ROUND_UNINITIALIZED = FeldmanRound(0) // constructed, polynomial not yet sampled
	FELDMAN_ROUND_INITIALIZED   = FeldmanRound(1) // polynomial sampled, fragments ready
	FELDMAN_ROUND_COMMITTED     = FeldmanRound(2) // all fragments in, our commitment issued
	FELDMAN_ROUND_REVEALED      = FeldmanRound(3) // all commitments in, our reveal issued
	FELDMAN_ROUND_RECONSTRUCTED = FeldmanRound(4) // public key reconstructed and verified
)
// NewFeldman constructs a participant in a (threshold, total) Feldman
// VSS session. id is this party's 1-based identifier, secret is the
// party's own secret contribution, and generator is the group generator
// used for commitments and proofs.
//
// Returns an error when the configuration cannot run the protocol;
// previously invalid parameters were accepted silently and failed later
// in confusing ways.
func NewFeldman(
	threshold, total, id int,
	secret curves.Scalar,
	curve curves.Curve,
	generator curves.Point,
) (*Feldman, error) {
	if threshold < 1 || threshold > total {
		return nil, errors.New("threshold must be in [1, total]")
	}
	if id < 1 || id > total {
		return nil, errors.New("id must be in [1, total]")
	}
	return &Feldman{
		threshold:               threshold,
		total:                   total,
		id:                      id,
		fragsForCounterparties:  make(map[int][]byte),
		fragsFromCounterparties: make(map[int]curves.Scalar),
		zkpok:                   nil,
		secret:                  secret,
		scalar:                  nil,
		generator:               generator,
		// NOTE(review): publicKey/point intentionally start as the curve's
		// standard generator even when a custom generator is supplied;
		// Recombine uses this as a "not yet reconstructed" sentinel —
		// preserve it.
		publicKey:                   secret.Point().Generator(),
		point:                       secret.Point().Generator(),
		round:                       FELDMAN_ROUND_UNINITIALIZED,
		zkcommitsFromCounterparties: make(map[int][]byte),
		pointsFromCounterparties:    make(map[int]curves.Point),
		curve:                       curve,
	}, nil
}
// SamplePolynomial draws a random degree-(threshold-1) polynomial whose
// constant term is this party's secret, then evaluates it at x = 1..total.
// The evaluation at our own id becomes our private share; every other
// evaluation is serialized for delivery to the matching counterparty.
// Only valid in the uninitialized round; advances to INITIALIZED.
func (f *Feldman) SamplePolynomial() error {
	if f.round != FELDMAN_ROUND_UNINITIALIZED {
		return errors.Wrap(ErrWrongRound, "sample polynomial")
	}

	// poly[0] is the secret; higher coefficients are fresh randomness.
	poly := make([]curves.Scalar, 0, f.threshold)
	poly = append(poly, f.secret)
	for deg := 1; deg < f.threshold; deg++ {
		poly = append(poly, f.curve.NewScalar().Random(rand.Reader))
	}

	for party := 1; party <= f.total; party++ {
		// Accumulate poly[deg] * party^deg term by term.
		eval := poly[0].Clone()
		power := f.curve.Scalar.New(party)
		for deg := 1; deg < f.threshold; deg++ {
			eval = eval.Add(poly[deg].Mul(power))
			power = power.Mul(f.curve.Scalar.New(party))
		}
		if party == f.id {
			f.scalar = eval
		} else {
			f.fragsForCounterparties[party] = eval.Bytes()
		}
	}

	f.round = FELDMAN_ROUND_INITIALIZED
	return nil
}
// Scalar returns this party's private share of the combined secret
// (nil until SamplePolynomial and the fragment exchange have produced it).
func (f *Feldman) Scalar() curves.Scalar {
	return f.scalar
}
// GetPolyFrags returns the serialized polynomial evaluations destined for
// each counterparty, keyed by party id. Only valid in the INITIALIZED
// round, i.e. after SamplePolynomial.
func (f *Feldman) GetPolyFrags() (map[int][]byte, error) {
	if f.round != FELDMAN_ROUND_INITIALIZED {
		return nil, errors.Wrap(ErrWrongRound, "get poly frags")
	}
	return f.fragsForCounterparties, nil
}
// SetPolyFragForParty records the polynomial fragment sent to us by party
// `id`. Once fragments from all total-1 counterparties have arrived, the
// private share, public point, and a Schnorr-style ZKPoK are computed and
// the returned bytes are the SHA-256 commitment to (nonce commitment R,
// response z); until then an empty slice is returned.
// Only valid in the INITIALIZED round; advances to COMMITTED on the final
// fragment.
func (f *Feldman) SetPolyFragForParty(id int, frag []byte) ([]byte, error) {
	if f.round != FELDMAN_ROUND_INITIALIZED {
		return nil, errors.Wrap(ErrWrongRound, "set poly frag for party")
	}

	var err error
	f.fragsFromCounterparties[id], err = f.curve.NewScalar().SetBytes(frag)
	if err != nil {
		return nil, errors.Wrap(err, "set poly frag for party")
	}

	if len(f.fragsFromCounterparties) == f.total-1 {
		// All counterparty fragments received: fold them into our share.
		// NOTE(review): assumes f.scalar already holds our own evaluation
		// from SamplePolynomial — confirm callers always sample first.
		for _, v := range f.fragsFromCounterparties {
			f.scalar = f.scalar.Add(v)
		}

		// Our public share P = g^x.
		f.point = f.generator.Mul(f.scalar)

		// Schnorr nonce commitment R = g^r for a fresh random r.
		randCommitment := f.curve.NewScalar().Random(rand.Reader)
		f.randomCommitmentPoint = f.generator.Mul(randCommitment)

		randCommitmentPointBytes := f.randomCommitmentPoint.ToAffineCompressed()
		publicPointBytes := f.point.ToAffineCompressed()

		// Fiat-Shamir challenge c = H(P || R).
		challenge := sha256.Sum256(
			append(
				append([]byte{}, publicPointBytes...),
				randCommitmentPointBytes...,
			),
		)
		challengeBig, err := f.curve.NewScalar().SetBigInt(
			new(big.Int).SetBytes(challenge[:]),
		)
		if err != nil {
			return nil, errors.Wrap(err, "set poly frag for party")
		}

		// Response z = x*c + r; verifiers later check g^z == R + P*c.
		f.zkpok = f.scalar.Mul(challengeBig).Add(randCommitment)

		zkpokBytes := f.zkpok.Bytes()
		// Hash-commit to (R, z) so counterparties can detect any later
		// change to the reveal.
		zkcommit := sha256.Sum256(
			append(
				append([]byte{}, randCommitmentPointBytes...),
				zkpokBytes...,
			),
		)

		f.round = FELDMAN_ROUND_COMMITTED
		return zkcommit[:], nil
	}

	return []byte{}, nil
}
// ReceiveCommitments stores party id's hash commitment to its (R, z)
// pair. Once commitments from all total-1 counterparties are in, our own
// reveal (public share, nonce commitment, ZKPoK) is returned and the
// round advances to REVEALED; before that the returned reveal is nil.
// Only valid in the COMMITTED round.
func (f *Feldman) ReceiveCommitments(
	id int,
	zkcommit []byte,
) (*FeldmanReveal, error) {
	if f.round != FELDMAN_ROUND_COMMITTED {
		return nil, errors.Wrap(ErrWrongRound, "receive commitments")
	}

	f.zkcommitsFromCounterparties[id] = zkcommit

	// Final counterparty commitment received: safe to reveal ours now.
	if len(f.zkcommitsFromCounterparties) == f.total-1 {
		publicPointBytes := f.point.ToAffineCompressed()
		randCommitmentPointBytes := f.randomCommitmentPoint.ToAffineCompressed()
		f.round = FELDMAN_ROUND_REVEALED

		zkpokBytes := f.zkpok.Bytes()
		return &FeldmanReveal{
			Point:                 publicPointBytes,
			RandomCommitmentPoint: randCommitmentPointBytes,
			ZKPoK:                 zkpokBytes,
		}, nil
	}

	return nil, nil
}
// Recombine verifies party id's reveal against its earlier commitment and
// its Schnorr proof, then — once all total-1 counterparties have been
// verified — reconstructs the combined public key by Lagrange
// interpolation over every window of `threshold` consecutive shares,
// checking that all windows agree.
//
// Returns true once reconstruction has completed (round RECONSTRUCTED).
// Only valid in the REVEALED round.
func (f *Feldman) Recombine(id int, reveal *FeldmanReveal) (bool, error) {
	if f.round != FELDMAN_ROUND_REVEALED {
		return false, errors.Wrap(ErrWrongRound, "recombine")
	}

	counterpartyPoint, err := f.curve.NewGeneratorPoint().FromAffineCompressed(
		reveal.Point,
	)
	if err != nil {
		return false, errors.Wrap(err, "recombine")
	}
	// Reject degenerate shares: an honest party's public share can be
	// neither the standard generator nor the session generator.
	if counterpartyPoint.Equal(f.curve.NewGeneratorPoint()) ||
		counterpartyPoint.Equal(f.generator) {
		return false, errors.Wrap(errors.New("counterparty sent generator"), "recombine")
	}

	counterpartyRandomCommitmentPoint, err := f.curve.NewGeneratorPoint().
		FromAffineCompressed(reveal.RandomCommitmentPoint)
	if err != nil {
		return false, errors.Wrap(err, "recombine")
	}
	if counterpartyRandomCommitmentPoint.Equal(f.curve.NewGeneratorPoint()) ||
		counterpartyRandomCommitmentPoint.Equal(f.generator) {
		return false, errors.Wrap(errors.New("counterparty sent generator"), "recombine")
	}

	counterpartyZKPoK, err := f.curve.NewScalar().SetBytes(reveal.ZKPoK)
	if err != nil {
		return false, errors.Wrap(err, "recombine")
	}

	// Commitment recorded during the COMMITTED round; a missing entry
	// yields nil and fails the bytes.Equal check below.
	counterpartyZKCommit := f.zkcommitsFromCounterparties[id]

	// Recompute the Fiat-Shamir challenge c = H(P || R).
	challenge := sha256.Sum256(append(
		append([]byte{}, reveal.Point...),
		reveal.RandomCommitmentPoint...,
	))
	challengeBig, err := f.curve.NewScalar().SetBigInt(
		new(big.Int).SetBytes(challenge[:]),
	)
	if err != nil {
		return false, errors.Wrap(err, "recombine")
	}

	// Schnorr verification: g^z must equal R + P*c.
	proof := f.generator.Mul(counterpartyZKPoK)
	counterpartyRandomCommitmentPoint = counterpartyRandomCommitmentPoint.Add(
		counterpartyPoint.Mul(challengeBig),
	)
	if !proof.Equal(counterpartyRandomCommitmentPoint) {
		return false, errors.Wrap(
			errors.New(fmt.Sprintf("invalid proof from %d", id)),
			"recombine",
		)
	}

	// The reveal must hash to the commitment sent earlier; otherwise the
	// party changed its (R, z) after committing.
	verifier := sha256.Sum256(append(
		append([]byte{}, reveal.RandomCommitmentPoint...),
		reveal.ZKPoK...,
	))
	if !bytes.Equal(counterpartyZKCommit, verifier[:]) {
		return false, errors.Wrap(
			errors.New(fmt.Sprintf("%d changed zkpok after commit", id)),
			"recombine",
		)
	}

	f.pointsFromCounterparties[id] = counterpartyPoint

	if len(f.pointsFromCounterparties) == f.total-1 {
		f.pointsFromCounterparties[f.id] = f.point

		// Interpolate the public key from every window of `threshold`
		// consecutive party ids [i, i+threshold); all windows must agree.
		for i := 1; i <= f.total-f.threshold+1; i++ {
			var reconstructedSum curves.Point = nil

			for j := i; j < f.threshold+i; j++ {
				// Lagrange basis coefficient at x=0 for share j within
				// the window: prod_k k / (k - j), k != j.
				num := f.curve.Scalar.One()
				den := f.curve.Scalar.One()

				for k := i; k < f.threshold+i; k++ {
					if j != k {
						// Shadow the integer loop indices with their
						// scalar forms for the field arithmetic.
						j := f.curve.NewScalar().New(j)
						k := f.curve.NewScalar().New(k)

						num = num.Mul(k)
						den = den.Mul(k.Sub(j))
					}
				}

				// NOTE(review): Invert's error is discarded; den is a
				// product of non-zero differences so it should always be
				// invertible — confirm against the Scalar implementation.
				den, _ = den.Invert()
				reconstructedFragment := f.pointsFromCounterparties[j].Mul(num.Mul(den))

				if reconstructedSum == nil {
					reconstructedSum = reconstructedFragment
				} else {
					reconstructedSum = reconstructedSum.Add(reconstructedFragment)
				}
			}

			// First window initializes publicKey (it still holds the
			// generator sentinel from construction); later windows must
			// reproduce the same point.
			if f.publicKey.Equal(f.curve.NewGeneratorPoint()) ||
				f.publicKey.Equal(f.generator) {
				f.publicKey = reconstructedSum
			} else if !f.publicKey.Equal(reconstructedSum) {
				return false, errors.Wrap(
					errors.New("recombination mismatch"),
					"recombine",
				)
			}
		}

		f.round = FELDMAN_ROUND_RECONSTRUCTED
	}

	return f.round == FELDMAN_ROUND_RECONSTRUCTED, nil
}
// PublicKey returns the combined public key. Only meaningful after
// Recombine has reached the RECONSTRUCTED round; before that it holds the
// generator sentinel set at construction.
func (f *Feldman) PublicKey() curves.Point {
	return f.publicKey
}
// PublicKeyBytes returns the combined public key in compressed affine
// form (see PublicKey for when the value is meaningful).
func (f *Feldman) PublicKeyBytes() []byte {
	return f.publicKey.ToAffineCompressed()
}
// ReverseScalarBytes returns the bytes of inBytes in reverse order,
// written into a zero-initialized buffer of the given length (useful for
// converting a scalar between big- and little-endian encodings).
//
// If inBytes is shorter than length, the tail of the output stays zero.
// Previously an input longer than length caused an out-of-range panic on
// the output buffer; the surplus leading bytes are now ignored instead.
func ReverseScalarBytes(inBytes []byte, length int) []byte {
	outBytes := make([]byte, length)
	i := 0
	for j := len(inBytes) - 1; j >= 0 && i < length; i, j = i+1, j-1 {
		outBytes[i] = inBytes[j]
	}
	return outBytes
}

View File

@ -0,0 +1,446 @@
package channel_test
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/assert"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
)
// TestFeldman runs a full 3-of-5 Feldman VSS session among five parties
// using the standard ed25519 generator and checks that every party
// reconstructs the same public key, equal to g^(s1+s2+s3+s4+s5).
//
// The original test spelled out every pairwise exchange by hand, carried
// duplicated assert.NoError lines, and contained a dead write
// (m1[1] was overwritten but never read); this version drives the same
// protocol trace with loops.
func TestFeldman(t *testing.T) {
	const (
		threshold = 3
		total     = 5
	)
	curve := curves.ED25519()

	// 1-based slices so indices line up with party ids.
	secrets := make([]curves.Scalar, total+1)
	parties := make([]*crypto.Feldman, total+1)
	for id := 1; id <= total; id++ {
		secrets[id] = curve.NewScalar().Random(rand.Reader)
		f, err := crypto.NewFeldman(
			threshold,
			total,
			id,
			secrets[id],
			*curve,
			curve.NewGeneratorPoint(),
		)
		assert.NoError(t, err)
		parties[id] = f
	}

	for id := 1; id <= total; id++ {
		assert.NoError(t, parties[id].SamplePolynomial())
	}

	frags := make([]map[int][]byte, total+1)
	for id := 1; id <= total; id++ {
		m, err := parties[id].GetPolyFrags()
		assert.NoError(t, err)
		frags[id] = m
	}

	// Deliver every fragment; the final delivery to each party yields its
	// zero-knowledge commitment (earlier deliveries return empty slices).
	zkcommits := make([][]byte, total+1)
	for to := 1; to <= total; to++ {
		for from := 1; from <= total; from++ {
			if from == to {
				continue
			}
			z, err := parties[to].SetPolyFragForParty(from, frags[from][to])
			assert.NoError(t, err)
			if len(z) > 0 {
				zkcommits[to] = z
			}
		}
	}

	// Exchange commitments; the final receipt at each party yields its
	// reveal (earlier receipts return nil).
	reveals := make([]*crypto.FeldmanReveal, total+1)
	for to := 1; to <= total; to++ {
		for from := 1; from <= total; from++ {
			if from == to {
				continue
			}
			r, err := parties[to].ReceiveCommitments(from, zkcommits[from])
			assert.NoError(t, err)
			if r != nil {
				reveals[to] = r
			}
		}
	}

	// Every party verifies every other party's reveal and reconstructs.
	for to := 1; to <= total; to++ {
		for from := 1; from <= total; from++ {
			if from == to {
				continue
			}
			_, err := parties[to].Recombine(from, reveals[from])
			assert.NoError(t, err)
		}
	}

	// The combined key must be g raised to the sum of all secrets, and
	// all parties must agree on it.
	sum := secrets[1].Add(secrets[2].Add(secrets[3].Add(secrets[4].Add(secrets[5]))))
	assert.True(t, curve.NewGeneratorPoint().Mul(sum).Equal(parties[1].PublicKey()))
	assert.True(t, parties[5].PublicKey().Equal(parties[1].PublicKey()))
}
// TestFeldmanCustomGenerator exercises a full 3-of-5 Feldman DKG round
// (polynomial sampling, fragment exchange, commitment exchange, reveal and
// recombination) over ED25519 using a randomly chosen custom generator point
// instead of the curve's standard generator. Fix: the commitment-exchange
// section for f1 contained duplicated copy-paste assert.NoError lines, now
// removed.
func TestFeldmanCustomGenerator(t *testing.T) {
	gen := curves.ED25519().Point.Random(rand.Reader)
	f1, err := crypto.NewFeldman(
		3,
		5,
		1,
		curves.ED25519().NewScalar().Random(rand.Reader),
		*curves.ED25519(),
		gen,
	)
	assert.NoError(t, err)
	f2, err := crypto.NewFeldman(
		3,
		5,
		2,
		curves.ED25519().NewScalar().Random(rand.Reader),
		*curves.ED25519(),
		gen,
	)
	assert.NoError(t, err)
	f3, err := crypto.NewFeldman(
		3,
		5,
		3,
		curves.ED25519().NewScalar().Random(rand.Reader),
		*curves.ED25519(),
		gen,
	)
	assert.NoError(t, err)
	f4, err := crypto.NewFeldman(
		3,
		5,
		4,
		curves.ED25519().NewScalar().Random(rand.Reader),
		*curves.ED25519(),
		gen,
	)
	assert.NoError(t, err)
	f5, err := crypto.NewFeldman(
		3,
		5,
		5,
		curves.ED25519().NewScalar().Random(rand.Reader),
		*curves.ED25519(),
		gen,
	)
	assert.NoError(t, err)
	// Each participant samples its secret-sharing polynomial.
	err = f1.SamplePolynomial()
	assert.NoError(t, err)
	err = f2.SamplePolynomial()
	assert.NoError(t, err)
	err = f3.SamplePolynomial()
	assert.NoError(t, err)
	err = f4.SamplePolynomial()
	assert.NoError(t, err)
	err = f5.SamplePolynomial()
	assert.NoError(t, err)
	// Fragments are produced per party and cross-distributed below.
	m1, err := f1.GetPolyFrags()
	assert.NoError(t, err)
	m2, err := f2.GetPolyFrags()
	assert.NoError(t, err)
	m3, err := f3.GetPolyFrags()
	assert.NoError(t, err)
	m4, err := f4.GetPolyFrags()
	assert.NoError(t, err)
	m5, err := f5.GetPolyFrags()
	assert.NoError(t, err)
	// Fragment distribution: the final SetPolyFragForParty call per
	// participant yields its zero-knowledge commitment (z1..z5).
	_, err = f1.SetPolyFragForParty(2, m2[1])
	assert.NoError(t, err)
	_, err = f1.SetPolyFragForParty(3, m3[1])
	assert.NoError(t, err)
	_, err = f1.SetPolyFragForParty(4, m4[1])
	assert.NoError(t, err)
	z1, err := f1.SetPolyFragForParty(5, m5[1])
	assert.NoError(t, err)
	_, err = f2.SetPolyFragForParty(1, m1[2])
	assert.NoError(t, err)
	_, err = f2.SetPolyFragForParty(3, m3[2])
	assert.NoError(t, err)
	_, err = f2.SetPolyFragForParty(4, m4[2])
	assert.NoError(t, err)
	z2, err := f2.SetPolyFragForParty(5, m5[2])
	assert.NoError(t, err)
	_, err = f3.SetPolyFragForParty(1, m1[3])
	assert.NoError(t, err)
	_, err = f3.SetPolyFragForParty(2, m2[3])
	assert.NoError(t, err)
	_, err = f3.SetPolyFragForParty(4, m4[3])
	assert.NoError(t, err)
	z3, err := f3.SetPolyFragForParty(5, m5[3])
	assert.NoError(t, err)
	_, err = f4.SetPolyFragForParty(1, m1[4])
	assert.NoError(t, err)
	_, err = f4.SetPolyFragForParty(2, m2[4])
	assert.NoError(t, err)
	_, err = f4.SetPolyFragForParty(3, m3[4])
	assert.NoError(t, err)
	z4, err := f4.SetPolyFragForParty(5, m5[4])
	assert.NoError(t, err)
	_, err = f5.SetPolyFragForParty(1, m1[5])
	assert.NoError(t, err)
	_, err = f5.SetPolyFragForParty(2, m2[5])
	assert.NoError(t, err)
	_, err = f5.SetPolyFragForParty(3, m3[5])
	assert.NoError(t, err)
	z5, err := f5.SetPolyFragForParty(4, m4[5])
	assert.NoError(t, err)
	// Commitment exchange: the final ReceiveCommitments call per
	// participant yields its reveal (r1..r5).
	_, err = f1.ReceiveCommitments(2, z2)
	assert.NoError(t, err)
	_, err = f1.ReceiveCommitments(3, z3)
	assert.NoError(t, err)
	_, err = f1.ReceiveCommitments(4, z4)
	assert.NoError(t, err)
	r1, err := f1.ReceiveCommitments(5, z5)
	assert.NoError(t, err)
	_, err = f2.ReceiveCommitments(1, z1)
	assert.NoError(t, err)
	_, err = f2.ReceiveCommitments(3, z3)
	assert.NoError(t, err)
	_, err = f2.ReceiveCommitments(4, z4)
	assert.NoError(t, err)
	r2, err := f2.ReceiveCommitments(5, z5)
	assert.NoError(t, err)
	_, err = f3.ReceiveCommitments(1, z1)
	assert.NoError(t, err)
	_, err = f3.ReceiveCommitments(2, z2)
	assert.NoError(t, err)
	_, err = f3.ReceiveCommitments(4, z4)
	assert.NoError(t, err)
	r3, err := f3.ReceiveCommitments(5, z5)
	assert.NoError(t, err)
	_, err = f4.ReceiveCommitments(1, z1)
	assert.NoError(t, err)
	_, err = f4.ReceiveCommitments(2, z2)
	assert.NoError(t, err)
	_, err = f4.ReceiveCommitments(3, z3)
	assert.NoError(t, err)
	r4, err := f4.ReceiveCommitments(5, z5)
	assert.NoError(t, err)
	_, err = f5.ReceiveCommitments(1, z1)
	assert.NoError(t, err)
	_, err = f5.ReceiveCommitments(2, z2)
	assert.NoError(t, err)
	_, err = f5.ReceiveCommitments(3, z3)
	assert.NoError(t, err)
	r5, err := f5.ReceiveCommitments(4, z4)
	assert.NoError(t, err)
	// Reveal/recombination: every participant recombines every other
	// participant's reveal without error.
	_, err = f1.Recombine(2, r2)
	assert.NoError(t, err)
	_, err = f1.Recombine(3, r3)
	assert.NoError(t, err)
	_, err = f1.Recombine(4, r4)
	assert.NoError(t, err)
	_, err = f1.Recombine(5, r5)
	assert.NoError(t, err)
	_, err = f2.Recombine(1, r1)
	assert.NoError(t, err)
	_, err = f2.Recombine(3, r3)
	assert.NoError(t, err)
	_, err = f2.Recombine(4, r4)
	assert.NoError(t, err)
	_, err = f2.Recombine(5, r5)
	assert.NoError(t, err)
	_, err = f3.Recombine(1, r1)
	assert.NoError(t, err)
	_, err = f3.Recombine(2, r2)
	assert.NoError(t, err)
	_, err = f3.Recombine(4, r4)
	assert.NoError(t, err)
	_, err = f3.Recombine(5, r5)
	assert.NoError(t, err)
	_, err = f4.Recombine(1, r1)
	assert.NoError(t, err)
	_, err = f4.Recombine(2, r2)
	assert.NoError(t, err)
	_, err = f4.Recombine(3, r3)
	assert.NoError(t, err)
	_, err = f4.Recombine(5, r5)
	assert.NoError(t, err)
	_, err = f5.Recombine(1, r1)
	assert.NoError(t, err)
	_, err = f5.Recombine(2, r2)
	assert.NoError(t, err)
	_, err = f5.Recombine(3, r3)
	assert.NoError(t, err)
	_, err = f5.Recombine(4, r4)
	assert.NoError(t, err)
	// NOTE(review): unlike the standard-generator test above, this test
	// performs no final public-key equality assertion — confirm whether one
	// is intended for the custom-generator case.
}

View File

@ -0,0 +1,755 @@
package channel
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha512"
"crypto/subtle"
"encoding/binary"
"encoding/json"
"fmt"
"sort"
"github.com/pkg/errors"
"golang.org/x/crypto/hkdf"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// TRIPLE_RATCHET_PROTOCOL_VERSION is the current wire version of the triple
// ratchet protocol.
const TRIPLE_RATCHET_PROTOCOL_VERSION = 1

// TRIPLE_RATCHET_PROTOCOL is the protocol identifier placed in envelope
// headers: family tag 2 in the high byte, version in the low byte (0x201).
const TRIPLE_RATCHET_PROTOCOL = 2<<8 + TRIPLE_RATCHET_PROTOCOL_VERSION

// TripleRatchetRound tracks a participant's progress through the DKG
// handshake phases.
type TripleRatchetRound int

const (
	TRIPLE_RATCHET_ROUND_UNINITIALIZED = TripleRatchetRound(0)
	TRIPLE_RATCHET_ROUND_INITIALIZED   = TripleRatchetRound(1)
	TRIPLE_RATCHET_ROUND_COMMITTED     = TripleRatchetRound(2)
	TRIPLE_RATCHET_ROUND_REVEALED      = TripleRatchetRound(3)
	TRIPLE_RATCHET_ROUND_RECONSTRUCTED = TripleRatchetRound(4)
)
// Note: If an HSM with raw primitive access becomes available, the raw crypto
// mechanisms should be refactored into calls in KeyManager and implemented
// through the driver
//
// TripleRatchetParticipant holds one member's state for a group ratchet
// session: per-peer pairwise double-ratchet channels, a Feldman DKG ratchet
// for deriving the shared group key, and the symmetric chain state used to
// encrypt/decrypt group messages.
type TripleRatchetParticipant struct {
	// peerKey is this participant's long-lived scalar identifying it in the
	// group; its public point keys the peer maps below.
	peerKey                    curves.Scalar
	sendingEphemeralPrivateKey curves.Scalar
	// receivingEphemeralKeys maps compressed peer public key -> the peer's
	// latest ratchet ephemeral, as decoded from message headers.
	receivingEphemeralKeys map[string]curves.Scalar
	// receivingGroupKey is the DKG-derived group public key (set in
	// Recombine once the DKG completes).
	receivingGroupKey curves.Point
	curve             curves.Curve
	keyManager        keys.KeyManager
	rootKey           []byte
	sendingChainKey   []byte
	// currentHeaderKey/nextHeaderKey encrypt message headers; nextHeaderKey
	// becomes current on a receiver-side ratchet step.
	currentHeaderKey []byte
	nextHeaderKey    []byte
	// receivingChainKey and the chain-length counters are all keyed by
	// compressed peer public key.
	receivingChainKey            map[string][]byte
	currentSendingChainLength    uint32
	previousSendingChainLength   uint32
	currentReceivingChainLength  map[string]uint32
	previousReceivingChainLength map[string]uint32
	// peerIdMap/idPeerMap translate between compressed public keys and the
	// 1-based party ids used by the Feldman DKG.
	peerIdMap map[string]int
	idPeerMap map[int]*PeerInfo
	// skippedKeysMap stores message keys skipped due to out-of-order
	// delivery: peer public key -> header key -> message number -> key.
	skippedKeysMap map[string]map[string]map[uint32][]byte
	// peerChannels holds one pairwise double-ratchet channel per other peer.
	peerChannels map[string]*DoubleRatchetParticipant
	dkgRatchet   *Feldman
}
// PeerInfo carries the three public keys a peer contributes to the session:
// its group identity point, its X3DH identity key, and its signed pre-key.
type PeerInfo struct {
	PublicKey          curves.Point
	IdentityPublicKey  curves.Point
	SignedPrePublicKey curves.Point
}
// Weak-mode synchronous group modification TR this is not the asynchronous
// TR, does not ratchet group key automatically, know what your use case is
// before adopting this.
//
// NewTripleRatchetParticipant builds this participant's state and one
// pairwise double-ratchet channel per other peer (via X3DH), returning the
// participant plus, when this participant sorts as the X3DH sender for a
// peer, an encrypted "init" envelope to deliver to that peer.
func NewTripleRatchetParticipant(
	peers []*PeerInfo,
	curve curves.Curve,
	keyManager keys.KeyManager,
	peerKey curves.Scalar,
	identityKey curves.Scalar,
	signedPreKey curves.Scalar,
) (
	*TripleRatchetParticipant,
	map[string]*protobufs.P2PChannelEnvelope,
	error,
) {
	participant := &TripleRatchetParticipant{}
	participant.skippedKeysMap = make(map[string]map[string]map[uint32][]byte)
	participant.receivingEphemeralKeys = make(map[string]curves.Scalar)
	participant.receivingChainKey = make(map[string][]byte)
	participant.peerChannels = make(map[string]*DoubleRatchetParticipant)
	participant.keyManager = keyManager
	participant.currentSendingChainLength = 0
	participant.previousSendingChainLength = 0
	participant.currentReceivingChainLength = make(map[string]uint32)
	participant.previousReceivingChainLength = make(map[string]uint32)
	// Build the full roster (peers + self) and canonicalize its order by
	// compressed public key so every participant derives identical party ids.
	peerBasis := append([]*PeerInfo{}, peers...)
	peerBasis = append(peerBasis, &PeerInfo{
		PublicKey:          peerKey.Point().Generator().Mul(peerKey),
		IdentityPublicKey:  identityKey.Point().Generator().Mul(identityKey),
		SignedPrePublicKey: signedPreKey.Point().Generator().Mul(signedPreKey),
	})
	// NOTE(review): a sort.Slice less-func returning true on equality
	// (`<= 0`) is not a strict ordering; harmless only if all public keys
	// are distinct — confirm that invariant holds for callers.
	sort.Slice(peerBasis, func(i, j int) bool {
		return bytes.Compare(
			peerBasis[i].PublicKey.ToAffineCompressed(),
			peerBasis[j].PublicKey.ToAffineCompressed(),
		) <= 0
	})

	initMessages := make(map[string]*protobufs.P2PChannelEnvelope)
	peerIdMap := map[string]int{}
	idPeerMap := map[int]*PeerInfo{}
	// sender flips to true once we pass our own entry in the sorted roster:
	// we act as X3DH sender only toward peers that sort after us.
	sender := false
	for i := 0; i < len(peerBasis); i++ {
		peerIdMap[string(peerBasis[i].PublicKey.ToAffineCompressed())] = i + 1
		idPeerMap[i+1] = peerBasis[i]
		if bytes.Equal(
			peerBasis[i].PublicKey.ToAffineCompressed(),
			peerKey.Point().Generator().Mul(peerKey).ToAffineCompressed(),
		) {
			sender = true
		} else {
			participant.skippedKeysMap[string(
				peerBasis[i].PublicKey.ToAffineCompressed(),
			)] = make(map[string]map[uint32][]byte)
			participant.currentReceivingChainLength[string(
				peerBasis[i].PublicKey.ToAffineCompressed(),
			)] = 0
			participant.previousReceivingChainLength[string(
				peerBasis[i].PublicKey.ToAffineCompressed(),
			)] = 0
			// Derive a 96-byte X3DH session secret (root, chain and header
			// key material) with the role matching our roster position.
			var sessionKey []byte
			if sender {
				sessionKey = SenderX3DH(
					identityKey,
					signedPreKey,
					peerBasis[i].IdentityPublicKey,
					peerBasis[i].SignedPrePublicKey,
					96,
				)
			} else {
				sessionKey = ReceiverX3DH(
					identityKey,
					signedPreKey,
					peerBasis[i].IdentityPublicKey,
					peerBasis[i].SignedPrePublicKey,
					96,
				)
			}
			var err error
			participant.peerChannels[string(
				peerBasis[i].PublicKey.ToAffineCompressed(),
			)], err = NewDoubleRatchetParticipant(
				sessionKey[:32],
				sessionKey[32:64],
				sessionKey[64:],
				sender,
				signedPreKey,
				peerBasis[i].SignedPrePublicKey,
				&curve,
				keyManager,
			)
			if err != nil {
				return nil, nil, errors.Wrap(err, "new triple ratchet participant")
			}
			// The sender side opens the channel with an "init" probe that the
			// receiver verifies in Initialize.
			if sender {
				initMessages[string(peerBasis[i].PublicKey.ToAffineCompressed())], err =
					participant.peerChannels[string(
						peerBasis[i].PublicKey.ToAffineCompressed(),
					)].RatchetEncrypt([]byte("init"))
				if err != nil {
					return nil, nil, errors.Wrap(err, "new triple ratchet participant")
				}
			}
		}
	}
	// 2-of-n Feldman DKG over the full roster, seeded with a fresh scalar;
	// party id is our 1-based position in the sorted roster.
	feldman, err := NewFeldman(
		2,
		len(peers)+1,
		peerIdMap[string(
			peerKey.Point().Generator().Mul(peerKey).ToAffineCompressed(),
		)],
		curve.NewScalar().Random(rand.Reader),
		curve,
		curve.Point.Generator(),
	)
	if err != nil {
		return nil, nil, errors.Wrap(err, "new triple ratchet participant")
	}
	participant.peerIdMap = peerIdMap
	participant.idPeerMap = idPeerMap
	participant.dkgRatchet = feldman
	participant.curve = curve
	participant.peerKey = peerKey
	return participant, initMessages, nil
}
// Initialize verifies the "init" probe received on every inbound pairwise
// channel, samples this participant's DKG polynomial, and returns its
// polynomial fragments encrypted per peer (keyed by that peer's compressed
// public key) for distribution.
func (r *TripleRatchetParticipant) Initialize(
	initMessages map[string]*protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
	for k, m := range initMessages {
		msg, err := r.peerChannels[k].RatchetDecrypt(m)
		if err != nil {
			return nil, errors.Wrap(err, "initialize")
		}
		// The sender side encrypted the literal string "init"; anything else
		// means the channel keys do not line up.
		if string(msg) != "init" {
			return nil, errors.Wrap(errors.New("invalid init message"), "initialize")
		}
	}

	if err := r.dkgRatchet.SamplePolynomial(); err != nil {
		return nil, errors.Wrap(err, "initialize")
	}

	result, err := r.dkgRatchet.GetPolyFrags()
	if err != nil {
		return nil, errors.Wrap(err, "initialize")
	}

	resultMap := make(map[string]*protobufs.P2PChannelEnvelope)
	for k, v := range result {
		// Skip our own fragment; only remote parties receive envelopes.
		if r.idPeerMap[k].PublicKey.Equal(
			r.peerKey.Point().Generator().Mul(r.peerKey),
		) {
			continue
		}
		envelope, err := r.peerChannels[string(
			r.idPeerMap[k].PublicKey.ToAffineCompressed(),
		)].RatchetEncrypt(v)
		if err != nil {
			return nil, errors.Wrap(err, "initialize")
		}
		resultMap[string(r.idPeerMap[k].PublicKey.ToAffineCompressed())] = envelope
	}
	return resultMap, nil
}
// ReceivePolyFrag decrypts and ingests a polynomial fragment from the peer
// identified by peerId (compressed public key bytes). Once the DKG ratchet
// has enough fragments it emits commitment data, which is returned encrypted
// for every peer channel so it can be broadcast; until then it returns
// (nil, nil).
func (r *TripleRatchetParticipant) ReceivePolyFrag(
	peerId []byte,
	frag *protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
	b, err := r.peerChannels[string(peerId)].RatchetDecrypt(frag)
	if err != nil {
		return nil, errors.Wrap(err, "receive poly frag")
	}

	result, err := r.dkgRatchet.SetPolyFragForParty(
		r.peerIdMap[string(peerId)],
		b,
	)
	if err != nil {
		return nil, errors.Wrap(err, "receive poly frag")
	}

	// No commitment produced yet — more fragments are still outstanding.
	if len(result) == 0 {
		return nil, nil
	}

	envelopes := make(map[string]*protobufs.P2PChannelEnvelope)
	for k, c := range r.peerChannels {
		envelope, err := c.RatchetEncrypt(result)
		if err != nil {
			return nil, errors.Wrap(err, "receive poly frag")
		}
		envelopes[k] = envelope
	}
	// Fix: the original returned errors.Wrap(err, ...) here with err known
	// to be nil, which always produced a nil error but obscured intent.
	return envelopes, nil
}
// ReceiveCommitment decrypts and ingests a DKG commitment from the peer
// identified by peerId. When the ratchet has collected all commitments it
// produces a reveal, which is JSON-encoded and returned encrypted for every
// peer channel; until then it returns (nil, nil).
func (r *TripleRatchetParticipant) ReceiveCommitment(
	peerId []byte,
	zkcommit *protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
	b, err := r.peerChannels[string(peerId)].RatchetDecrypt(zkcommit)
	if err != nil {
		return nil, errors.Wrap(err, "receive commitment")
	}

	result, err := r.dkgRatchet.ReceiveCommitments(
		r.peerIdMap[string(peerId)],
		b,
	)
	if err != nil {
		return nil, errors.Wrap(err, "receive commitment")
	}

	// Not all commitments are in yet — nothing to broadcast.
	if result == nil {
		return nil, nil
	}

	// Fix: marshal only once a reveal actually exists (the original
	// marshaled unconditionally before the nil check).
	d, err := json.Marshal(result)
	if err != nil {
		return nil, errors.Wrap(err, "receive commitment")
	}

	envelopes := make(map[string]*protobufs.P2PChannelEnvelope)
	for k, c := range r.peerChannels {
		envelope, err := c.RatchetEncrypt(d)
		if err != nil {
			return nil, errors.Wrap(err, "receive commitment")
		}
		envelopes[k] = envelope
	}
	// Fix: the original returned errors.Wrap(err, "receive poly frag") here —
	// wrong context string and err was known nil.
	return envelopes, nil
}
// Recombine decrypts and ingests a peer's DKG reveal. When the final reveal
// arrives, it derives the group session state — root key, current and next
// header keys — from the reconstructed group public key via HKDF-SHA-512,
// and seeds a fresh sending ephemeral. Safe to call once per peer reveal;
// returns nil until/after completion.
func (r *TripleRatchetParticipant) Recombine(
	peerId []byte,
	reveal *protobufs.P2PChannelEnvelope,
) error {
	b, err := r.peerChannels[string(peerId)].RatchetDecrypt(reveal)
	if err != nil {
		return errors.Wrap(err, "recombine")
	}

	rev := &FeldmanReveal{}
	if err = json.Unmarshal(b, rev); err != nil {
		return errors.Wrap(err, "recombine")
	}

	done, err := r.dkgRatchet.Recombine(
		r.peerIdMap[string(peerId)],
		rev,
	)
	if err != nil {
		return errors.Wrap(err, "recombine")
	}
	// Not all reveals are in yet; group key material is not derivable.
	if !done {
		return nil
	}

	// Derive 96 bytes of session material from the group public key:
	// salt is its SHA-512/256 digest, info is a fixed domain separator.
	sess := sha512.Sum512_256(r.dkgRatchet.PublicKeyBytes())
	hash := hkdf.New(
		sha512.New,
		r.dkgRatchet.PublicKeyBytes(),
		sess[:],
		[]byte("quilibrium-triple-ratchet"),
	)
	rkck := make([]byte, 96)
	if _, err := hash.Read(rkck[:]); err != nil {
		return errors.Wrap(err, "recombine")
	}

	// Split: 32-byte root key, 32-byte current header key, 32-byte next
	// header key.
	r.rootKey = rkck[:32]
	r.currentHeaderKey = rkck[32:64]
	r.nextHeaderKey = rkck[64:]
	r.receivingGroupKey = r.dkgRatchet.PublicKey()
	r.sendingEphemeralPrivateKey = r.curve.Scalar.Random(rand.Reader)
	return nil
}
// RatchetEncrypt encrypts message for the group: it advances the sending
// chain one step (via the package's ratchetKeys KDF), encrypts the ratchet
// header under the current header key, then encrypts the body under the
// derived message key with the AEAD key and header ciphertext bound in as
// associated data.
//
// NOTE(review): sendingChainKey is only ever advanced here and in
// ratchetSenderEphemeralKeys; confirm it is populated before the first call
// (it is not set by Recombine in this file).
func (r *TripleRatchetParticipant) RatchetEncrypt(
	message []byte,
) (*protobufs.P2PChannelEnvelope, error) {
	envelope := &protobufs.P2PChannelEnvelope{
		ProtocolIdentifier: TRIPLE_RATCHET_PROTOCOL,
		MessageHeader:      &protobufs.MessageCiphertext{},
		MessageBody:        &protobufs.MessageCiphertext{},
	}

	// Advance the symmetric chain: new chain key plus per-message keys.
	newChainKey, messageKey, aeadKey := ratchetKeys(r.sendingChainKey)
	r.sendingChainKey = newChainKey

	var err error
	header := r.encodeHeader()
	envelope.MessageHeader, err = r.encrypt(
		header,
		r.currentHeaderKey,
		nil,
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not encrypt header")
	}

	// Binding aeadKey || header ciphertext as AD ties body and header
	// together so neither can be swapped independently.
	envelope.MessageBody, err = r.encrypt(
		message,
		messageKey,
		append(append([]byte{}, aeadKey...), envelope.MessageHeader.Ciphertext...),
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not encrypt message")
	}

	r.currentSendingChainLength++
	return envelope, nil
}
// RatchetDecrypt decrypts a group envelope. Order of operations: first try
// previously skipped message keys (out-of-order delivery); otherwise decrypt
// the header, perform a receiver-side DH ratchet step if the header was
// encrypted under the next header key, skip forward over any missed message
// numbers (storing their keys), then advance the sender's receiving chain
// one step and decrypt the body.
func (r *TripleRatchetParticipant) RatchetDecrypt(
	envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
	// Out-of-order messages may already have a stored key; cheapest path.
	plaintext, err := r.trySkippedMessageKeys(envelope)
	if err != nil {
		return nil, errors.Wrap(err, "ratchet decrypt")
	}
	if plaintext != nil {
		return plaintext, nil
	}

	// shouldRatchet is true when the header only opened under nextHeaderKey,
	// i.e. the sender has performed a DH ratchet step we haven't mirrored.
	header, shouldRatchet, err := r.decryptHeader(
		envelope.MessageHeader,
		r.currentHeaderKey,
	)
	if err != nil {
		return nil, errors.Wrap(err, "ratchet decrypt")
	}

	senderKey,
		receivingEphemeralKey,
		previousReceivingChainLength,
		currentReceivingChainLength,
		err := r.decodeHeader(header)
	if err != nil {
		return nil, errors.Wrap(err, "ratchet decrypt")
	}

	if shouldRatchet {
		// Bank keys for the tail of the old chain before rolling it over.
		if err := r.skipMessageKeys(
			senderKey,
			previousReceivingChainLength,
		); err != nil {
			return nil, errors.Wrap(err, "ratchet decrypt")
		}
		if err := r.ratchetReceiverEphemeralKeys(
			senderKey,
			receivingEphemeralKey,
		); err != nil {
			return nil, errors.Wrap(err, "ratchet decrypt")
		}
	}

	// Bank keys for any numbers skipped within the current chain.
	if err := r.skipMessageKeys(
		senderKey,
		currentReceivingChainLength,
	); err != nil {
		return nil, errors.Wrap(err, "ratchet decrypt")
	}

	newChainKey, messageKey, aeadKey := ratchetKeys(
		r.receivingChainKey[string(senderKey.ToAffineCompressed())],
	)

	r.receivingChainKey[string(senderKey.ToAffineCompressed())] = newChainKey
	r.currentReceivingChainLength[string(senderKey.ToAffineCompressed())]++

	// AD mirrors the sender side: aeadKey || header ciphertext.
	plaintext, err = r.decrypt(
		envelope.MessageBody,
		messageKey,
		append(
			append([]byte{}, aeadKey...),
			envelope.MessageHeader.Ciphertext...,
		),
	)

	return plaintext, errors.Wrap(err, "ratchet decrypt")
}
// ratchetSenderEphemeralKeys performs the sender-side DH ratchet step: it
// mixes DH(groupKey, sendingEphemeral) with the current root key through
// HKDF-SHA-512 and splits the output into a new root key, sending chain key,
// and next header key.
//
// NOTE(review): this method is not called anywhere in this file — confirm
// the intended call site (presumably before/around RatchetEncrypt).
func (r *TripleRatchetParticipant) ratchetSenderEphemeralKeys() error {
	hash := hkdf.New(
		sha512.New,
		r.receivingGroupKey.Mul(
			r.sendingEphemeralPrivateKey,
		).ToAffineCompressed(),
		r.rootKey,
		[]byte("quilibrium-triple-ratchet"),
	)
	rkck2 := make([]byte, 96)
	if _, err := hash.Read(rkck2[:]); err != nil {
		return errors.Wrap(err, "failed ratcheting root key")
	}

	// Split: root key, sending chain key, next header key (32 bytes each).
	r.rootKey = rkck2[:32]
	r.sendingChainKey = rkck2[32:64]
	r.nextHeaderKey = rkck2[64:]
	return nil
}
// ratchetReceiverEphemeralKeys mirrors a sender's DH ratchet step for the
// given peer: it resets the sending/receiving chain counters, promotes
// nextHeaderKey to current, records the peer's new ephemeral, derives a new
// root key / receiving chain key / next header key from
// DH(groupKey, newEphemeral) via HKDF-SHA-512, and samples a fresh sending
// ephemeral for our own next step.
func (r *TripleRatchetParticipant) ratchetReceiverEphemeralKeys(
	peerKey curves.Point,
	newEphemeralKey curves.Scalar,
) error {
	r.previousSendingChainLength = r.currentSendingChainLength
	r.currentSendingChainLength = 0
	r.currentReceivingChainLength[string(peerKey.ToAffineCompressed())] = 0
	r.currentHeaderKey = r.nextHeaderKey
	r.receivingEphemeralKeys[string(
		peerKey.ToAffineCompressed(),
	)] = newEphemeralKey

	hash := hkdf.New(
		sha512.New,
		r.receivingGroupKey.Mul(
			newEphemeralKey,
		).ToAffineCompressed(),
		r.rootKey,
		[]byte("quilibrium-triple-ratchet"),
	)
	rkck := make([]byte, 96)
	if _, err := hash.Read(rkck[:]); err != nil {
		return errors.Wrap(err, "failed ratcheting root key")
	}

	// Split: root key, this peer's receiving chain key, next header key.
	r.rootKey = rkck[:32]
	r.receivingChainKey[string(peerKey.ToAffineCompressed())] = rkck[32:64]
	r.nextHeaderKey = rkck[64:]
	r.sendingEphemeralPrivateKey = r.curve.NewScalar().Random(rand.Reader)
	return nil
}
// trySkippedMessageKeys attempts to decrypt the envelope with message keys
// that were banked for out-of-order delivery. The store layout is
// peer public key -> header key -> message number -> (32-byte message key ||
// 32-byte AEAD key), exactly as skipMessageKeys populates it. Returns
// (nil, nil) when no stored key applies.
//
// Fix: the original iterated skippedKeysMap as if its TOP-level key were the
// header key, then indexed the header-key level by peer key and deleted at
// mismatched levels — so stored keys could never be found (and a missing
// entry would have panicked on the [:32] slice). This version walks the
// nested structure consistently and checks for key presence.
func (r *TripleRatchetParticipant) trySkippedMessageKeys(
	envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
	for _, byHeaderKey := range r.skippedKeysMap {
		for headerKey := range byHeaderKey {
			header, _, err := r.decryptHeader(
				envelope.MessageHeader,
				[]byte(headerKey),
			)
			if err != nil {
				// This banked header key does not open the envelope; try the
				// next one.
				continue
			}

			peerKey, _, _, current, err := r.decodeHeader(header)
			if err != nil {
				return nil, errors.Wrap(err, "try skipped message keys")
			}

			peer := string(peerKey.ToAffineCompressed())
			keyMaterial, ok := r.skippedKeysMap[peer][headerKey][current]
			if !ok || len(keyMaterial) < 64 {
				// Header opened but no key was banked for this sender /
				// message number under this header key.
				continue
			}
			messageKey := keyMaterial[:32]
			aeadKey := keyMaterial[32:]

			plaintext, err := r.decrypt(
				envelope.MessageBody,
				messageKey,
				append(
					append([]byte{}, aeadKey...),
					envelope.MessageHeader.Ciphertext...,
				),
			)
			if err != nil {
				return nil, errors.Wrap(err, "try skipped message keys")
			}

			// Consume the one-time key and prune emptied maps.
			delete(r.skippedKeysMap[peer][headerKey], current)
			if len(r.skippedKeysMap[peer][headerKey]) == 0 {
				delete(r.skippedKeysMap[peer], headerKey)
			}
			return plaintext, nil
		}
	}
	return nil, nil
}
// skipMessageKeys advances senderKey's receiving chain up to (but not
// including) message number until, banking each skipped step's message and
// AEAD keys in skippedKeysMap (keyed peer -> current header key -> message
// number) so out-of-order messages can still be decrypted later. At most 100
// messages may be skipped at once.
//
// Fix: when the per-header-key map was missing, the original created it in
// skippedKeysMap but kept writing through the still-nil local `skippedKeys`,
// which panics on the first skipped message (assignment to nil map). The
// local is now rebound to the newly created map.
func (r *TripleRatchetParticipant) skipMessageKeys(
	senderKey curves.Point,
	until uint32,
) error {
	if r.currentReceivingChainLength[string(
		senderKey.ToAffineCompressed(),
	)]+100 < until {
		return errors.Wrap(errors.New("skip limit exceeded"), "skip message keys")
	}

	if r.receivingChainKey != nil {
		for r.currentReceivingChainLength[string(
			senderKey.ToAffineCompressed(),
		)] < until {
			newChainKey, messageKey, aeadKey := ratchetKeys(
				r.receivingChainKey[string(
					senderKey.ToAffineCompressed(),
				)],
			)
			skippedKeys := r.skippedKeysMap[string(
				senderKey.ToAffineCompressed(),
			)][string(r.currentHeaderKey)]
			if skippedKeys == nil {
				skippedKeys = make(map[uint32][]byte)
				r.skippedKeysMap[string(
					senderKey.ToAffineCompressed(),
				)][string(r.currentHeaderKey)] = skippedKeys
			}

			// Bank messageKey || aeadKey for the skipped message number.
			skippedKeys[r.currentReceivingChainLength[string(
				senderKey.ToAffineCompressed(),
			)]] = append(
				append([]byte{}, messageKey...),
				aeadKey...,
			)

			r.receivingChainKey[string(
				senderKey.ToAffineCompressed(),
			)] = newChainKey
			r.currentReceivingChainLength[string(
				senderKey.ToAffineCompressed(),
			)]++
		}
	}

	return nil
}
// encodeHeader serializes the plaintext ratchet header: the sender's
// compressed public key, the sending ephemeral private key bytes, then the
// previous and current sending chain lengths as big-endian uint32s.
func (r *TripleRatchetParticipant) encodeHeader() []byte {
	senderPub := r.peerKey.Point().Generator().Mul(r.peerKey)
	out := append([]byte{}, senderPub.ToAffineCompressed()...)
	out = append(out, r.sendingEphemeralPrivateKey.Bytes()...)
	out = binary.BigEndian.AppendUint32(out, r.previousSendingChainLength)
	return binary.BigEndian.AppendUint32(out, r.currentSendingChainLength)
}
// decryptHeader opens a header ciphertext with receivingHeaderKey. If that
// fails and the caller passed our current header key, it retries with the
// next header key; success there returns shouldRatchet=true, signaling the
// sender has advanced its DH ratchet and the receiver must follow.
func (r *TripleRatchetParticipant) decryptHeader(
	ciphertext *protobufs.MessageCiphertext,
	receivingHeaderKey []byte,
) ([]byte, bool, error) {
	header, err := r.decrypt(
		ciphertext,
		receivingHeaderKey,
		nil,
	)
	// Retry with nextHeaderKey only when the caller tried the live current
	// header key (constant-time compare avoids a key-dependent branch).
	if err != nil && subtle.ConstantTimeCompare(
		r.currentHeaderKey,
		receivingHeaderKey,
	) == 1 {
		if header, err = r.decrypt(
			ciphertext,
			r.nextHeaderKey,
			nil,
		); err != nil {
			return nil, false, errors.Wrap(err, "could not decrypt header")
		}
		// NOTE(review): leftover debug print; removing it would orphan the
		// file's "fmt" import, so it is flagged here instead — delete both
		// together.
		fmt.Println("should ratchet")
		return header, true, nil
	}

	// err may be nil (success with receivingHeaderKey) or the original
	// failure; errors.Wrap(nil, ...) returns nil.
	return header, false, errors.Wrap(err, "could not decrypt header")
}
// decodeHeader parses a plaintext header produced by encodeHeader:
// [compressed sender public key | ephemeral scalar bytes | previous chain
// length (4 bytes BE) | current chain length (4 bytes BE)]. Returns the
// sender's point, the decoded ephemeral scalar, and the two chain lengths.
func (r *TripleRatchetParticipant) decodeHeader(
	header []byte,
) (curves.Point, curves.Scalar, uint32, uint32, error) {
	// 9 is a coarse lower bound (the two uint32s plus at least one key
	// byte); slicing below assumes the full fixed layout.
	if len(header) < 9 {
		return nil, nil, 0, 0, errors.Wrap(
			errors.New("malformed header"),
			"decode header",
		)
	}

	// The two counters occupy the trailing 8 bytes.
	currentReceivingChainLength := binary.BigEndian.Uint32(header[len(header)-4:])
	previousReceivingChainLength := binary.BigEndian.Uint32(
		header[len(header)-8 : len(header)-4],
	)
	// The compressed-point width is taken from the curve's own encoding.
	sender := header[:len(r.curve.Point.ToAffineCompressed())]
	senderKey, err := r.curve.Point.FromAffineCompressed(sender)
	if err != nil {
		return nil, nil, 0, 0, errors.Wrap(err, "decode header")
	}

	// Everything between the sender key and the counters is the ephemeral.
	receivingEphemeralKeyBytes := header[len(
		r.curve.Point.ToAffineCompressed(),
	) : len(header)-8]
	receivingEphemeralKey, err := r.curve.Scalar.Clone().SetBytes(
		receivingEphemeralKeyBytes,
	)

	return senderKey,
		receivingEphemeralKey,
		previousReceivingChainLength,
		currentReceivingChainLength,
		errors.Wrap(err, "decode header")
}
// encrypt seals plaintext with AES-GCM under key using a fresh random
// 96-bit nonce. When associatedData is nil, 32 random bytes are generated
// and carried in the ciphertext message so the receiver can bind them too.
func (r *TripleRatchetParticipant) encrypt(
	plaintext []byte,
	key []byte,
	associatedData []byte,
) (*protobufs.MessageCiphertext, error) {
	iv := [12]byte{}
	// Fix: the original ignored rand.Read's error. A short/failed read would
	// leave a zero (reused) nonce, which is catastrophic for GCM — fail hard
	// instead.
	if _, err := rand.Read(iv[:]); err != nil {
		return nil, errors.Wrap(err, "encrypt")
	}

	aesCipher, err := aes.NewCipher(key)
	if err != nil {
		return nil, errors.Wrap(err, "encrypt")
	}

	gcm, err := cipher.NewGCM(aesCipher)
	if err != nil {
		return nil, errors.Wrap(err, "encrypt")
	}

	ciphertext := &protobufs.MessageCiphertext{}

	if associatedData == nil {
		associatedData = make([]byte, 32)
		if _, err := rand.Read(associatedData); err != nil {
			return nil, errors.Wrap(err, "encrypt")
		}
		// Only self-generated AD is transmitted; caller-supplied AD is
		// assumed reconstructable by the receiver.
		ciphertext.AssociatedData = associatedData
	}

	ciphertext.Ciphertext = gcm.Seal(nil, iv[:], plaintext, associatedData)
	ciphertext.InitializationVector = iv[:]

	return ciphertext, nil
}
// decrypt opens an AES-GCM ciphertext with the given key. When
// associatedData is nil, the associated data carried inside the ciphertext
// message is used instead.
func (r *TripleRatchetParticipant) decrypt(
	ciphertext *protobufs.MessageCiphertext,
	key []byte,
	associatedData []byte,
) ([]byte, error) {
	aad := associatedData
	if aad == nil {
		aad = ciphertext.AssociatedData
	}

	blockCipher, err := aes.NewCipher(key)
	if err != nil {
		return nil, errors.Wrap(err, "decrypt")
	}

	aead, err := cipher.NewGCM(blockCipher)
	if err != nil {
		return nil, errors.Wrap(err, "decrypt")
	}

	plaintext, err := aead.Open(
		nil,
		ciphertext.InitializationVector,
		ciphertext.Ciphertext,
		aad,
	)
	return plaintext, errors.Wrap(err, "decrypt")
}

View File

@ -22,7 +22,8 @@ replace github.com/cockroachdb/pebble => ../pebble
require ( require (
filippo.io/edwards25519 v1.0.0-rc.1 filippo.io/edwards25519 v1.0.0-rc.1
github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
github.com/libp2p/go-libp2p v0.31.0 github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9
github.com/libp2p/go-libp2p v0.35.1
github.com/libp2p/go-libp2p-gostream v0.6.0 github.com/libp2p/go-libp2p-gostream v0.6.0
github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/libp2p/go-libp2p-kad-dht v0.23.0
google.golang.org/protobuf v1.34.1 google.golang.org/protobuf v1.34.1
@ -34,6 +35,9 @@ require (
) )
require ( require (
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 // indirect
github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 // indirect
github.com/pion/datachannel v1.5.6 // indirect github.com/pion/datachannel v1.5.6 // indirect
github.com/pion/dtls/v2 v2.2.11 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect
github.com/pion/ice/v2 v2.3.24 // indirect github.com/pion/ice/v2 v2.3.24 // indirect
@ -50,6 +54,7 @@ require (
github.com/pion/transport/v2 v2.2.5 // indirect github.com/pion/transport/v2 v2.2.5 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.2.40 // indirect github.com/pion/webrtc/v3 v3.2.40 // indirect
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f // indirect
go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.uber.org/mock v0.4.0 // indirect go.uber.org/mock v0.4.0 // indirect
@ -73,13 +78,11 @@ require (
github.com/golang/snappy v0.0.4 // indirect github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/websocket v1.5.1 // indirect github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0
github.com/gtank/merlin v0.1.1 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 // indirect
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect
github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/reflow v0.3.0 // indirect
@ -124,7 +127,7 @@ require (
github.com/iden3/go-iden3-crypto v0.0.15 github.com/iden3/go-iden3-crypto v0.0.15
github.com/ipfs/boxo v0.8.0 // indirect github.com/ipfs/boxo v0.8.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect
@ -189,7 +192,7 @@ require (
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.17.0 // indirect golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect golang.org/x/net v0.25.0 // indirect
golang.org/x/sync v0.7.0 golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 golang.org/x/sys v0.21.0
golang.org/x/text v0.16.0 // indirect golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect

View File

@ -9,6 +9,8 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D
filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
@ -41,6 +43,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7
github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v0.24.2 h1:uaQIKx9Ai6Gdh5zpTbGiWpytMU+CfsPp06RaW2cx/SY= github.com/charmbracelet/bubbletea v0.24.2 h1:uaQIKx9Ai6Gdh5zpTbGiWpytMU+CfsPp06RaW2cx/SY=
@ -90,9 +94,18 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPc
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 h1:xKCSqM+c9FjQIr0Qacn9m7x0kv/opDWGr/nvCowFCok=
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597/go.mod h1:r8Pv5x6dxChq4mb1ZqzTyK3y9w8wDzWt55XAJpfSq34=
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9 h1:xs255gi9FPRuCW+Ud8lQOBXBGHqM8cqqmoRfGokK3f0=
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9/go.mod h1:d+9YsU6N5OuirjLEOp23T2/+S7OLByerfuv1f89iy90=
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
@ -194,8 +207,6 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk=
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -203,6 +214,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@ -218,6 +231,10 @@ github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0M
github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo=
github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q=
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
@ -289,6 +306,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 h1:YP3lfXXYiQV5MKeUqVnxRP5uuMQTLPx+PGYm1UBoU98=
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326/go.mod h1:nfqkuSNlsk1bvti/oa7TThx4KmRMBmSxf3okHI9wp3E=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
@ -314,8 +333,6 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUM
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0=
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
@ -462,6 +479,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f h1:L2/fBPABieQnQzfV40k2Zw7IcvZbt0CN5TgwUl8zDCs=
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f/go.mod h1:MZ2GRTcqmve6EoSbErWgCR+Ash4p8Gc5esHe8MDErss=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@ -511,6 +530,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=

View File

@ -0,0 +1,242 @@
package inmem
import "errors"
// ErrInvalidLocation indicates an atom referenced a location the hypergraph
// does not track.
var ErrInvalidLocation error = errors.New("invalid location")

// ErrMissingExtrinsics indicates a hyperedge was added before all of the
// atoms it references (its extrinsics) were present.
var ErrMissingExtrinsics error = errors.New("missing extrinsics")

// ErrIsExtrinsic indicates a removal was refused because the atom is still
// referenced as an extrinsic by a live hyperedge.
var ErrIsExtrinsic error = errors.New("is extrinsic")
// HypergraphCRDT is an in-memory, state-based CRDT view of a hypergraph.
// Each atom kind is tracked per Location with a pair of add/remove sets;
// an atom counts as present when it is in the add set and absent from the
// remove set (two-phase-set semantics: removal wins and is permanent).
type HypergraphCRDT struct {
	// locations is the set of shard locations this instance tracks.
	locations map[Location]struct{}

	// vertexAdds/vertexRemoves form the add/remove pair for vertices,
	// keyed by location.
	vertexAdds    map[Location]*IdSet
	vertexRemoves map[Location]*IdSet

	// hyperedgeAdds/hyperedgeRemoves form the add/remove pair for
	// hyperedges, keyed by location.
	hyperedgeAdds    map[Location]*IdSet
	hyperedgeRemoves map[Location]*IdSet
}
// NewHypergraphCRDT constructs a hypergraph CRDT covering the given
// locations, initializing empty add and remove sets for each one.
func NewHypergraphCRDT(locations []Location) *HypergraphCRDT {
	hg := &HypergraphCRDT{
		locations:        map[Location]struct{}{},
		vertexAdds:       map[Location]*IdSet{},
		vertexRemoves:    map[Location]*IdSet{},
		hyperedgeAdds:    map[Location]*IdSet{},
		hyperedgeRemoves: map[Location]*IdSet{},
	}
	for _, loc := range locations {
		hg.locations[loc] = struct{}{}
		hg.vertexAdds[loc] = NewIdSet("vertex")
		hg.vertexRemoves[loc] = NewIdSet("vertex")
		hg.hyperedgeAdds[loc] = NewIdSet("hyperedge")
		hg.hyperedgeRemoves[loc] = NewIdSet("hyperedge")
	}
	return hg
}
// AddAtom inserts the given atom. Vertices are always accepted; hyperedges
// are only accepted when all their extrinsics are already present (see
// AddHyperedge). Atom implementations other than *Vertex and *Hyperedge
// are silently ignored.
func (hg *HypergraphCRDT) AddAtom(a Atom) error {
	if vertex, ok := a.(*Vertex); ok {
		hg.AddVertex(vertex)
		return nil
	}
	if hyperedge, ok := a.(*Hyperedge); ok {
		return hg.AddHyperedge(hyperedge)
	}
	return nil
}
// AddVertex shards the vertex by location and records it in the add set of
// each resulting location. Errors from IdSet.Add are discarded here; the
// sets produced by ShardVertex contain only vertices, so the type check in
// Add cannot fail for them.
func (hg *HypergraphCRDT) AddVertex(v *Vertex) {
	shardMap := ShardVertex(v)
	for location, vertices := range shardMap {
		for _, vertex := range vertices.VertexSet.atoms {
			if vert, ok := vertex.(*Vertex); ok {
				hg.vertexAdds[location].Add(vert)
			}
		}
	}
}
// AddVertexSet adds every vertex of the given IdSet, sharding the whole set
// by location first. Returns ErrInvalidAtomType when the set is not
// vertex-typed; otherwise nil.
func (hg *HypergraphCRDT) AddVertexSet(vertices *IdSet) error {
	if vertices.atomType != "vertex" {
		return ErrInvalidAtomType
	}
	shardMap := ShardAtomSet(vertices.atoms)
	// Note: the inner `vertices` shadows the parameter; it is the per-location
	// ShardSet produced by ShardAtomSet.
	for location, vertices := range shardMap {
		for _, vertex := range vertices.VertexSet.atoms {
			if vert, ok := vertex.(*Vertex); ok {
				hg.vertexAdds[location].Add(vert)
			}
		}
	}
	return nil
}
// AddHyperedge records a hyperedge in the add sets of every location it
// shards to. The hyperedge is accepted only when each of its extrinsic
// atoms can already be looked up; otherwise ErrMissingExtrinsics is
// returned and nothing is modified.
func (hg *HypergraphCRDT) AddHyperedge(h *Hyperedge) error {
	if !hg.LookupAtomSet(h.extrinsics) {
		return ErrMissingExtrinsics
	}
	shardMap := ShardHyperedge(h)
	for location, set := range shardMap {
		for _, hyperedge := range set.HyperedgeSet.atoms {
			if he, ok := hyperedge.(*Hyperedge); ok {
				hg.hyperedgeAdds[location].Add(he)
			}
		}
		// Vertices belonging to the shard go into the vertex add set.
		// (Previously these were passed to hyperedgeAdds, whose IdSet is
		// typed "hyperedge" and silently rejects *Vertex values — the
		// returned ErrInvalidAtomType was being discarded, making those
		// adds no-ops.)
		for _, vertex := range set.VertexSet.atoms {
			if v, ok := vertex.(*Vertex); ok {
				hg.vertexAdds[location].Add(v)
			}
		}
	}
	return nil
}
// RemoveAtom dispatches removal to RemoveVertex or RemoveHyperedge based on
// the concrete atom type; other implementations are ignored with a nil
// error.
func (hg *HypergraphCRDT) RemoveAtom(a Atom) error {
	switch atom := a.(type) {
	case *Vertex:
		return hg.RemoveVertex(atom)
	case *Hyperedge:
		return hg.RemoveHyperedge(atom)
	default:
		return nil
	}
}
// RemoveVertex marks v as removed at its location. Removal is refused with
// ErrIsExtrinsic while any live (added, not removed) hyperedge at any
// location still references v as an extrinsic. Vertices that cannot be
// looked up are ignored (nil error).
func (hg *HypergraphCRDT) RemoveVertex(v *Vertex) error {
	if hg.LookupVertex(v) {
		// Scan every location's hyperedge add set for a non-removed
		// hyperedge that still references this vertex.
		for l, hyperedgeAdds := range hg.hyperedgeAdds {
			for _, hyperedge := range hyperedgeAdds.atoms {
				he, ok := hyperedge.(*Hyperedge)
				if !ok {
					continue
				}
				if !hg.hyperedgeRemoves[l].Has(he) {
					if _, ok := he.extrinsics[v.GetID()]; ok {
						return ErrIsExtrinsic
					}
				}
			}
		}
		hg.vertexRemoves[v.location].Add(v)
	}
	return nil
}
// RemoveHyperedge marks h as removed at its location. Removal is refused
// with ErrIsExtrinsic while any live hyperedge at any location still
// references h as an extrinsic. Hyperedges that cannot be looked up are
// ignored (nil error).
func (hg *HypergraphCRDT) RemoveHyperedge(h *Hyperedge) error {
	if hg.LookupAtom(h) {
		for l, hyperedgeAdds := range hg.hyperedgeAdds {
			for _, hyperedge := range hyperedgeAdds.atoms {
				// Skip non-hyperedge entries and already-removed hyperedges.
				he, ok := hyperedge.(*Hyperedge)
				if !ok || hg.hyperedgeRemoves[l].Has(he) {
					continue
				}
				if _, ok := he.extrinsics[h.GetID()]; ok {
					return ErrIsExtrinsic
				}
			}
		}
		hg.hyperedgeRemoves[h.location].Add(h)
	}
	return nil
}
// LookupAtom reports whether the atom is present in the hypergraph. Atoms
// homed at an untracked location, and atom implementations other than
// *Vertex / *Hyperedge, are never present.
func (hg *HypergraphCRDT) LookupAtom(a Atom) bool {
	if _, known := hg.locations[a.GetLocation()]; !known {
		return false
	}
	if vertex, ok := a.(*Vertex); ok {
		return hg.LookupVertex(vertex)
	}
	if hyperedge, ok := a.(*Hyperedge); ok {
		return hg.LookupHyperedge(hyperedge)
	}
	return false
}
// LookupAtomSet reports whether every atom in atomSet can be looked up.
// An empty set vacuously yields true.
func (hg *HypergraphCRDT) LookupAtomSet(atomSet map[string]Atom) bool {
	for _, a := range atomSet {
		if !hg.LookupAtom(a) {
			return false
		}
	}
	return true
}
// LookupVertex reports whether the vertex has been added and not removed at
// its own location.
func (hg *HypergraphCRDT) LookupVertex(v *Vertex) bool {
	loc := v.GetLocation()
	if !hg.vertexAdds[loc].Has(v) {
		return false
	}
	return !hg.vertexRemoves[loc].Has(v)
}
// LookupHyperedge checks if a hyperedge and its extrinsics can be looked up:
// every extrinsic atom must be present, and h itself must be added and not
// removed at its own location.
func (hg *HypergraphCRDT) LookupHyperedge(h *Hyperedge) bool {
	return hg.LookupAtomSet(h.extrinsics) &&
		hg.hyperedgeAdds[h.GetLocation()].Has(h) &&
		!hg.hyperedgeRemoves[h.GetLocation()].Has(h)
}
// Within checks if atom `a` is within hyperedge `h` directly or transitively.
// Only a *Hyperedge can contain anything; any other type for h yields false.
// The transitive search descends through extrinsics that are themselves
// hyperedges (two levels per recursion step), and only follows hyperedges
// that currently pass LookupHyperedge.
func (hg *HypergraphCRDT) Within(a, h Atom) bool {
	switch ha := h.(type) {
	case *Hyperedge:
		// Direct membership, or a and h are the same atom by ID.
		_, ok := ha.extrinsics[a.GetID()]
		if ok || a.GetID() == h.GetID() {
			return true
		}
		for _, extrinsic := range ha.extrinsics {
			if he, ok := extrinsic.(*Hyperedge); ok {
				for _, hyperExtrinsic := range he.extrinsics {
					if hyperHe, ok := hyperExtrinsic.(*Hyperedge); ok {
						if hg.LookupHyperedge(hyperHe) {
							if _, ok := hyperHe.extrinsics[a.GetID()]; ok &&
								hg.Within(hyperHe, h) {
								return true
							}
						}
					}
				}
			}
		}
	}
	return false
}
// GetReconciledVertexSet computes the set of vertices at location l that
// have been added but not removed — the location's live vertex view.
func (hg *HypergraphCRDT) GetReconciledVertexSet(l Location) *IdSet {
	live := NewIdSet("vertex")
	removed := hg.vertexRemoves[l]
	for _, vertex := range hg.vertexAdds[l].atoms {
		if removed.Has(vertex) {
			continue
		}
		live.Add(vertex)
	}
	return live
}
// GetReconciledHyperedgeSet computes the set of hyperedges at location l
// that have been added but not removed — the location's live hyperedge view.
func (hg *HypergraphCRDT) GetReconciledHyperedgeSet(l Location) *IdSet {
	live := NewIdSet("hyperedge")
	removed := hg.hyperedgeRemoves[l]
	for _, hyperedge := range hg.hyperedgeAdds[l].atoms {
		if removed.Has(hyperedge) {
			continue
		}
		live.Add(hyperedge)
	}
	return live
}

View File

@ -0,0 +1,75 @@
package inmem_test
import (
"testing"
"github.com/stretchr/testify/assert"
hypergraph "source.quilibrium.com/quilibrium/monorepo/node/hypergraph/inmem"
)
// TestIdSet covers basic IdSet add/has/delete behavior for both atom kinds.
func TestIdSet(t *testing.T) {
	v := hypergraph.NewVertex("1", "here")
	h := hypergraph.NewHyperedge("2", "here", make(map[string]hypergraph.Atom))

	vset := hypergraph.NewIdSet("vertex")
	hset := hypergraph.NewIdSet("hyperedge")

	assert.NoError(t, vset.Add(v))
	assert.NoError(t, hset.Add(h))
	assert.True(t, vset.Has(v))
	assert.True(t, hset.Has(h))

	// Deleting v must remove it from vset. The previous assertion checked
	// hset.Has(v), which is always false (v was never in hset), so the
	// delete was never actually verified.
	assert.True(t, vset.Delete(v))
	assert.False(t, vset.Has(v))
}
// TestCRDT exercises the hypergraph CRDT end to end: adds vertices and
// hyperedges across two locations, checks reconciled views, and verifies
// that removals are blocked while an atom is still an extrinsic of a live
// hyperedge.
func TestCRDT(t *testing.T) {
	loc1 := hypergraph.Location("here1")
	loc2 := hypergraph.Location("here2")
	hg := hypergraph.NewHypergraphCRDT([]hypergraph.Location{loc1, loc2})

	v1 := hypergraph.NewVertex("1", loc1)
	v2 := hypergraph.NewVertex("2", loc2)
	h1 := hypergraph.NewHyperedge("h1", loc1, make(map[string]hypergraph.Atom))
	hg.AddVertex(v1)
	hg.AddVertex(v2)
	hg.AddHyperedge(h1)

	// h2 references both vertices as extrinsics; h3 references h2.
	h2vs := map[string]hypergraph.Atom{}
	h2vs["1"] = v1
	h2vs["2"] = v2
	h2 := hypergraph.NewHyperedge("h2", loc2, h2vs)
	hg.AddHyperedge(h2)
	h3vs := map[string]hypergraph.Atom{}
	h3vs["h2"] = h2
	h3 := hypergraph.NewHyperedge("h3", loc1, h3vs)
	hg.AddHyperedge(h3)

	assert.NotNil(t, hg.LookupVertex(v1))
	assert.NotNil(t, hg.LookupVertex(v2))
	assert.NotNil(t, hg.LookupHyperedge(h1))
	assert.NotNil(t, hg.LookupHyperedge(h2))
	assert.NotNil(t, hg.LookupHyperedge(h3))

	// Reconciled views are per-location: each atom appears only at its own
	// location's view.
	assert.True(t, hg.GetReconciledVertexSet(v1.GetLocation()).Has(v1))
	assert.False(t, hg.GetReconciledVertexSet(v1.GetLocation()).Has(v2))
	assert.True(t, hg.GetReconciledVertexSet(v2.GetLocation()).Has(v2))
	assert.True(t, hg.GetReconciledHyperedgeSet(v1.GetLocation()).Has(h1))
	assert.False(t, hg.GetReconciledHyperedgeSet(h1.GetLocation()).Has(h2))
	assert.True(t, hg.GetReconciledHyperedgeSet(h2.GetLocation()).Has(h2))
	assert.True(t, hg.GetReconciledHyperedgeSet(h3.GetLocation()).Has(h3))

	// h2 cannot be removed while h3 still references it as an extrinsic.
	assert.Error(t, hg.RemoveHyperedge(h2))
	assert.True(t, hg.GetReconciledHyperedgeSet(h2.GetLocation()).Has(h2))
	assert.NoError(t, hg.RemoveHyperedge(h3))
	assert.False(t, hg.GetReconciledHyperedgeSet(h3.GetLocation()).Has(h3))

	// v1 cannot be removed while h2 still references it; once h2 is gone
	// both vertices become removable.
	assert.Error(t, hg.RemoveVertex(v1))
	assert.True(t, hg.GetReconciledVertexSet(v1.GetLocation()).Has(v1))
	assert.NoError(t, hg.RemoveHyperedge(h2))
	assert.False(t, hg.GetReconciledHyperedgeSet(h2.GetLocation()).Has(h2))
	assert.NoError(t, hg.RemoveVertex(v1))
	assert.False(t, hg.GetReconciledVertexSet(v1.GetLocation()).Has(v1))
	assert.NoError(t, hg.RemoveVertex(v2))
	assert.False(t, hg.GetReconciledVertexSet(v2.GetLocation()).Has(v2))
}

View File

@ -0,0 +1,89 @@
package inmem
// InShard reports whether atom a is homed at location l.
func InShard(a Atom, l Location) bool {
	return l == a.GetLocation()
}
// ShardSet holds the atoms assigned to one location, split by atom kind.
type ShardSet struct {
	VertexSet    *IdSet
	HyperedgeSet *IdSet
}
// ShardAtom shards a single atom by its concrete type; atom implementations
// other than *Vertex and *Hyperedge yield nil.
func ShardAtom(a Atom) map[Location]*ShardSet {
	if vertex, ok := a.(*Vertex); ok {
		return ShardVertex(vertex)
	}
	if hyperedge, ok := a.(*Hyperedge); ok {
		return ShardHyperedge(hyperedge)
	}
	return nil
}
// ShardAtomSet shards every atom in atomSet and merges the per-atom shard
// maps into a single map keyed by location. The result always contains an
// entry for each atom's own location, plus any locations contributed by
// hyperedge extrinsics.
func ShardAtomSet(atomSet map[string]Atom) map[Location]*ShardSet {
	result := make(map[Location]*ShardSet)
	// Pre-seed an empty ShardSet at each atom's own location.
	// NOTE(review): the merge loop below also creates entries on demand, so
	// this pre-seed looks redundant — confirm before simplifying.
	for _, a := range atomSet {
		result[a.GetLocation()] = &ShardSet{
			VertexSet:    NewIdSet("vertex"),
			HyperedgeSet: NewIdSet("hyperedge"),
		}
	}
	// Merge each atom's shard map into the combined result, creating
	// location entries as needed.
	for _, atom := range atomSet {
		shard := ShardAtom(atom)
		for location, locationShard := range shard {
			for _, locationAtom := range locationShard.VertexSet.atoms {
				if _, ok := result[location]; !ok {
					result[location] = &ShardSet{
						VertexSet:    NewIdSet("vertex"),
						HyperedgeSet: NewIdSet("hyperedge"),
					}
				}
				result[location].VertexSet.Add(locationAtom)
			}
			for _, locationAtom := range locationShard.HyperedgeSet.atoms {
				if _, ok := result[location]; !ok {
					result[location] = &ShardSet{
						VertexSet:    NewIdSet("vertex"),
						HyperedgeSet: NewIdSet("hyperedge"),
					}
				}
				result[location].HyperedgeSet.Add(locationAtom)
			}
		}
	}
	return result
}
// ShardVertex shards a single vertex: the result maps the vertex's own
// location to a ShardSet containing just that vertex.
//
// The previous version checked whether the freshly created (and therefore
// empty) result map already contained v.location — a condition that was
// always false, leaving dead, misleading code.
func ShardVertex(v *Vertex) map[Location]*ShardSet {
	set := &ShardSet{
		VertexSet:    NewIdSet("vertex"),
		HyperedgeSet: NewIdSet("hyperedge"),
	}
	set.VertexSet.Add(v)
	return map[Location]*ShardSet{v.location: set}
}
// ShardHyperedge shards a hyperedge and its extrinsics across locations.
// The extrinsics are sharded first; the hyperedge itself is then added to
// the ShardSet of its own location, creating one if the extrinsics did not
// already touch that location.
func ShardHyperedge(h *Hyperedge) map[Location]*ShardSet {
	// ShardAtomSet returns a freshly built map, so it is safe to extend
	// in place rather than copying entry by entry.
	result := ShardAtomSet(h.extrinsics)
	own, ok := result[h.location]
	if !ok {
		own = &ShardSet{
			VertexSet:    NewIdSet("vertex"),
			HyperedgeSet: NewIdSet("hyperedge"),
		}
		result[h.location] = own
	}
	own.HyperedgeSet.Add(h)
	return result
}

View File

@ -0,0 +1,133 @@
package inmem
import "errors"
// AtomType discriminates the two atom kinds: "vertex" or "hyperedge".
type AtomType string

// Location identifies the shard an atom is homed at.
type Location string

// ErrInvalidAtomType indicates an atom of the wrong kind was offered to a
// typed IdSet.
var ErrInvalidAtomType error = errors.New("invalid atom type for set")

// Vertex is a node of the hypergraph, identified by id and homed at a
// location.
type Vertex struct {
	id       string
	location Location
}

// Hyperedge is an edge connecting any number of atoms; extrinsics maps the
// IDs of the referenced atoms to the atoms themselves.
type Hyperedge struct {
	id         string
	location   Location
	extrinsics map[string]Atom
}

// Atom is the common interface implemented by Vertex and Hyperedge.
type Atom interface {
	GetID() string
	GetAtomType() AtomType
	GetLocation() Location
}

// Compile-time checks that both concrete types satisfy Atom.
var _v Atom = (*Vertex)(nil)
var _h Atom = (*Hyperedge)(nil)
// NewVertex returns a vertex with the given id, homed at location.
func NewVertex(id string, location Location) *Vertex {
	return &Vertex{
		id:       id,
		location: location,
	}
}
// NewHyperedge returns a hyperedge with the given id, homed at location,
// referencing the supplied extrinsic atoms (keyed by their IDs).
func NewHyperedge(
	id string,
	location Location,
	extrinsics map[string]Atom,
) *Hyperedge {
	return &Hyperedge{
		id:         id,
		location:   location,
		extrinsics: extrinsics,
	}
}
// GetID returns the vertex's unique identifier.
func (v *Vertex) GetID() string {
	return v.id
}

// GetID returns the hyperedge's unique identifier.
func (h *Hyperedge) GetID() string {
	return h.id
}

// GetAtomType identifies the receiver as a vertex.
func (v *Vertex) GetAtomType() AtomType {
	return "vertex"
}

// GetAtomType identifies the receiver as a hyperedge.
func (h *Hyperedge) GetAtomType() AtomType {
	return "hyperedge"
}

// GetLocation returns the location the vertex is homed at.
func (v *Vertex) GetLocation() Location {
	return v.location
}

// GetLocation returns the location the hyperedge is homed at.
func (h *Hyperedge) GetLocation() Location {
	return h.location
}
// IdSet is a collection of atoms keyed by ID; atomType restricts which atom
// kind may be stored ("vertex" or "hyperedge").
type IdSet struct {
	atomType AtomType
	atoms    map[string]Atom
}

// NewIdSet returns an empty IdSet constrained to the given atom type.
func NewIdSet(atomType AtomType) *IdSet {
	return &IdSet{atomType: atomType, atoms: make(map[string]Atom)}
}
// Add adds an atom to the IdSet if it's not already present. It returns
// ErrInvalidAtomType when the atom kind does not match the set's type;
// atoms of any other implementation are ignored with a nil error.
func (set *IdSet) Add(atom Atom) error {
	var want AtomType
	switch atom.(type) {
	case *Vertex:
		want = "vertex"
	case *Hyperedge:
		want = "hyperedge"
	default:
		return nil
	}
	if set.atomType != want {
		return ErrInvalidAtomType
	}
	if _, exists := set.atoms[atom.GetID()]; !exists {
		set.atoms[atom.GetID()] = atom
	}
	return nil
}
// Delete removes an atom from the IdSet and returns true if the atom was
// present. Atoms other than *Vertex and *Hyperedge are never present and
// yield false.
func (set *IdSet) Delete(atom Atom) bool {
	switch atom.(type) {
	case *Vertex, *Hyperedge:
		id := atom.GetID()
		if _, exists := set.atoms[id]; exists {
			delete(set.atoms, id)
			return true
		}
	}
	return false
}
// Has checks if an atom is in the IdSet (by ID). Atoms other than *Vertex
// and *Hyperedge always yield false.
func (set *IdSet) Has(atom Atom) bool {
	switch atom.(type) {
	case *Vertex, *Hyperedge:
		_, exists := set.atoms[atom.GetID()]
		return exists
	default:
		return false
	}
}

View File

@ -113,6 +113,11 @@ var (
0, 0,
"specifies the parent process pid for a data worker", "specifies the parent process pid for a data worker",
) )
integrityCheck = flag.Bool(
"integrity-check",
false,
"runs an integrity check on the store, helpful for confirming backups are not corrupted (defaults to false)",
)
) )
var signatories = []string{ var signatories = []string{
@ -402,7 +407,10 @@ func main() {
} }
fmt.Println("Loading ceremony state and starting node...") fmt.Println("Loading ceremony state and starting node...")
if !*integrityCheck {
go spawnDataWorkers(nodeConfig) go spawnDataWorkers(nodeConfig)
}
kzg.Init() kzg.Init()
@ -422,6 +430,13 @@ func main() {
panic(err) panic(err)
} }
if *integrityCheck {
fmt.Println("Running integrity check...")
node.VerifyProofIntegrity()
fmt.Println("Integrity check passed!")
return
}
repair(*configDirectory, node) repair(*configDirectory, node)
if nodeConfig.ListenGRPCMultiaddr != "" { if nodeConfig.ListenGRPCMultiaddr != "" {
@ -477,6 +492,7 @@ func spawnDataWorkers(nodeConfig *config.Config) {
for i := 1; i <= cores-1; i++ { for i := 1; i <= cores-1; i++ {
i := i i := i
go func() { go func() {
for {
args := []string{ args := []string{
fmt.Sprintf("--core=%d", i), fmt.Sprintf("--core=%d", i),
fmt.Sprintf("--parent-process=%d", os.Getpid()), fmt.Sprintf("--parent-process=%d", os.Getpid()),
@ -491,6 +507,10 @@ func spawnDataWorkers(nodeConfig *config.Config) {
} }
dataWorkers[i-1] = cmd dataWorkers[i-1] = cmd
cmd.Wait()
time.Sleep(25 * time.Millisecond)
fmt.Printf("Data worker %d stopped, restarting...\n", i)
}
}() }()
} }
} }
@ -912,5 +932,5 @@ func printVersion() {
patchString = fmt.Sprintf("-p%d", patch) patchString = fmt.Sprintf("-p%d", patch)
} }
fmt.Println(" ") fmt.Println(" ")
fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " Betelgeuse") fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " Solstice")
} }

View File

@ -22,6 +22,7 @@ import (
"github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/libp2p/go-libp2p/p2p/discovery/routing"
"github.com/libp2p/go-libp2p/p2p/discovery/util" "github.com/libp2p/go-libp2p/p2p/discovery/util"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
"github.com/mr-tron/base58" "github.com/mr-tron/base58"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -32,6 +33,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/config" "source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs" "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
) )
type BlossomSub struct { type BlossomSub struct {
@ -61,7 +63,7 @@ var BITMASK_ALL = []byte{
// While we iterate through these next phases, we're going to aggressively // While we iterate through these next phases, we're going to aggressively
// enforce keeping updated. This will be achieved through announce strings // enforce keeping updated. This will be achieved through announce strings
// that will vary with each update // that will vary with each update
var ANNOUNCE_PREFIX = "quilibrium-1.4.19-betelgeuse-" var ANNOUNCE_PREFIX = "quilibrium-1.4.20-solstice-"
func getPeerID(p2pConfig *config.P2PConfig) peer.ID { func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey) peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
@ -85,6 +87,7 @@ func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
func NewBlossomSub( func NewBlossomSub(
p2pConfig *config.P2PConfig, p2pConfig *config.P2PConfig,
peerstore store.Peerstore,
logger *zap.Logger, logger *zap.Logger,
) *BlossomSub { ) *BlossomSub {
ctx := context.Background() ctx := context.Background()
@ -123,6 +126,13 @@ func NewBlossomSub(
opts = append(opts, libp2p.Identity(privKey)) opts = append(opts, libp2p.Identity(privKey))
} }
ps, err := pstoreds.NewPeerstore(ctx, peerstore, pstoreds.DefaultOpts())
if err != nil {
panic(err)
}
opts = append(opts, libp2p.Peerstore(ps))
bs := &BlossomSub{ bs := &BlossomSub{
ctx: ctx, ctx: ctx,
logger: logger, logger: logger,
@ -182,7 +192,7 @@ func NewBlossomSub(
BehaviourPenaltyDecay: .5, BehaviourPenaltyDecay: .5,
DecayInterval: 10 * time.Second, DecayInterval: 10 * time.Second,
DecayToZero: .1, DecayToZero: .1,
RetainScore: 5 * time.Minute, RetainScore: 60 * time.Minute,
AppSpecificScore: func(p peer.ID) float64 { AppSpecificScore: func(p peer.ID) float64 {
return float64(bs.GetPeerScore([]byte(p))) return float64(bs.GetPeerScore([]byte(p)))
}, },
@ -199,13 +209,13 @@ func NewBlossomSub(
params := mergeDefaults(p2pConfig) params := mergeDefaults(p2pConfig)
rt := blossomsub.NewBlossomSubRouter(h, params) rt := blossomsub.NewBlossomSubRouter(h, params)
ps, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...) pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
if err != nil { if err != nil {
panic(err) panic(err)
} }
peerID := h.ID() peerID := h.ID()
bs.ps = ps bs.ps = pubsub
bs.peerID = peerID bs.peerID = peerID
bs.h = h bs.h = h
bs.signKey = privKey bs.signKey = privKey

View File

@ -158,7 +158,9 @@ func (r *RPCServer) GetNodeInfo(
PeerId: peerID.String(), PeerId: peerID.String(),
MaxFrame: r.masterClock.GetFrame().GetFrameNumber(), MaxFrame: r.masterClock.GetFrame().GetFrameNumber(),
PeerScore: uint64(peerScore), PeerScore: uint64(peerScore),
Version: config.GetVersion(), Version: append(
append([]byte{}, config.GetVersion()...), config.GetPatchNumber(),
),
}, nil }, nil
} }

451
node/schema/rdf.go Normal file
View File

@ -0,0 +1,451 @@
package schema
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/deiu/rdf2go"
"github.com/pkg/errors"
)
// RDFParser validates RDF documents.
type RDFParser interface {
	Validate(document string) (bool, error)
}

// TurtleRDFParser implements RDFParser for Turtle-serialized RDF.
type TurtleRDFParser struct {
}

// Field describes one property of a schema class as discovered in the RDF
// graph, carrying the type and ordering info used for QCL generation.
type Field struct {
	Name       string
	Type       string
	Size       uint32
	Comment    string
	Annotation string
	RdfType    string
	Order      int
	ClassUrl   rdf2go.Term
}
// Namespace IRIs used while walking the parsed graph.
const RdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
const RdfsNS = "http://www.w3.org/2000/01/rdf-schema#"
const SchemaRepositoryNS = "https://types.quilibrium.com/schema-repository/"
const QCLNS = "https://types.quilibrium.com/qcl/"

// Format templates: Prefix wraps an IRI in angle brackets; TupleString joins
// a namespace with a local name; NTupleString does the same wrapped in
// angle brackets (the N-Triples rendering).
const Prefix = "<%s>"
const TupleString = "%s%s"
const NTupleString = "<%s%s>"

// Angle-bracketed (N-Triples form) terms, as produced by Term.String().
var rdfTypeN = fmt.Sprintf(NTupleString, RdfNS, "type")
var rdfsClassN = fmt.Sprintf(NTupleString, RdfsNS, "Class")
var rdfsPropertyN = fmt.Sprintf(NTupleString, RdfsNS, "Property")
var rdfsDomainN = fmt.Sprintf(NTupleString, RdfsNS, "domain")
var rdfsRangeN = fmt.Sprintf(NTupleString, RdfsNS, "range")
var rdfsCommentN = fmt.Sprintf(NTupleString, RdfsNS, "comment")

// Raw IRI forms, as consumed by rdf2go.NewResource.
var rdfType = fmt.Sprintf(TupleString, RdfNS, "type")
var rdfsClass = fmt.Sprintf(TupleString, RdfsNS, "Class")
var rdfsProperty = fmt.Sprintf(TupleString, RdfsNS, "Property")
var rdfsDomain = fmt.Sprintf(TupleString, RdfsNS, "domain")
var rdfsRange = fmt.Sprintf(TupleString, RdfsNS, "range")
var qclSize = fmt.Sprintf(TupleString, QCLNS, "size")
var qclOrder = fmt.Sprintf(TupleString, QCLNS, "order")
var rdfsComment = fmt.Sprintf(TupleString, RdfsNS, "comment")

// qclRdfTypeMap maps QCL class names to Go type templates; %d placeholders
// are filled in from the qcl:size predicate.
var qclRdfTypeMap = map[string]string{
	"Uint":      "uint%d",
	"Int":       "int%d",
	"ByteArray": "[%d]byte",
	"Bool":      "bool",
	"Float":     "float%d",
	"String":    "string",
	"Struct":    "struct",
}
// Validate parses the document as text/turtle against the schema-repository
// base graph and reports whether it is well-formed; any parse error is
// wrapped with "validate".
func (t *TurtleRDFParser) Validate(document string) (bool, error) {
	g := rdf2go.NewGraph("https://types.quilibrium.com/schema-repository/")
	if err := g.Parse(strings.NewReader(document), "text/turtle"); err != nil {
		return false, errors.Wrap(err, "validate")
	}
	return true, nil
}
// GenerateQCL parses a Turtle document describing RDF classes and emits Go
// source ("package main") containing one struct per rdfs:Class plus
// Marshal/Unmarshal helpers for the fixed-size binary layout implied by the
// qcl:size and qcl:order predicates. It returns the generated source text,
// or an error when the document fails to parse or omits required predicates.
func (t *TurtleRDFParser) GenerateQCL(document string) (string, error) {
	g := rdf2go.NewGraph("https://types.quilibrium.com/schema-repository/")
	reader := strings.NewReader(document)
	err := g.Parse(reader, "text/turtle")
	if err != nil {
		return "", errors.Wrap(err, "generate qcl")
	}

	// Collect PREFIX declarations so field annotations can use the short
	// prefix instead of the full namespace URL.
	prefixMap := make(map[string]string)
	for _, line := range strings.Split(document, "\n") {
		parts := strings.Split(line, " ")
		switch parts[0] {
		case "PREFIX":
			if len(parts) != 3 {
				// Fix: previously wrapped the (nil) parse error here, which
				// made errors.Wrap return nil and silently accepted the
				// malformed line.
				return "", errors.Wrap(
					errors.New("invalid PREFIX line"),
					"generate qcl",
				)
			}
			prefixMap[strings.Trim(parts[2], "<>")] = parts[1]
		}
	}

	// First pass: find every rdfs:Class triple and record the class name
	// (last path/fragment segment of the subject) and its term.
	iter := g.IterTriples()
	classes := []string{}
	classTerms := []rdf2go.Term{}
	fields := make(map[string]map[string]*Field)
	for a := range iter {
		if a.Predicate.String() == rdfTypeN &&
			a.Object.String() == rdfsClassN {
			subj := a.Subject.RawValue()
			parts := strings.Split(subj, "#")
			className := parts[len(parts)-1]
			parts = strings.Split(className, "/")
			className = parts[len(parts)-1]
			classes = append(classes, className)
			classTerms = append(classTerms, a.Subject)
		}
	}

	// Second pass: every property whose rdfs:range is one of the classes
	// becomes a field of that class; Order starts at -1 so a missing
	// qcl:order predicate can be detected below.
	for i, c := range classTerms {
		for _, prop := range g.All(nil, rdf2go.NewResource(rdfsRange), c) {
			subj := prop.Subject.RawValue()
			parts := strings.Split(subj, "#")
			className := parts[len(parts)-1]
			parts = strings.Split(className, "/")
			className = parts[len(parts)-1]
			classUrl := subj[:len(subj)-len(className)]
			if _, ok := fields[classes[i]]; !ok {
				fields[classes[i]] = make(map[string]*Field)
			}
			fields[classes[i]][className] = &Field{
				Name:       className,
				ClassUrl:   prop.Subject,
				Annotation: prefixMap[classUrl] + className,
				Order:      -1,
			}
		}
	}

	// Resolve each field's type, size, order and comment from its
	// rdfs:domain, qcl:size, qcl:order and rdfs:comment predicates.
	for _, class := range fields {
		for fieldName, field := range class {
			// scan the types
			for _, prop := range g.All(field.ClassUrl, rdf2go.NewResource(
				rdfsDomain,
			), nil) {
				obj := prop.Object.RawValue()
				parts := strings.Split(obj, "#")
				className := parts[len(parts)-1]
				parts = strings.Split(className, "/")
				className = parts[len(parts)-1]
				classUrl := obj[:len(obj)-len(className)]
				switch classUrl {
				case QCLNS:
					// Built-in QCL type; a qcl:size predicate fills in the
					// width (bytes for String/ByteArray/Struct, bits for
					// numeric types, hence the *8 below).
					field.Type = qclRdfTypeMap[className]
					for _, sprop := range g.All(field.ClassUrl, rdf2go.NewResource(
						qclSize,
					), nil) {
						sobj := sprop.Object.RawValue()
						parts := strings.Split(sobj, "#")
						size := parts[len(parts)-1]
						parts = strings.Split(size, "/")
						size = parts[len(parts)-1]
						s, err := strconv.Atoi(size)
						fieldSize := s
						if className != "String" && className != "ByteArray" && className != "Struct" {
							fieldSize *= 8
						}
						if err != nil || s < 1 {
							return "", errors.Wrap(
								fmt.Errorf(
									"invalid size for %s: %s",
									fieldName,
									size,
								),
								"generate qcl",
							)
						}
						if strings.Contains(field.Type, "%") {
							field.Type = fmt.Sprintf(field.Type, fieldSize)
						}
						field.RdfType = className
						field.Size = uint32(s)
					}
					if strings.Contains(field.Type, "%d") {
						return "", errors.Wrap(
							fmt.Errorf(
								"size unspecified for %s, add a qcl:size predicate",
								fieldName,
							),
							"generate qcl",
						)
					}
				case RdfsNS:
					if className != "Literal" {
						return "", errors.Wrap(
							fmt.Errorf(
								"invalid property type for %s: %s",
								fieldName,
								className,
							),
							"generate qcl",
						)
					}
					field.Type = className
				default:
					// Reference to another schema class: stored as a 32-byte
					// hypergraph extrinsic reference.
					field.Type = "hypergraph.Extrinsic"
					field.Annotation += ",extrinsic=" + prefixMap[classUrl] + className
					field.Size = 32
					field.RdfType = "Struct"
				}
				// Only the first rdfs:domain triple is considered.
				break
			}
			for _, sprop := range g.All(field.ClassUrl, rdf2go.NewResource(
				qclOrder,
			), nil) {
				sobj := sprop.Object.RawValue()
				parts := strings.Split(sobj, "#")
				order := parts[len(parts)-1]
				parts = strings.Split(order, "/")
				order = parts[len(parts)-1]
				o, err := strconv.Atoi(order)
				fieldOrder := o
				if err != nil || o < 0 {
					return "", errors.Wrap(
						fmt.Errorf(
							"invalid order for %s: %s",
							fieldName,
							order,
						),
						"generate qcl",
					)
				}
				field.Order = fieldOrder
			}
			if field.Order < 0 {
				return "", errors.Wrap(
					fmt.Errorf(
						"field order unspecified for %s, add a qcl:order predicate",
						fieldName,
					),
					"generate qcl",
				)
			}
			for _, prop := range g.All(field.ClassUrl, rdf2go.NewResource(
				rdfsComment,
			), nil) {
				field.Comment = prop.Object.String()
			}
		}
	}

	// Emit struct declarations, classes sorted for deterministic output,
	// fields sorted by their declared qcl:order.
	output := "package main\n\n"
	sort.Slice(classes, func(i, j int) bool {
		return strings.Compare(classes[i], classes[j]) < 0
	})
	for _, class := range classes {
		output += fmt.Sprintf("type %s struct {\n", class)
		sortedFields := []*Field{}
		for _, field := range fields[class] {
			sortedFields = append(sortedFields, field)
		}
		sort.Slice(sortedFields, func(i, j int) bool {
			return sortedFields[i].Order < sortedFields[j].Order
		})
		for _, field := range sortedFields {
			if field.Comment != "" {
				output += fmt.Sprintf("  // %s\n", field.Comment)
			}
			output += fmt.Sprintf(
				"  %s %s `rdf:\"%s\"`\n",
				field.Name,
				field.Type,
				field.Annotation,
			)
		}
		output += "}\n\n"
	}

	// Emit UnmarshalX functions: decode each field from its fixed offset in
	// the payload, advancing cursor s by the field's size.
	for _, class := range classes {
		totalSize := uint32(0)
		for _, field := range fields[class] {
			totalSize += field.Size
		}
		output += fmt.Sprintf(
			"func Unmarshal%s(payload [%d]byte) %s {\n  result := %s{}\n",
			class,
			totalSize,
			class,
			class,
		)
		s := uint32(0)
		sortedFields := []*Field{}
		for _, field := range fields[class] {
			sortedFields = append(sortedFields, field)
		}
		sort.Slice(sortedFields, func(i, j int) bool {
			return sortedFields[i].Order < sortedFields[j].Order
		})
		for _, field := range sortedFields {
			sizedType := ""
			switch field.RdfType {
			case "Uint":
				sizedType = fmt.Sprintf(
					"binary.GetUint(payload[%d:%d])",
					s,
					s+field.Size,
				)
				s += field.Size
			case "Int":
				sizedType = fmt.Sprintf(
					"int%d(binary.GetUint(payload[%d:%d]))",
					field.Size,
					s,
					s+field.Size,
				)
				s += field.Size
			case "ByteArray":
				sizedType = fmt.Sprintf(
					"payload[%d:%d]",
					s,
					s+field.Size,
				)
				s += field.Size
			case "Bool":
				// Fix: the Marshal side writes a single 0xff/0x00 byte;
				// decode that byte back into a bool. Previously this emitted
				// the bare token "bool", which is not valid generated code.
				sizedType = fmt.Sprintf(
					"payload[%d] != 0x00",
					s,
				)
				s++
			case "Float":
				sizedType = fmt.Sprintf(
					"payload[%d:%d]",
					s,
					s+field.Size,
				)
				s += field.Size
			case "String":
				sizedType = fmt.Sprintf(
					"string(payload[%d:%d])",
					s,
					s+field.Size,
				)
				s += field.Size
			case "Struct":
				sizedType = fmt.Sprintf(
					"hypergraph.Extrinsic{}\n  result.%s.Ref = payload[%d:%d]",
					field.Name,
					s,
					s+field.Size,
				)
				s += field.Size
			}
			output += fmt.Sprintf(
				"  result.%s = %s\n",
				field.Name,
				sizedType,
			)
		}
		output += "  return result\n}\n\n"
	}

	// Emit MarshalX functions: the mirror image of Unmarshal, writing each
	// field at its fixed offset.
	for _, class := range classes {
		totalSize := uint32(0)
		for _, field := range fields[class] {
			totalSize += field.Size
		}
		output += fmt.Sprintf(
			"func Marshal%s(obj %s) [%d]byte {\n",
			class,
			class,
			totalSize,
		)
		s := uint32(0)
		sortedFields := []*Field{}
		for _, field := range fields[class] {
			sortedFields = append(sortedFields, field)
		}
		sort.Slice(sortedFields, func(i, j int) bool {
			return sortedFields[i].Order < sortedFields[j].Order
		})
		output += fmt.Sprintf("  buf := make([]byte, %d)\n", totalSize)
		for _, field := range sortedFields {
			sizedType := ""
			switch field.RdfType {
			case "Uint":
				sizedType = fmt.Sprintf(
					"binary.PutUint(buf, %d, obj.%s)",
					s,
					field.Name,
				)
				s += field.Size
			case "Int":
				sizedType = fmt.Sprintf(
					"binary.PutInt(buf, %d, obj.%s)",
					s,
					field.Name,
				)
				s += field.Size
			case "ByteArray":
				sizedType = fmt.Sprintf(
					"copy(buf[%d:%d], obj.%s)",
					s,
					s+field.Size,
					field.Name,
				)
				s += field.Size
			case "Bool":
				sizedType = fmt.Sprintf(
					"if obj.%s { buf[%d] = 0xff } else { buf[%d] = 0x00 }",
					field.Name,
					s,
					s,
				)
				s++
			case "Float":
				sizedType = fmt.Sprintf(
					"copy(buf[%d:%d], obj.%s)",
					s,
					s+field.Size,
					field.Name,
				)
				s += field.Size
			case "String":
				sizedType = fmt.Sprintf(
					"copy(buf[%d:%d], []byte(obj.%s))",
					s,
					s+field.Size,
					field.Name,
				)
				s += field.Size
			case "Struct":
				sizedType = fmt.Sprintf(
					"copy(buf[%d:%d], obj.%s.Ref)",
					s,
					s+field.Size,
					field.Name,
				)
				s += field.Size
			}
			output += fmt.Sprintf(
				"  %s\n",
				sizedType,
			)
		}
		output += "  return buf\n}\n\n"
	}
	return output, nil
}

View File

@ -1,6 +1,7 @@
package store package store
import ( import (
"bytes"
"errors" "errors"
"io" "io"
"math/rand" "math/rand"
@ -92,6 +93,24 @@ func (i *InMemKVDBIterator) Next() bool {
return found return found
} }
// Last positions the iterator at the greatest key not exceeding its end
// bound, for reverse iteration. Returns false only when the iterator is
// closed.
func (i *InMemKVDBIterator) Last() bool {
	if !i.open {
		return false
	}
	i.db.storeMx.Lock()
	found := false
	// Binary-search the sorted key list for the end bound; when end is
	// absent (or past the final key) step back one slot to the predecessor.
	final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
	if len(i.db.sortedKeys) == final ||
		!bytes.Equal([]byte(i.db.sortedKeys[final]), i.end) {
		final--
	}
	// NOTE(review): on an empty key list this leaves pos at -1 yet still
	// reports true, and the start bound is not checked — confirm callers
	// tolerate both.
	i.pos = final
	found = true
	i.db.storeMx.Unlock()
	return found
}
func (i *InMemKVDBIterator) Prev() bool { func (i *InMemKVDBIterator) Prev() bool {
if !i.open { if !i.open {
return false return false
@ -162,6 +181,26 @@ func (i *InMemKVDBIterator) SeekLT(lt []byte) bool {
return found return found
} }
// Get returns the value for key, preferring any value pending in this
// transaction's change list over the committed store. Misses are reported
// as pebble.ErrNotFound to match the on-disk implementation.
func (t *InMemKVDBTransaction) Get(key []byte) ([]byte, io.Closer, error) {
	if !t.db.open {
		return nil, nil, errors.New("inmem db closed")
	}
	// Uncommitted writes in this transaction shadow the store.
	// NOTE(review): a pending delete change (if representable here) would
	// also match and return its value — confirm delete semantics.
	for _, c := range t.changes {
		if bytes.Equal(c.key, key) {
			return c.value, io.NopCloser(nil), nil
		}
	}
	t.db.storeMx.Lock()
	b, ok := t.db.store[string(key)]
	t.db.storeMx.Unlock()
	if !ok {
		return nil, nil, pebble.ErrNotFound
	}
	return b, io.NopCloser(nil), nil
}
func (t *InMemKVDBTransaction) Set(key []byte, value []byte) error { func (t *InMemKVDBTransaction) Set(key []byte, value []byte) error {
if !t.db.open { if !t.db.open {
return errors.New("inmem db closed") return errors.New("inmem db closed")
@ -212,6 +251,23 @@ func (t *InMemKVDBTransaction) Delete(key []byte) error {
return nil return nil
} }
// NewIter returns an iterator over [lowerBound, upperBound) of the
// underlying store.
//
// NOTE(review): the iterator reads only the committed store — pending
// changes in this transaction are not visible through it, unlike pebble
// batch iterators; confirm this is acceptable for callers.
func (t *InMemKVDBTransaction) NewIter(lowerBound []byte, upperBound []byte) (
	Iterator,
	error,
) {
	if !t.db.open {
		return nil, errors.New("inmem db closed")
	}
	return &InMemKVDBIterator{
		open:  true,
		db:    t.db,
		start: lowerBound,
		end:   upperBound,
		pos:   -1, // before-first; a Next/First call must position the iterator
	}, nil
}
func (t *InMemKVDBTransaction) Abort() error { func (t *InMemKVDBTransaction) Abort() error {
return nil return nil
} }

View File

@ -11,6 +11,7 @@ type Iterator interface {
Value() []byte Value() []byte
Close() error Close() error
SeekLT([]byte) bool SeekLT([]byte) bool
Last() bool
} }
type TypedIterator[T proto.Message] interface { type TypedIterator[T proto.Message] interface {

View File

@ -88,16 +88,22 @@ func (p *PebbleDB) CompactAll() error {
var _ KVDB = (*PebbleDB)(nil) var _ KVDB = (*PebbleDB)(nil)
type Transaction interface { type Transaction interface {
Get(key []byte) ([]byte, io.Closer, error)
Set(key []byte, value []byte) error Set(key []byte, value []byte) error
Commit() error Commit() error
Delete(key []byte) error Delete(key []byte) error
Abort() error Abort() error
NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
} }
type PebbleTransaction struct { type PebbleTransaction struct {
b *pebble.Batch b *pebble.Batch
} }
// Get reads key through the underlying pebble batch, so writes pending in
// this transaction are visible. The returned closer must be closed before
// the value slice is considered invalid.
func (t *PebbleTransaction) Get(key []byte) ([]byte, io.Closer, error) {
	return t.b.Get(key)
}
func (t *PebbleTransaction) Set(key []byte, value []byte) error { func (t *PebbleTransaction) Set(key []byte, value []byte) error {
return t.b.Set(key, value, &pebble.WriteOptions{Sync: true}) return t.b.Set(key, value, &pebble.WriteOptions{Sync: true})
} }
@ -114,6 +120,16 @@ func (t *PebbleTransaction) Abort() error {
return t.b.Close() return t.b.Close()
} }
// NewIter returns a pebble batch iterator bounded to
// [lowerBound, upperBound), including this batch's uncommitted writes.
func (t *PebbleTransaction) NewIter(lowerBound []byte, upperBound []byte) (
	Iterator,
	error,
) {
	return t.b.NewIter(&pebble.IterOptions{
		LowerBound: lowerBound,
		UpperBound: upperBound,
	})
}
var _ Transaction = (*PebbleTransaction)(nil) var _ Transaction = (*PebbleTransaction)(nil)
func rightAlign(data []byte, size int) []byte { func rightAlign(data []byte, size int) []byte {

318
node/store/peerstore.go Normal file
View File

@ -0,0 +1,318 @@
package store
import (
"context"
"github.com/cockroachdb/pebble"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
"github.com/pkg/errors"
)
// shim structs for go-datastore
type batch struct {
b *transaction
db KVDB
}
type transaction struct {
tx Transaction
}
type PeerstoreDatastore struct {
db KVDB
}
const (
PEERSTORE = 0x06
)
type Peerstore interface {
ds.TxnDatastore
ds.PersistentDatastore
ds.Batching
}
var _ ds.Datastore = (*PeerstoreDatastore)(nil)
var _ ds.TxnDatastore = (*PeerstoreDatastore)(nil)
var _ ds.Txn = (*transaction)(nil)
var _ ds.PersistentDatastore = (*PeerstoreDatastore)(nil)
var _ ds.Batching = (*PeerstoreDatastore)(nil)
var _ ds.Batch = (*batch)(nil)
var _ Peerstore = (*PeerstoreDatastore)(nil)
// NewPeerstoreDatastore wraps db in a PeerstoreDatastore. The error return
// exists for interface symmetry and is always nil.
func NewPeerstoreDatastore(db KVDB) (*PeerstoreDatastore, error) {
	return &PeerstoreDatastore{db: db}, nil
}
// Put stores value under key within the peerstore keyspace.
func (d *PeerstoreDatastore) Put(
	ctx context.Context,
	key ds.Key,
	value []byte,
) (err error) {
	// Namespace the key under the peerstore prefix byte before writing.
	prefixed := append([]byte{PEERSTORE}, key.Bytes()...)
	return d.db.Set(prefixed, value)
}
// Sync is a no-op: the underlying KVDB writes are already durable, so there
// is nothing to flush for a prefix.
func (d *PeerstoreDatastore) Sync(ctx context.Context, prefix ds.Key) error {
	return nil
}
// Get returns the value stored for key, translating pebble's not-found
// sentinel to ds.ErrNotFound. The value is copied before the closer is
// released so the returned slice stays valid afterwards.
func (d *PeerstoreDatastore) Get(
	ctx context.Context,
	key ds.Key,
) (value []byte, err error) {
	val, closer, err := d.db.Get(append([]byte{PEERSTORE}, key.Bytes()...))
	if err != nil {
		if err == pebble.ErrNotFound {
			return nil, ds.ErrNotFound
		}
		return nil, err
	}
	out := make([]byte, len(val))
	copy(out, val)
	closer.Close()
	// Fix: return the copy — val's backing memory is only guaranteed valid
	// until the closer is closed, but the original returned val.
	return out, nil
}
// Has reports whether key exists, treating ds.ErrNotFound as "absent"
// rather than as an error.
func (d *PeerstoreDatastore) Has(
	ctx context.Context,
	key ds.Key,
) (exists bool, err error) {
	_, err = d.Get(ctx, key)
	switch {
	case err == nil:
		return true, nil
	case err == ds.ErrNotFound:
		return false, nil
	default:
		return false, errors.Wrap(err, "has")
	}
}
// GetSize returns the stored value's length by delegating to the
// go-datastore helper, which fetches the value via Get.
func (d *PeerstoreDatastore) GetSize(
	ctx context.Context,
	key ds.Key,
) (size int, err error) {
	return ds.GetBackedSize(ctx, d, key)
}
// Delete removes key from the peerstore keyspace. Deleting an absent key is
// not an error at the KVDB level.
func (d *PeerstoreDatastore) Delete(
	ctx context.Context,
	key ds.Key,
) (err error) {
	return d.db.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
}
// Query streams entries under the peerstore prefix. Key-ordered queries are
// served directly from the sorted iterator; everything else (filters,
// offsets, limits, other orders) is applied naively by dsq.NaiveQueryApply.
func (d *PeerstoreDatastore) Query(ctx context.Context, q dsq.Query) (
	dsq.Results,
	error,
) {
	// All peerstore keys live under the PEERSTORE byte; narrow the range
	// further when the query carries a key prefix.
	rnge := []byte{PEERSTORE}
	qNaive := q
	prefix := ds.NewKey(q.Prefix).String()
	if prefix != "/" {
		rnge = append(rnge, []byte(prefix+"/")...)
		qNaive.Prefix = ""
	}
	i, err := d.db.NewIter(rnge, nil)
	if err != nil {
		return nil, errors.Wrap(err, "query")
	}
	next := i.Next
	if len(q.Orders) > 0 {
		switch q.Orders[0].(type) {
		case dsq.OrderByKey, *dsq.OrderByKey:
			// Iterator already yields ascending key order.
			qNaive.Orders = nil
			i.First()
		case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
			// Walk backwards: the first next() positions at the last key
			// and rebinds next to Prev for subsequent calls.
			next = func() bool {
				next = i.Prev
				return i.Last()
			}
			qNaive.Orders = nil
		default:
			i.First()
		}
	} else {
		i.First()
	}
	// NOTE(review): First() is called before the first next() (= Next());
	// confirm the KVDB iterator contract makes that initial Next() land on
	// the first entry rather than skip it.
	r := dsq.ResultsFromIterator(q, dsq.Iterator{
		Next: func() (dsq.Result, bool) {
			if !next() {
				return dsq.Result{}, false
			}
			// Strip the PEERSTORE prefix byte from the returned key.
			k := string(i.Key()[1:])
			e := dsq.Entry{Key: k, Size: len(i.Value())}
			if !q.KeysOnly {
				// Copy the value out: the iterator may reuse its buffer.
				buf := make([]byte, len(i.Value()))
				copy(buf, i.Value())
				e.Value = buf
			}
			return dsq.Result{Entry: e}, true
		},
		Close: func() error {
			return i.Close()
		},
	})
	return dsq.NaiveQueryApply(qNaive, r), nil
}
// TODO: get disk usage of peerstore later

// DiskUsage reports 0 until per-prefix accounting is implemented.
func (d *PeerstoreDatastore) DiskUsage(ctx context.Context) (uint64, error) {
	return 0, nil
}
// Closing is not done here:
// the shared KVDB is owned and closed by its creator, so Close is a no-op.
func (d *PeerstoreDatastore) Close() (err error) {
	return nil
}
// Batch returns a write batch backed by a fresh KVDB transaction; writes
// become visible when the batch is committed.
func (d *PeerstoreDatastore) Batch(ctx context.Context) (ds.Batch, error) {
	tx := &transaction{tx: d.db.NewBatch()}
	return &batch{b: tx, db: d.db}, nil
}
// NewTransaction opens a datastore transaction backed by a KVDB batch. The
// readOnly hint is accepted for interface compatibility but not acted on.
func (d *PeerstoreDatastore) NewTransaction(
	ctx context.Context,
	readOnly bool,
) (ds.Txn, error) {
	return &transaction{tx: d.db.NewBatch()}, nil
}
// Put records a write in the batch via the wrapped transaction (which
// applies the PEERSTORE key prefix).
func (b *batch) Put(ctx context.Context, key ds.Key, value []byte) error {
	// Fix: propagate the underlying error instead of discarding it and
	// unconditionally returning nil.
	return b.b.Put(ctx, key, value)
}
// Commit atomically applies every write recorded in the batch.
func (b *batch) Commit(ctx context.Context) error {
	return b.b.Commit(ctx)
}
// Delete records a deletion in the batch via the wrapped transaction (which
// applies the PEERSTORE key prefix).
func (b *batch) Delete(ctx context.Context, key ds.Key) error {
	// Fix: propagate the underlying error instead of discarding it and
	// unconditionally returning nil.
	return b.b.Delete(ctx, key)
}
// Commit applies all writes buffered in the underlying KVDB transaction.
func (t *transaction) Commit(ctx context.Context) error {
	return t.tx.Commit()
}
// Discard abandons the transaction. ds.Txn's Discard has no error return,
// so any Abort error is necessarily dropped here.
func (t *transaction) Discard(ctx context.Context) {
	t.tx.Abort()
}
// Get returns the value for key as seen by this transaction, translating
// pebble's not-found sentinel to ds.ErrNotFound. The value is copied before
// the closer is released so the returned slice stays valid afterwards.
func (t *transaction) Get(
	ctx context.Context,
	key ds.Key,
) (value []byte, err error) {
	b, closer, err := t.tx.Get(append([]byte{PEERSTORE}, key.Bytes()...))
	if err != nil {
		if err == pebble.ErrNotFound {
			return nil, ds.ErrNotFound
		}
		return nil, errors.Wrap(err, "get")
	}
	out := make([]byte, len(b))
	copy(out, b)
	closer.Close()
	// Fix: return the copy — b's backing memory is only guaranteed valid
	// until the closer is closed, but the original returned b.
	return out, nil
}
// Put buffers a write of value under key (namespaced with the PEERSTORE
// prefix byte) in this transaction.
func (t *transaction) Put(ctx context.Context, key ds.Key, value []byte) error {
	return t.tx.Set(append([]byte{PEERSTORE}, key.Bytes()...), value)
}
// Has reports whether key exists in this transaction's view, treating a
// not-found result as "absent" rather than an error.
func (t *transaction) Has(ctx context.Context, key ds.Key) (
	exists bool,
	err error,
) {
	if _, err := t.Get(ctx, key); err != nil {
		// Fix: t.Get maps misses to ds.ErrNotFound, so that is the sentinel
		// to check; the bare ErrNotFound previously tested here would never
		// match, turning every miss into an error.
		if errors.Is(err, ds.ErrNotFound) {
			return false, nil
		}
		return false, errors.Wrap(err, "has")
	}
	return true, nil
}
// GetSize returns the stored value's length by delegating to the
// go-datastore helper, which fetches the value via this transaction's Get.
func (t *transaction) GetSize(
	ctx context.Context,
	key ds.Key,
) (size int, err error) {
	return ds.GetBackedSize(ctx, t, key)
}
// Delete buffers a deletion of key (namespaced with the PEERSTORE prefix
// byte) in this transaction.
func (t *transaction) Delete(ctx context.Context, key ds.Key) (err error) {
	return t.tx.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
}
// Query streams entries under the peerstore prefix as seen by this
// transaction. Key-ordered queries use the iterator directly; everything
// else is applied naively by dsq.NaiveQueryApply.
func (t *transaction) Query(ctx context.Context, q dsq.Query) (
	dsq.Results,
	error,
) {
	// All peerstore keys live under the PEERSTORE byte; narrow the range
	// further when the query carries a key prefix.
	rnge := []byte{PEERSTORE}
	qNaive := q
	prefix := ds.NewKey(q.Prefix).String()
	if prefix != "/" {
		rnge = append(rnge, []byte(prefix+"/")...)
		qNaive.Prefix = ""
	}
	i, err := t.tx.NewIter(rnge, nil)
	if err != nil {
		return nil, errors.Wrap(err, "query")
	}
	next := i.Next
	if len(q.Orders) > 0 {
		switch q.Orders[0].(type) {
		case dsq.OrderByKey, *dsq.OrderByKey:
			qNaive.Orders = nil
		case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
			// Walk backwards: the first next() positions at the last key
			// and rebinds next to Prev for subsequent calls.
			next = func() bool {
				next = i.Prev
				return i.Last()
			}
			qNaive.Orders = nil
		default:
		}
	}
	// NOTE(review): unlike PeerstoreDatastore.Query, the ascending and
	// unordered paths never call i.First() here — confirm whether the
	// iterator's initial Next() self-positions or this drops/misplaces the
	// first entry.
	r := dsq.ResultsFromIterator(q, dsq.Iterator{
		Next: func() (dsq.Result, bool) {
			if !next() {
				return dsq.Result{}, false
			}
			// Strip the PEERSTORE prefix byte from the returned key.
			k := string(i.Key()[1:])
			e := dsq.Entry{Key: k, Size: len(i.Value())}
			if !q.KeysOnly {
				// Copy the value out: the iterator may reuse its buffer.
				buf := make([]byte, len(i.Value()))
				copy(buf, i.Value())
				e.Value = buf
			}
			return dsq.Result{Entry: e}, true
		},
		Close: func() error {
			return i.Close()
		},
	})
	return dsq.NaiveQueryApply(qNaive, r), nil
}