2023-09-03 23:47:09 +00:00
|
|
|
|
package master
|
|
|
|
|
|
|
|
|
|
import (
|
2024-03-13 01:28:48 +00:00
|
|
|
|
"bytes"
|
2024-03-01 07:12:31 +00:00
|
|
|
|
"context"
|
2024-05-27 05:10:15 +00:00
|
|
|
|
gcrypto "crypto"
|
2024-03-13 01:28:48 +00:00
|
|
|
|
"crypto/rand"
|
2024-05-25 05:22:50 +00:00
|
|
|
|
"encoding/binary"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
"encoding/hex"
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
"fmt"
|
2024-03-13 01:28:48 +00:00
|
|
|
|
"io"
|
2024-03-04 03:20:24 +00:00
|
|
|
|
"math/big"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
"sync"
|
|
|
|
|
"time"
|
|
|
|
|
|
2024-05-27 05:10:15 +00:00
|
|
|
|
"github.com/iden3/go-iden3-crypto/poseidon"
|
|
|
|
|
"github.com/libp2p/go-libp2p/core/peer"
|
2024-03-13 01:28:48 +00:00
|
|
|
|
"github.com/mr-tron/base58"
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
"github.com/multiformats/go-multiaddr"
|
|
|
|
|
mn "github.com/multiformats/go-multiaddr/net"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
"github.com/pkg/errors"
|
|
|
|
|
"go.uber.org/zap"
|
2024-06-08 11:32:45 +00:00
|
|
|
|
"golang.org/x/crypto/sha3"
|
2024-03-01 07:12:31 +00:00
|
|
|
|
"google.golang.org/grpc"
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
"google.golang.org/grpc/credentials/insecure"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/config"
|
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
|
2024-02-13 07:04:56 +00:00
|
|
|
|
qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
|
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/execution"
|
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/keys"
|
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
|
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
2023-09-09 23:45:47 +00:00
|
|
|
|
"source.quilibrium.com/quilibrium/monorepo/node/store"
|
2023-09-03 23:47:09 +00:00
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// SyncStatusType describes where the engine currently is in its
// synchronization lifecycle.
type SyncStatusType int

const (
	// SyncStatusNotSyncing indicates no synchronization is in progress.
	// Fix: the constants are now explicitly typed as SyncStatusType rather
	// than untyped ints, so assignments to syncingStatus are type-checked.
	SyncStatusNotSyncing SyncStatusType = iota
	// SyncStatusAwaitingResponse indicates a sync request has been sent and
	// the engine is waiting on the peer's reply.
	SyncStatusAwaitingResponse
	// SyncStatusSynchronizing indicates frames are actively being ingested.
	SyncStatusSynchronizing
)
|
|
|
|
|
|
|
|
|
|
// MasterClockConsensusEngine runs the master clock: it validates and proves
// master clock frames, performs the proof-of-meaningful-work data time
// proofs against data worker processes, and publishes self-test reports.
type MasterClockConsensusEngine struct {
	*protobufs.UnimplementedValidationServiceServer

	// difficulty is the VDF difficulty used for frame proving (set to a
	// fixed 100000 in the constructor).
	difficulty uint32
	logger     *zap.Logger
	// state tracks the engine lifecycle (stopped → starting → loading →
	// collecting → ... → stopping); compared ordinally in Start's loops.
	state      consensus.EngineState
	pubSub     p2p.PubSub
	keyManager keys.KeyManager
	// dataProver produces KZG commitments/proofs over the VDF outputs.
	dataProver crypto.InclusionProver
	// frameProver verifies and proves master clock frames.
	frameProver         crypto.FrameProver
	lastFrameReceivedAt time.Time

	frameChan        chan *protobufs.ClockFrame
	executionEngines map[string]execution.ExecutionEngine
	// filter is the hex-decoded pubsub filter from the engine config.
	filter []byte
	// input is the hex-decoded genesis seed.
	input         []byte
	syncingStatus SyncStatusType
	syncingTarget []byte
	engineMx      sync.Mutex
	seenFramesMx  sync.Mutex
	// historicFramesMx guards historicFrames; seenFramesMx guards seenFrames.
	historicFramesMx sync.Mutex
	seenFrames       []*protobufs.ClockFrame
	historicFrames   []*protobufs.ClockFrame
	dataProofStore   store.DataProofStore
	clockStore       store.ClockStore
	masterTimeReel   *qtime.MasterTimeReel
	peerInfoManager  p2p.PeerInfoManager
	// report is this node's self-test report; mutated in the proof loop
	// (MasterHeadFrame, Proof, Increment, Challenge) and broadcast.
	report *protobufs.SelfTestReport
	// frameValidationCh / bandwidthTestCh / verifyTestCh feed the single
	// worker goroutine started in Start; processing is serialized.
	frameValidationCh chan *protobufs.ClockFrame
	bandwidthTestCh   chan []byte
	verifyTestCh      chan verifyChallenge
	// currentReceivingSyncPeers counts peers currently being served sync
	// data; guarded by currentReceivingSyncPeersMx.
	currentReceivingSyncPeers   int
	currentReceivingSyncPeersMx sync.Mutex
	engineConfig                *config.EngineConfig
}

// Compile-time assertion that the engine satisfies the consensus interface.
var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil)
func NewMasterClockConsensusEngine(
|
|
|
|
|
engineConfig *config.EngineConfig,
|
|
|
|
|
logger *zap.Logger,
|
2024-06-08 11:32:45 +00:00
|
|
|
|
dataProofStore store.DataProofStore,
|
2023-09-09 23:45:47 +00:00
|
|
|
|
clockStore store.ClockStore,
|
2023-09-03 23:47:09 +00:00
|
|
|
|
keyManager keys.KeyManager,
|
|
|
|
|
pubSub p2p.PubSub,
|
2024-06-08 11:32:45 +00:00
|
|
|
|
dataProver crypto.InclusionProver,
|
2024-02-13 07:04:56 +00:00
|
|
|
|
frameProver crypto.FrameProver,
|
|
|
|
|
masterTimeReel *qtime.MasterTimeReel,
|
2024-03-21 07:14:45 +00:00
|
|
|
|
peerInfoManager p2p.PeerInfoManager,
|
2024-03-01 07:12:31 +00:00
|
|
|
|
report *protobufs.SelfTestReport,
|
2023-09-03 23:47:09 +00:00
|
|
|
|
) *MasterClockConsensusEngine {
|
|
|
|
|
if logger == nil {
|
|
|
|
|
panic(errors.New("logger is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if engineConfig == nil {
|
|
|
|
|
panic(errors.New("engine config is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if keyManager == nil {
|
|
|
|
|
panic(errors.New("key manager is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if pubSub == nil {
|
|
|
|
|
panic(errors.New("pubsub is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-08 11:32:45 +00:00
|
|
|
|
if dataProver == nil {
|
|
|
|
|
panic(errors.New("data prover is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
2024-02-13 07:04:56 +00:00
|
|
|
|
if frameProver == nil {
|
|
|
|
|
panic(errors.New("frame prover is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if masterTimeReel == nil {
|
|
|
|
|
panic(errors.New("master time reel is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
2023-09-03 23:47:09 +00:00
|
|
|
|
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(errors.New("genesis seed is nil"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e := &MasterClockConsensusEngine{
|
2024-06-08 11:32:45 +00:00
|
|
|
|
difficulty: 100000,
|
2023-09-03 23:47:09 +00:00
|
|
|
|
logger: logger,
|
|
|
|
|
state: consensus.EngineStateStopped,
|
|
|
|
|
keyManager: keyManager,
|
|
|
|
|
pubSub: pubSub,
|
|
|
|
|
executionEngines: map[string]execution.ExecutionEngine{},
|
2024-01-03 07:31:42 +00:00
|
|
|
|
frameChan: make(chan *protobufs.ClockFrame),
|
2023-09-03 23:47:09 +00:00
|
|
|
|
input: seed,
|
|
|
|
|
lastFrameReceivedAt: time.Time{},
|
|
|
|
|
syncingStatus: SyncStatusNotSyncing,
|
2024-06-08 11:32:45 +00:00
|
|
|
|
dataProofStore: dataProofStore,
|
2023-09-09 23:45:47 +00:00
|
|
|
|
clockStore: clockStore,
|
2024-06-08 11:32:45 +00:00
|
|
|
|
dataProver: dataProver,
|
2024-02-13 07:04:56 +00:00
|
|
|
|
frameProver: frameProver,
|
|
|
|
|
masterTimeReel: masterTimeReel,
|
2024-03-21 07:14:45 +00:00
|
|
|
|
peerInfoManager: peerInfoManager,
|
2024-03-01 07:12:31 +00:00
|
|
|
|
report: report,
|
2024-03-27 08:50:52 +00:00
|
|
|
|
frameValidationCh: make(chan *protobufs.ClockFrame),
|
2024-03-13 01:28:48 +00:00
|
|
|
|
bandwidthTestCh: make(chan []byte),
|
2024-05-27 05:10:15 +00:00
|
|
|
|
verifyTestCh: make(chan verifyChallenge),
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
engineConfig: engineConfig,
|
2024-01-03 07:31:42 +00:00
|
|
|
|
}
|
|
|
|
|
|
2024-03-21 07:14:45 +00:00
|
|
|
|
e.addPeerManifestReport(e.pubSub.GetPeerID(), report)
|
2024-03-01 07:12:31 +00:00
|
|
|
|
|
2023-09-03 23:47:09 +00:00
|
|
|
|
if e.filter, err = hex.DecodeString(engineConfig.Filter); err != nil {
|
|
|
|
|
panic(errors.Wrap(err, "could not parse filter value"))
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-27 05:10:15 +00:00
|
|
|
|
e.getProvingKey(engineConfig)
|
|
|
|
|
|
|
|
|
|
if err := e.createCommunicationKeys(); err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
2023-09-03 23:47:09 +00:00
|
|
|
|
logger.Info("constructing consensus engine")
|
|
|
|
|
|
|
|
|
|
return e
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Start boots the engine: it starts the peer info manager and the master
// time reel, launches the background goroutines (frame validation worker,
// validation gRPC listener, peer-count logger, proof-of-meaningful-work
// loop, and frame collect/prove/publish loop), subscribes to pubsub, and
// returns a channel that receives nil once startup is complete.
//
// NOTE(review): most of these goroutines loop forever; only the frame
// collect/prove loop observes EngineStateStopping.
func (e *MasterClockConsensusEngine) Start() <-chan error {
	e.logger.Info("starting master consensus engine")
	e.state = consensus.EngineStateStarting

	errChan := make(chan error)

	e.peerInfoManager.Start()

	e.state = consensus.EngineStateLoading
	e.logger.Info("syncing last seen state")

	err := e.masterTimeReel.Start()
	if err != nil {
		panic(err)
	}

	frame, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	e.buildHistoricFrameCache(frame)

	// Worker goroutine: serializes frame validation, bandwidth tests, and
	// verify tests arriving on their respective channels.
	go func() {
		for {
			select {
			case newFrame := <-e.frameValidationCh:
				head, err := e.masterTimeReel.Head()
				if err != nil {
					panic(err)
				}

				// Drop frames behind the head or more than 128 frames
				// ahead of it.
				if head.FrameNumber > newFrame.FrameNumber ||
					newFrame.FrameNumber-head.FrameNumber > 128 {
					e.logger.Debug(
						"frame out of range, ignoring",
						zap.Uint64("number", newFrame.FrameNumber),
					)
					continue
				}

				if err := e.frameProver.VerifyMasterClockFrame(newFrame); err != nil {
					e.logger.Error("could not verify clock frame", zap.Error(err))
					continue
				}

				e.masterTimeReel.Insert(newFrame, false)
			case peerId := <-e.bandwidthTestCh:
				e.performBandwidthTest(peerId)
			case verifyTest := <-e.verifyTestCh:
				e.performVerifyTest(verifyTest)
			}
		}
	}()

	e.logger.Info("subscribing to pubsub messages")
	e.pubSub.Subscribe(e.filter, e.handleMessage, true)

	e.state = consensus.EngineStateCollecting

	// Serve the validation gRPC service over a direct p2p channel.
	go func() {
		server := grpc.NewServer(
			grpc.MaxSendMsgSize(600*1024*1024),
			grpc.MaxRecvMsgSize(600*1024*1024),
		)
		protobufs.RegisterValidationServiceServer(server, e)

		if err := e.pubSub.StartDirectChannelListener(
			e.pubSub.GetPeerID(),
			"validation",
			server,
		); err != nil {
			panic(err)
		}
	}()

	// Periodically log peer counts for observability.
	go func() {
		for {
			e.logger.Info(
				"peers in store",
				zap.Int("peer_store_count", e.pubSub.GetPeerstoreCount()),
				zap.Int("network_peer_count", e.pubSub.GetNetworkPeersCount()),
			)
			time.Sleep(10 * time.Second)
		}
	}()

	// Proof-of-meaningful-work loop: repeatedly runs parallel VDF time
	// proofs on the data worker processes, commits to their outputs, stores
	// the prior round's proof, and broadcasts the self-test report.
	go func() {
		// Let it sit until we at least have a few more peers inbound
		time.Sleep(30 * time.Second)
		// One core is reserved for the node process itself.
		parallelism := e.report.Cores - 1

		if parallelism < 3 {
			panic("invalid system configuration, minimum system configuration must be four cores")
		}

		var clients []protobufs.DataIPCServiceClient
		if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
			clients, err = e.createParallelDataClientsFromList()
			if err != nil {
				panic(err)
			}
		} else {
			clients, err = e.createParallelDataClientsFromBaseMultiaddr(
				int(parallelism),
			)
			if err != nil {
				panic(err)
			}
		}

		// Resume from the latest stored proof if one exists; ErrNotFound
		// means we start from genesis.
		increment, _, previousOutput, err :=
			e.dataProofStore.GetLatestDataTimeProof(e.pubSub.GetPeerID())
		if err != nil && !errors.Is(err, store.ErrNotFound) {
			panic(err)
		}

		prevIndex := -1
		prevHashes := []byte{}
		hashes := []byte{}
		previousPreviousCommitment := []byte{}
		previousCommitment := []byte{}
		prevProofs := [][]byte{}
		proofs := [][]byte{}
		commitment := []byte{}
		// skipStore suppresses re-storing the first round after resuming
		// from persisted state (it was already stored).
		skipStore := false

		if err != nil && errors.Is(err, store.ErrNotFound) {
			e.logger.Info("no state found, starting from genesis")
			increment = 0
			rootFrame, err := e.clockStore.GetMasterClockFrame(e.filter, 0)
			if err != nil {
				panic(err)
			}

			previousCommitment = rootFrame.Output
		} else {
			e.logger.Info("state found", zap.Uint32("increment", increment))
			_, _, previousCommitment, _ = GetOutputs(previousOutput)
			skipStore = true
		}

		commitment = previousCommitment

		// Seed challenge: peer id || prior commitment.
		input := []byte{}
		input = append(input, e.pubSub.GetPeerID()...)
		input = append(input, previousCommitment...)
		proofs = e.PerformTimeProof(input, parallelism, increment, clients)

		// KZG polynomial size scales with the worker count.
		polySize := 128
		if parallelism > 2048 {
			polySize = 65536
		} else if parallelism > 1024 {
			polySize = 2048
		} else if parallelism > 128 {
			polySize = 1024
		}

		for {
			head, err := e.masterTimeReel.Head()
			if err != nil {
				panic(err)
			}

			e.report.MasterHeadFrame = head.FrameNumber

			// Rotate the round state: current values become the "previous"
			// generation before computing the next commitment.
			prevHashes = hashes
			previousPreviousCommitment = previousCommitment
			previousCommitment = commitment
			hashes, commitment, prevIndex = e.PerformDataCommitment(
				proofs,
				int(parallelism),
				uint64(polySize),
			)

			// PoMW requires two forms of proofs – time proofs of data, then execution
			// proofs. In the multiproof case we also have a random selection portion
			// of the execution proofs by issuing a challenge from the next proof,
			// such that it generates a random choice of input from the prior. This
			// allows recursive proof evaluation without requiring retention of all
			// parallel proofs.
			if len(prevProofs) != 0 {
				if !skipStore {
					e.report.Proof = []byte{}
					e.report.Proof = binary.BigEndian.AppendUint32(
						e.report.Proof,
						uint32(prevIndex),
					)
					e.report.Increment = increment - 1
					e.report.Challenge = previousPreviousCommitment
					e.report.Proof = append(e.report.Proof, prevProofs[prevIndex]...)

					p, err := e.dataProver.ProveRaw(
						prevHashes,
						prevIndex,
						uint64(polySize),
					)
					if err != nil {
						panic(err)
					}

					output := SerializeOutput(
						uint32(prevIndex),
						prevProofs,
						previousCommitment,
						p,
					)

					txn, err := e.dataProofStore.NewTransaction()
					if err != nil {
						panic(err)
					}
					e.logger.Info(
						"storing proof",
						zap.Uint32("increment", increment-1),
					)
					err = e.dataProofStore.PutDataTimeProof(
						txn,
						parallelism,
						e.pubSub.GetPeerID(),
						increment-1,
						previousPreviousCommitment,
						output,
					)
					if err != nil {
						panic(err)
					}

					if err := txn.Commit(); err != nil {
						panic(err)
					}

					e.logger.Info(
						"broadcasting self-test info",
						zap.Uint64("current_frame", e.report.MasterHeadFrame),
					)

					if err := e.publishMessage(e.filter, e.report); err != nil {
						e.logger.Debug("error publishing message", zap.Error(err))
					}
				} else {
					// The resumed round was already persisted; only skip once.
					skipStore = false
				}
			}

			// Next round: challenge is peer id || latest commitment.
			increment++
			input := []byte{}
			input = append(input, e.pubSub.GetPeerID()...)
			input = append(input, commitment...)
			prevProofs = proofs
			proofs = e.PerformTimeProof(input, parallelism, increment, clients)
		}
	}()

	// Frame loop: collect, prove, and publish on each new frame, with a
	// 20s timeout fallback that proves from the current head.
	go func() {
		newFrameCh := e.masterTimeReel.NewFrameCh()

		for e.state < consensus.EngineStateStopping {
			var err error
			select {
			case frame := <-newFrameCh:
				currentFrame := frame
				latestFrame := frame
				if latestFrame, err = e.collect(currentFrame); err != nil {
					e.logger.Error("could not collect", zap.Error(err))
					latestFrame = currentFrame
					continue
				}
				if latestFrame, err = e.prove(latestFrame); err != nil {
					e.logger.Error("could not prove", zap.Error(err))
					// Fall back to the unproven frame and still publish.
					latestFrame = currentFrame
				}
				if err = e.publishProof(latestFrame); err != nil {
					e.logger.Error("could not publish", zap.Error(err))
				}
			case <-time.After(20 * time.Second):
				frame, err := e.masterTimeReel.Head()
				if err != nil {
					panic(err)
				}

				if frame, err = e.prove(frame); err != nil {
					e.logger.Error("could not prove", zap.Error(err))
					continue
				}
				if err = e.publishProof(frame); err != nil {
					e.logger.Error("could not publish", zap.Error(err))
				}
			}
		}
	}()

	// Signal successful startup.
	go func() {
		errChan <- nil
	}()

	return errChan
}
|
|
|
|
|
|
2024-06-08 11:32:45 +00:00
|
|
|
|
// SerializeOutput packs one round of proof data into a single byte string:
// a big-endian uint32 index, then the previous output at that index, then
// the KZG commitment, then the KZG proof, in that order. GetOutputs is the
// inverse operation.
func SerializeOutput(
	previousIndex uint32,
	previousOutputs [][]byte,
	kzgCommitment []byte,
	kzgProof []byte,
) []byte {
	var buf []byte
	buf = binary.BigEndian.AppendUint32(buf, previousIndex)
	buf = append(buf, previousOutputs[previousIndex]...)
	buf = append(buf, kzgCommitment...)
	buf = append(buf, kzgProof...)
	return buf
}
|
|
|
|
|
|
|
|
|
|
// GetOutputs splits a serialized proof frame (see SerializeOutput) back
// into its fixed-width components. The returned slices alias the input
// buffer — they are views, not copies. Panics if output is shorter than
// 668 bytes.
func GetOutputs(output []byte) (
	index uint32,
	indexProof []byte,
	kzgCommitment []byte,
	kzgProof []byte,
) {
	// Fixed layout offsets of the serialized frame.
	const (
		proofStart      = 4
		commitmentStart = 520
		kzgProofStart   = 594
		kzgProofEnd     = 668
	)
	index = binary.BigEndian.Uint32(output[:proofStart])
	indexProof = output[proofStart:commitmentStart]
	kzgCommitment = output[commitmentStart:kzgProofStart]
	kzgProof = output[kzgProofStart:kzgProofEnd]
	return index, indexProof, kzgCommitment, kzgProof
}
|
|
|
|
|
|
|
|
|
|
// PerformTimeProof fans the challenge out to all data worker clients in
// parallel and collects their VDF outputs. Each worker i computes a proof
// for (challenge, core=i, increment); a failed call is retried up to three
// times, reconnecting the client between attempts when explicit worker
// multiaddrs are configured. Panics when a worker cannot be reached at all.
// Returns one proof per worker, indexed by core.
func (e *MasterClockConsensusEngine) PerformTimeProof(
	challenge []byte,
	parallelism uint32,
	increment uint32,
	clients []protobufs.DataIPCServiceClient,
) [][]byte {
	proofs := make([][]byte, parallelism)
	now := time.Now()

	// Perform the VDFs:
	wg := sync.WaitGroup{}
	wg.Add(int(parallelism))

	for i := uint32(0); i < parallelism; i++ {
		// Shadow the loop variable so each goroutine captures its own copy.
		i := i
		go func() {
			// Up to three attempts per worker.
			for j := 3; j > 0; j-- {
				resp, err :=
					clients[i].CalculateChallengeProof(
						context.Background(),
						&protobufs.ChallengeProofRequest{
							Challenge: challenge,
							Core:      i,
							Increment: increment,
						},
					)
				if err != nil {
					// Last attempt, or no explicit worker list to
					// reconnect against: give up.
					if j == 1 || len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
						panic(err)
					}
					if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
						e.logger.Error(
							"client failed, reconnecting after 50ms",
							zap.Uint32("client", i),
						)
						time.Sleep(50 * time.Millisecond)
						// Re-dial this worker's configured multiaddr.
						// Each goroutine touches only its own slot i.
						clients[i], err = e.createParallelDataClientsFromListAndIndex(i)
						if err != nil {
							panic(err)
						}
					}
					continue
				}

				proofs[i] = resp.Output
				break
			}
			wg.Done()
		}()
	}
	wg.Wait()
	since := time.Since(now)

	e.logger.Info(
		"completed duration proof",
		zap.Uint32("increment", increment),
		zap.Duration("time_taken", since),
	)

	return proofs
}
|
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) PerformDataCommitment(
|
|
|
|
|
proofs [][]byte,
|
|
|
|
|
parallelism int,
|
|
|
|
|
polySize uint64,
|
|
|
|
|
) ([]byte, []byte, int) {
|
|
|
|
|
// Take the VDF outputs and generate some deterministic outputs to feed
|
|
|
|
|
// into a KZG commitment:
|
|
|
|
|
output := []byte{}
|
|
|
|
|
for i := 0; i < len(proofs); i++ {
|
|
|
|
|
h := sha3.Sum512(proofs[i])
|
|
|
|
|
output = append(output, h[:]...)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nextInput, err := e.dataProver.CommitRaw(output, polySize)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inputHash := sha3.Sum256(nextInput)
|
|
|
|
|
inputHashBI := big.NewInt(0).SetBytes(inputHash[:])
|
|
|
|
|
prevIndex := int(inputHashBI.Mod(
|
|
|
|
|
inputHashBI,
|
|
|
|
|
big.NewInt(int64(parallelism)),
|
|
|
|
|
).Int64())
|
|
|
|
|
|
|
|
|
|
return output, nextInput, prevIndex
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-15 01:53:30 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
|
|
|
|
|
index uint32,
|
|
|
|
|
) (
|
|
|
|
|
protobufs.DataIPCServiceClient,
|
|
|
|
|
error,
|
|
|
|
|
) {
|
|
|
|
|
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_, addr, err := mn.DialArgs(ma)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
conn, err := grpc.Dial(
|
|
|
|
|
addr,
|
|
|
|
|
grpc.WithTransportCredentials(
|
|
|
|
|
insecure.NewCredentials(),
|
|
|
|
|
),
|
|
|
|
|
grpc.WithDefaultCallOptions(
|
|
|
|
|
grpc.MaxCallSendMsgSize(10*1024*1024),
|
|
|
|
|
grpc.MaxCallRecvMsgSize(10*1024*1024),
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
client := protobufs.NewDataIPCServiceClient(conn)
|
|
|
|
|
|
|
|
|
|
e.logger.Info(
|
|
|
|
|
"connected to data worker process",
|
|
|
|
|
zap.Uint32("client", index),
|
|
|
|
|
)
|
|
|
|
|
return client, nil
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-08 11:32:45 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) createParallelDataClientsFromList() (
|
|
|
|
|
[]protobufs.DataIPCServiceClient,
|
|
|
|
|
error,
|
|
|
|
|
) {
|
|
|
|
|
parallelism := len(e.engineConfig.DataWorkerMultiaddrs)
|
|
|
|
|
|
|
|
|
|
e.logger.Info(
|
|
|
|
|
"connecting to data worker processes",
|
|
|
|
|
zap.Int("parallelism", parallelism),
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
clients := make([]protobufs.DataIPCServiceClient, parallelism)
|
|
|
|
|
|
|
|
|
|
for i := 0; i < parallelism; i++ {
|
|
|
|
|
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[i])
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_, addr, err := mn.DialArgs(ma)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
conn, err := grpc.Dial(
|
|
|
|
|
addr,
|
|
|
|
|
grpc.WithTransportCredentials(
|
|
|
|
|
insecure.NewCredentials(),
|
|
|
|
|
),
|
|
|
|
|
grpc.WithDefaultCallOptions(
|
|
|
|
|
grpc.MaxCallSendMsgSize(10*1024*1024),
|
|
|
|
|
grpc.MaxCallRecvMsgSize(10*1024*1024),
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
clients[i] = protobufs.NewDataIPCServiceClient(conn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.logger.Info(
|
|
|
|
|
"connected to data worker processes",
|
|
|
|
|
zap.Int("parallelism", parallelism),
|
|
|
|
|
)
|
|
|
|
|
return clients, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
|
|
|
|
|
parallelism int,
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
) ([]protobufs.DataIPCServiceClient, error) {
|
|
|
|
|
e.logger.Info(
|
|
|
|
|
"connecting to data worker processes",
|
2024-06-08 11:32:45 +00:00
|
|
|
|
zap.Int("parallelism", parallelism),
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
|
|
|
|
|
e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if e.engineConfig.DataWorkerBaseListenPort == 0 {
|
|
|
|
|
e.engineConfig.DataWorkerBaseListenPort = 40000
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-08 11:32:45 +00:00
|
|
|
|
clients := make([]protobufs.DataIPCServiceClient, parallelism)
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
|
2024-06-08 11:32:45 +00:00
|
|
|
|
for i := 0; i < parallelism; i++ {
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
ma, err := multiaddr.NewMultiaddr(
|
|
|
|
|
fmt.Sprintf(
|
|
|
|
|
e.engineConfig.DataWorkerBaseListenMultiaddr,
|
|
|
|
|
int(e.engineConfig.DataWorkerBaseListenPort)+i,
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_, addr, err := mn.DialArgs(ma)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
conn, err := grpc.Dial(
|
|
|
|
|
addr,
|
|
|
|
|
grpc.WithTransportCredentials(
|
|
|
|
|
insecure.NewCredentials(),
|
|
|
|
|
),
|
|
|
|
|
grpc.WithDefaultCallOptions(
|
|
|
|
|
grpc.MaxCallSendMsgSize(10*1024*1024),
|
|
|
|
|
grpc.MaxCallRecvMsgSize(10*1024*1024),
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
clients[i] = protobufs.NewDataIPCServiceClient(conn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.logger.Info(
|
|
|
|
|
"connected to data worker processes",
|
2024-06-08 11:32:45 +00:00
|
|
|
|
zap.Int("parallelism", parallelism),
|
v1.4.18-patch-2 (#230)
* feat: IPC for wesolowski
* update self peer info
* remove digests and signatures
* add new binaries and digests
* Signatory #13 added
* Signatory #4 added (#231)
* added sig.6 files (#232)
* Signatory #9 added (#233)
* Added signatories #1, #2, #3, #5, #8, #12, #14, #15, #16, #17
* remove binaries, release ready
---------
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-29 17:51:47 +00:00
|
|
|
|
)
|
|
|
|
|
return clients, nil
|
|
|
|
|
}
|
|
|
|
|
|
2024-03-01 07:12:31 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) PerformValidation(
|
|
|
|
|
ctx context.Context,
|
|
|
|
|
msg *protobufs.ValidationMessage,
|
|
|
|
|
) (*protobufs.ValidationMessage, error) {
|
|
|
|
|
return msg, nil
|
|
|
|
|
}
|
|
|
|
|
|
2023-09-03 23:47:09 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) Stop(force bool) <-chan error {
|
|
|
|
|
e.logger.Info("stopping consensus engine")
|
|
|
|
|
e.state = consensus.EngineStateStopping
|
|
|
|
|
errChan := make(chan error)
|
|
|
|
|
|
|
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
|
wg.Add(len(e.executionEngines))
|
|
|
|
|
for name := range e.executionEngines {
|
|
|
|
|
name := name
|
|
|
|
|
go func(name string) {
|
2024-02-13 07:04:56 +00:00
|
|
|
|
frame, err := e.masterTimeReel.Head()
|
|
|
|
|
if err != nil {
|
|
|
|
|
errChan <- err
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = <-e.UnregisterExecutor(name, frame.FrameNumber, force)
|
2023-09-03 23:47:09 +00:00
|
|
|
|
if err != nil {
|
|
|
|
|
errChan <- err
|
|
|
|
|
}
|
|
|
|
|
wg.Done()
|
|
|
|
|
}(name)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.logger.Info("waiting for execution engines to stop")
|
|
|
|
|
wg.Wait()
|
|
|
|
|
e.logger.Info("execution engines stopped")
|
2024-02-13 07:04:56 +00:00
|
|
|
|
e.masterTimeReel.Stop()
|
2024-03-21 07:14:45 +00:00
|
|
|
|
e.peerInfoManager.Stop()
|
2023-09-03 23:47:09 +00:00
|
|
|
|
|
|
|
|
|
e.state = consensus.EngineStateStopped
|
|
|
|
|
go func() {
|
|
|
|
|
errChan <- nil
|
|
|
|
|
}()
|
|
|
|
|
return errChan
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-27 05:10:15 +00:00
|
|
|
|
// verifyChallenge bundles a peer-submitted challenge proof for asynchronous
// verification by performVerifyTest.
type verifyChallenge struct {
	// peerID is the raw libp2p peer identifier of the submitter.
	peerID []byte
	// challenge is the input the proof was computed over.
	challenge []byte
	// increment is the proof increment claimed by the peer.
	increment uint32
	// cores is the core count the peer reported for the proof run.
	cores uint32
	// proof holds a 4-byte big-endian difficulty prefix followed by the
	// proof bytes themselves (split apart in performVerifyTest).
	proof []byte
}
|
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) performVerifyTest(
|
|
|
|
|
challenge verifyChallenge,
|
|
|
|
|
) {
|
|
|
|
|
if !e.frameProver.VerifyChallengeProof(
|
|
|
|
|
challenge.challenge,
|
2024-06-08 11:32:45 +00:00
|
|
|
|
challenge.increment,
|
|
|
|
|
binary.BigEndian.Uint32(challenge.proof[:4]),
|
|
|
|
|
challenge.proof[4:],
|
2024-05-27 05:10:15 +00:00
|
|
|
|
) {
|
|
|
|
|
e.logger.Warn(
|
|
|
|
|
"received invalid proof from peer",
|
|
|
|
|
zap.String("peer_id", peer.ID(challenge.peerID).String()),
|
|
|
|
|
)
|
|
|
|
|
e.pubSub.SetPeerScore(challenge.peerID, -1000)
|
|
|
|
|
} else {
|
|
|
|
|
e.logger.Debug(
|
|
|
|
|
"received valid proof from peer",
|
|
|
|
|
zap.String("peer_id", peer.ID(challenge.peerID).String()),
|
|
|
|
|
)
|
|
|
|
|
info := e.peerInfoManager.GetPeerInfo(challenge.peerID)
|
|
|
|
|
info.LastSeen = time.Now().UnixMilli()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-03-13 01:28:48 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) performBandwidthTest(peerID []byte) {
|
|
|
|
|
result := e.pubSub.GetMultiaddrOfPeer(peerID)
|
|
|
|
|
if result == "" {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cc, err := e.pubSub.GetDirectChannel(peerID, "validation")
|
|
|
|
|
if err != nil {
|
2024-03-24 08:11:00 +00:00
|
|
|
|
e.logger.Debug(
|
2024-03-23 20:26:57 +00:00
|
|
|
|
"could not connect to peer for validation",
|
2024-03-13 01:28:48 +00:00
|
|
|
|
zap.String("peer_id", base58.Encode(peerID)),
|
|
|
|
|
)
|
2024-03-23 20:26:57 +00:00
|
|
|
|
|
2024-03-13 01:28:48 +00:00
|
|
|
|
// tag: dusk – nuke this peer for now
|
|
|
|
|
e.pubSub.SetPeerScore(peerID, -1000)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
client := protobufs.NewValidationServiceClient(cc)
|
|
|
|
|
verification := make([]byte, 1048576)
|
|
|
|
|
rand.Read(verification)
|
|
|
|
|
start := time.Now().UnixMilli()
|
|
|
|
|
validation, err := client.PerformValidation(
|
|
|
|
|
context.Background(),
|
|
|
|
|
&protobufs.ValidationMessage{
|
|
|
|
|
Validation: verification,
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
end := time.Now().UnixMilli()
|
|
|
|
|
if err != nil && err != io.EOF {
|
|
|
|
|
cc.Close()
|
2024-03-24 08:11:00 +00:00
|
|
|
|
e.logger.Debug(
|
2024-03-13 01:28:48 +00:00
|
|
|
|
"peer returned error",
|
|
|
|
|
zap.String("peer_id", base58.Encode(peerID)),
|
|
|
|
|
zap.Error(err),
|
|
|
|
|
)
|
|
|
|
|
// tag: dusk – nuke this peer for now
|
|
|
|
|
e.pubSub.SetPeerScore(peerID, -1000)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
cc.Close()
|
|
|
|
|
|
|
|
|
|
if !bytes.Equal(verification, validation.Validation) {
|
2024-03-24 08:11:00 +00:00
|
|
|
|
e.logger.Debug(
|
2024-03-23 20:26:57 +00:00
|
|
|
|
"peer provided invalid verification",
|
2024-03-13 01:28:48 +00:00
|
|
|
|
zap.String("peer_id", base58.Encode(peerID)),
|
|
|
|
|
)
|
|
|
|
|
// tag: dusk – nuke this peer for now
|
|
|
|
|
e.pubSub.SetPeerScore(peerID, -1000)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if end-start > 2000 {
|
2024-03-24 08:11:00 +00:00
|
|
|
|
e.logger.Debug(
|
2024-03-23 20:26:57 +00:00
|
|
|
|
"peer has slow bandwidth, scoring out",
|
2024-03-13 01:28:48 +00:00
|
|
|
|
zap.String("peer_id", base58.Encode(peerID)),
|
|
|
|
|
)
|
|
|
|
|
// tag: dusk – nuke this peer for now
|
|
|
|
|
e.pubSub.SetPeerScore(peerID, -1000)
|
|
|
|
|
return
|
|
|
|
|
}
|
2024-03-21 07:14:45 +00:00
|
|
|
|
|
|
|
|
|
duration := end - start
|
|
|
|
|
bandwidth := uint64(1048576*1000) / uint64(duration)
|
|
|
|
|
manifest := e.peerInfoManager.GetPeerInfo(peerID)
|
|
|
|
|
if manifest == nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
peerManifest := &p2p.PeerManifest{
|
|
|
|
|
PeerId: peerID,
|
|
|
|
|
Difficulty: manifest.Difficulty,
|
|
|
|
|
DifficultyMetric: manifest.DifficultyMetric,
|
|
|
|
|
Commit_16Metric: manifest.Commit_16Metric,
|
|
|
|
|
Commit_128Metric: manifest.Commit_128Metric,
|
|
|
|
|
Commit_1024Metric: manifest.Commit_1024Metric,
|
|
|
|
|
Commit_65536Metric: manifest.Commit_65536Metric,
|
|
|
|
|
Proof_16Metric: manifest.Proof_16Metric,
|
|
|
|
|
Proof_128Metric: manifest.Proof_128Metric,
|
|
|
|
|
Proof_1024Metric: manifest.Proof_1024Metric,
|
|
|
|
|
Proof_65536Metric: manifest.Proof_65536Metric,
|
|
|
|
|
Cores: manifest.Cores,
|
|
|
|
|
Memory: manifest.Memory,
|
|
|
|
|
Storage: manifest.Storage,
|
|
|
|
|
Capabilities: []p2p.Capability{},
|
|
|
|
|
MasterHeadFrame: manifest.MasterHeadFrame,
|
|
|
|
|
Bandwidth: bandwidth,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for _, capability := range manifest.Capabilities {
|
|
|
|
|
metadata := make([]byte, len(capability.AdditionalMetadata))
|
|
|
|
|
copy(metadata[:], capability.AdditionalMetadata[:])
|
|
|
|
|
peerManifest.Capabilities = append(
|
|
|
|
|
peerManifest.Capabilities,
|
|
|
|
|
p2p.Capability{
|
|
|
|
|
ProtocolIdentifier: capability.ProtocolIdentifier,
|
|
|
|
|
AdditionalMetadata: metadata,
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.peerInfoManager.AddPeerInfo(manifest)
|
2024-03-13 01:28:48 +00:00
|
|
|
|
}
|
|
|
|
|
|
2024-03-04 03:20:24 +00:00
|
|
|
|
func (
|
|
|
|
|
e *MasterClockConsensusEngine,
|
|
|
|
|
) GetPeerManifests() *protobufs.PeerManifestsResponse {
|
|
|
|
|
response := &protobufs.PeerManifestsResponse{
|
|
|
|
|
PeerManifests: []*protobufs.PeerManifest{},
|
|
|
|
|
}
|
2024-03-21 07:14:45 +00:00
|
|
|
|
peerMap := e.peerInfoManager.GetPeerMap()
|
|
|
|
|
for peerId, peerManifest := range peerMap {
|
2024-03-04 03:20:24 +00:00
|
|
|
|
peerId := peerId
|
|
|
|
|
peerManifest := peerManifest
|
|
|
|
|
manifest := &protobufs.PeerManifest{
|
|
|
|
|
PeerId: []byte(peerId),
|
|
|
|
|
Difficulty: peerManifest.Difficulty,
|
|
|
|
|
DifficultyMetric: peerManifest.DifficultyMetric,
|
|
|
|
|
Commit_16Metric: peerManifest.Commit_16Metric,
|
|
|
|
|
Commit_128Metric: peerManifest.Commit_128Metric,
|
|
|
|
|
Commit_1024Metric: peerManifest.Commit_1024Metric,
|
|
|
|
|
Commit_65536Metric: peerManifest.Commit_65536Metric,
|
|
|
|
|
Proof_16Metric: peerManifest.Proof_16Metric,
|
|
|
|
|
Proof_128Metric: peerManifest.Proof_128Metric,
|
|
|
|
|
Proof_1024Metric: peerManifest.Proof_1024Metric,
|
|
|
|
|
Proof_65536Metric: peerManifest.Proof_65536Metric,
|
|
|
|
|
Cores: peerManifest.Cores,
|
|
|
|
|
Memory: new(big.Int).SetBytes(peerManifest.Memory).Bytes(),
|
|
|
|
|
Storage: new(big.Int).SetBytes(peerManifest.Storage).Bytes(),
|
2024-03-08 05:05:04 +00:00
|
|
|
|
MasterHeadFrame: peerManifest.MasterHeadFrame,
|
2024-05-25 05:22:50 +00:00
|
|
|
|
LastSeen: peerManifest.LastSeen,
|
2024-03-04 03:20:24 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for _, capability := range peerManifest.Capabilities {
|
|
|
|
|
metadata := make([]byte, len(capability.AdditionalMetadata))
|
|
|
|
|
copy(metadata[:], capability.AdditionalMetadata[:])
|
|
|
|
|
manifest.Capabilities = append(
|
|
|
|
|
manifest.Capabilities,
|
|
|
|
|
&protobufs.Capability{
|
|
|
|
|
ProtocolIdentifier: capability.ProtocolIdentifier,
|
|
|
|
|
AdditionalMetadata: metadata,
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
response.PeerManifests = append(
|
|
|
|
|
response.PeerManifests,
|
|
|
|
|
manifest,
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
return response
|
|
|
|
|
}
|
|
|
|
|
|
2023-09-03 23:47:09 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) GetDifficulty() uint32 {
|
|
|
|
|
return e.difficulty
|
|
|
|
|
}
|
|
|
|
|
|
2024-01-03 07:31:42 +00:00
|
|
|
|
func (e *MasterClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
|
2024-02-13 07:04:56 +00:00
|
|
|
|
frame, err := e.masterTimeReel.Head()
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return frame
|
2023-09-03 23:47:09 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) GetState() consensus.EngineState {
|
|
|
|
|
return e.state
|
|
|
|
|
}
|
|
|
|
|
|
2024-01-03 07:31:42 +00:00
|
|
|
|
func (
|
|
|
|
|
e *MasterClockConsensusEngine,
|
|
|
|
|
) GetFrameChannel() <-chan *protobufs.ClockFrame {
|
2023-09-03 23:47:09 +00:00
|
|
|
|
return e.frameChan
|
|
|
|
|
}
|
2024-01-03 07:31:42 +00:00
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) buildHistoricFrameCache(
|
|
|
|
|
latestFrame *protobufs.ClockFrame,
|
|
|
|
|
) {
|
|
|
|
|
e.historicFrames = []*protobufs.ClockFrame{}
|
|
|
|
|
|
|
|
|
|
if latestFrame.FrameNumber != 0 {
|
|
|
|
|
min := uint64(0)
|
|
|
|
|
if latestFrame.FrameNumber-255 > min && latestFrame.FrameNumber > 255 {
|
|
|
|
|
min = latestFrame.FrameNumber - 255
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
iter, err := e.clockStore.RangeMasterClockFrames(
|
|
|
|
|
e.filter,
|
|
|
|
|
min,
|
|
|
|
|
latestFrame.FrameNumber-1,
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for iter.First(); iter.Valid(); iter.Next() {
|
|
|
|
|
frame, err := iter.Value()
|
|
|
|
|
if err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.historicFrames = append(e.historicFrames, frame)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err = iter.Close(); err != nil {
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.historicFrames = append(e.historicFrames, latestFrame)
|
|
|
|
|
}
|
2024-03-21 07:14:45 +00:00
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) addPeerManifestReport(
|
|
|
|
|
peerId []byte,
|
|
|
|
|
report *protobufs.SelfTestReport,
|
|
|
|
|
) {
|
|
|
|
|
manifest := &p2p.PeerManifest{
|
|
|
|
|
PeerId: peerId,
|
|
|
|
|
Difficulty: report.Difficulty,
|
|
|
|
|
DifficultyMetric: report.DifficultyMetric,
|
|
|
|
|
Commit_16Metric: report.Commit_16Metric,
|
|
|
|
|
Commit_128Metric: report.Commit_128Metric,
|
|
|
|
|
Commit_1024Metric: report.Commit_1024Metric,
|
|
|
|
|
Commit_65536Metric: report.Commit_65536Metric,
|
|
|
|
|
Proof_16Metric: report.Proof_16Metric,
|
|
|
|
|
Proof_128Metric: report.Proof_128Metric,
|
|
|
|
|
Proof_1024Metric: report.Proof_1024Metric,
|
|
|
|
|
Proof_65536Metric: report.Proof_65536Metric,
|
|
|
|
|
Cores: report.Cores,
|
|
|
|
|
Memory: report.Memory,
|
|
|
|
|
Storage: report.Storage,
|
|
|
|
|
Capabilities: []p2p.Capability{},
|
|
|
|
|
MasterHeadFrame: report.MasterHeadFrame,
|
2024-05-25 05:22:50 +00:00
|
|
|
|
LastSeen: time.Now().UnixMilli(),
|
2024-03-21 07:14:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for _, capability := range manifest.Capabilities {
|
|
|
|
|
metadata := make([]byte, len(capability.AdditionalMetadata))
|
|
|
|
|
copy(metadata[:], capability.AdditionalMetadata[:])
|
|
|
|
|
manifest.Capabilities = append(
|
|
|
|
|
manifest.Capabilities,
|
|
|
|
|
p2p.Capability{
|
|
|
|
|
ProtocolIdentifier: capability.ProtocolIdentifier,
|
|
|
|
|
AdditionalMetadata: metadata,
|
|
|
|
|
},
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e.peerInfoManager.AddPeerInfo(manifest)
|
|
|
|
|
}
|
2024-05-27 05:10:15 +00:00
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) getProvingKey(
|
|
|
|
|
engineConfig *config.EngineConfig,
|
|
|
|
|
) (gcrypto.Signer, keys.KeyType, []byte, []byte) {
|
|
|
|
|
provingKey, err := e.keyManager.GetSigningKey(engineConfig.ProvingKeyId)
|
|
|
|
|
if errors.Is(err, keys.KeyNotFoundErr) {
|
|
|
|
|
e.logger.Info("could not get proving key, generating")
|
|
|
|
|
provingKey, err = e.keyManager.CreateSigningKey(
|
|
|
|
|
engineConfig.ProvingKeyId,
|
|
|
|
|
keys.KeyTypeEd448,
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
e.logger.Error("could not get proving key", zap.Error(err))
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rawKey, err := e.keyManager.GetRawKey(engineConfig.ProvingKeyId)
|
|
|
|
|
if err != nil {
|
|
|
|
|
e.logger.Error("could not get proving key type", zap.Error(err))
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
provingKeyType := rawKey.Type
|
|
|
|
|
|
|
|
|
|
h, err := poseidon.HashBytes(rawKey.PublicKey)
|
|
|
|
|
if err != nil {
|
|
|
|
|
e.logger.Error("could not hash proving key", zap.Error(err))
|
|
|
|
|
panic(err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
provingKeyAddress := h.Bytes()
|
|
|
|
|
provingKeyAddress = append(
|
|
|
|
|
make([]byte, 32-len(provingKeyAddress)),
|
|
|
|
|
provingKeyAddress...,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
return provingKey, provingKeyType, rawKey.PublicKey, provingKeyAddress
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (e *MasterClockConsensusEngine) createCommunicationKeys() error {
|
|
|
|
|
_, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
|
|
|
|
|
if err != nil {
|
|
|
|
|
if errors.Is(err, keys.KeyNotFoundErr) {
|
|
|
|
|
_, err = e.keyManager.CreateAgreementKey(
|
|
|
|
|
"q-ratchet-idk",
|
|
|
|
|
keys.KeyTypeX448,
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return errors.Wrap(err, "create communication keys")
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
return errors.Wrap(err, "create communication keys")
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_, err = e.keyManager.GetAgreementKey("q-ratchet-spk")
|
|
|
|
|
if err != nil {
|
|
|
|
|
if errors.Is(err, keys.KeyNotFoundErr) {
|
|
|
|
|
_, err = e.keyManager.CreateAgreementKey(
|
|
|
|
|
"q-ratchet-spk",
|
|
|
|
|
keys.KeyTypeX448,
|
|
|
|
|
)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return errors.Wrap(err, "create communication keys")
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
return errors.Wrap(err, "create communication keys")
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
}
|