diff --git a/nekryptology/pkg/vdf/wesolowski.go b/nekryptology/pkg/vdf/wesolowski.go index 2051cc3..6c5bc68 100644 --- a/nekryptology/pkg/vdf/wesolowski.go +++ b/nekryptology/pkg/vdf/wesolowski.go @@ -89,7 +89,10 @@ func GenerateVDFWithStopChan(seed []byte, iterations, int_size_bits uint32, stop func GenerateVDFIteration(seed, x_blob []byte, iterations, int_size_bits uint32) ([]byte, []byte) { int_size := (int_size_bits + 16) >> 4 D := iqc.CreateDiscriminant(seed, int_size_bits) - x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D) + x, ok := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D) + if !ok { + return nil, nil + } y, proof := calculateVDF(D, x, iterations, int_size_bits, nil) @@ -105,8 +108,15 @@ func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool { D := iqc.CreateDiscriminant(seed, int_size_bits) x := iqc.NewClassGroupFromAbDiscriminant(big.NewInt(2), big.NewInt(1), D) - y, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D) - proof, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D) + y, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D) + if !ok { + return false + } + + proof, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D) + if !ok { + return false + } return verifyProof(x, y, proof, iterations) } @@ -114,9 +124,20 @@ func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool { func VerifyVDFIteration(seed, x_blob, proof_blob []byte, iterations, int_size_bits uint32) bool { int_size := (int_size_bits + 16) >> 4 D := iqc.CreateDiscriminant(seed, int_size_bits) - x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D) - y, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D) - proof, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D) + x, ok := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D) + if !ok { + return false + } + + y, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D) + if !ok { + return false + } + + proof, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D) + if !ok { + return false + } return verifyProof(x, y, proof, iterations) } diff --git a/node/config/engine.go b/node/config/engine.go index 546e157..14d2f98 100644 --- a/node/config/engine.go +++ b/node/config/engine.go @@ -8,6 +8,14 @@ type EngineConfig struct { PendingCommitWorkers int64 `yaml:"pendingCommitWorkers"` MinimumPeersRequired int `yaml:"minimumPeersRequired"` StatsMultiaddr string `yaml:"statsMultiaddr"` + // Sets the fmt.Sprintf format string to use as the listen multiaddrs for + // data worker processes + DataWorkerBaseListenMultiaddr string `yaml:"dataWorkerBaseListenMultiaddr"` + // Sets the starting port number to use as the listen port for data worker + // processes, incrementing by 1 until n-1, n = cores. 
(Example: a 4 core + // system, base listen port of 40000 will listen on 40000, 40001, 40002) + DataWorkerBaseListenPort uint16 `yaml:"dataWorkerBaseListenPort"` + DataWorkerMemoryLimit int64 `yaml:"dataWorkerMemoryLimit"` // Values used only for testing – do not override these in production, your // node will get kicked out diff --git a/node/config/version.go b/node/config/version.go index f9a1fe2..b9289de 100644 --- a/node/config/version.go +++ b/node/config/version.go @@ -29,5 +29,5 @@ func FormatVersion(version []byte) string { } func GetPatchNumber() byte { - return 0x01 + return 0x02 } diff --git a/node/consensus/master/broadcast_messaging.go b/node/consensus/master/broadcast_messaging.go index 7d66767..18d8d87 100644 --- a/node/consensus/master/broadcast_messaging.go +++ b/node/consensus/master/broadcast_messaging.go @@ -116,15 +116,19 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport( peerID []byte, any *anypb.Any, ) error { - if bytes.Equal(peerID, e.pubSub.GetPeerID()) { - return nil - } - report := &protobufs.SelfTestReport{} if err := any.UnmarshalTo(report); err != nil { return errors.Wrap(err, "handle self test report") } + if bytes.Equal(peerID, e.pubSub.GetPeerID()) { + info := e.peerInfoManager.GetPeerInfo(peerID) + info.LastSeen = time.Now().UnixMilli() + info.DifficultyMetric = report.DifficultyMetric + info.MasterHeadFrame = report.MasterHeadFrame + return nil + } + // minimum proof size is one timestamp, one vdf proof, must match one fewer // than core count if len(report.Proof) < 516+8 || diff --git a/node/consensus/master/master_clock_consensus_engine.go b/node/consensus/master/master_clock_consensus_engine.go index bf47b52..6f21464 100644 --- a/node/consensus/master/master_clock_consensus_engine.go +++ b/node/consensus/master/master_clock_consensus_engine.go @@ -7,6 +7,7 @@ import ( "crypto/rand" "encoding/binary" "encoding/hex" + "fmt" "io" "math/big" "sync" @@ -15,9 +16,12 @@ import ( "github.com/iden3/go-iden3-crypto/poseidon" "github.com/libp2p/go-libp2p/core/peer" "github.com/mr-tron/base58" + "github.com/multiformats/go-multiaddr" + mn "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "source.quilibrium.com/quilibrium/monorepo/node/config" "source.quilibrium.com/quilibrium/monorepo/node/consensus" qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time" @@ -68,6 +72,7 @@ type MasterClockConsensusEngine struct { verifyTestCh chan verifyChallenge currentReceivingSyncPeers int currentReceivingSyncPeersMx sync.Mutex + engineConfig *config.EngineConfig } var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil) @@ -131,6 +136,7 @@ func NewMasterClockConsensusEngine( frameValidationCh: make(chan *protobufs.ClockFrame), bandwidthTestCh: make(chan []byte), verifyTestCh: make(chan verifyChallenge), + engineConfig: engineConfig, } e.addPeerManifestReport(e.pubSub.GetPeerID(), report) @@ -241,6 +247,16 @@ func (e *MasterClockConsensusEngine) Start() <-chan error { time.Sleep(30 * time.Second) difficultyMetric := int64(100000) skew := (difficultyMetric * 12) / 10 + parallelism := e.report.Cores - 1 + + if parallelism < 3 { + panic("invalid system configuration, minimum system configuration must be four cores") + } + + clients, err := e.createParallelDataClients(int(parallelism)) + if err != nil { + panic(err) + } for { head, err := e.masterTimeReel.Head() @@ -250,7 +266,6 @@ func (e *MasterClockConsensusEngine) Start() <-chan error 
{ e.report.MasterHeadFrame = head.FrameNumber e.report.DifficultyMetric = difficultyMetric - parallelism := e.report.Cores - 1 challenge := binary.BigEndian.AppendUint64( []byte{}, @@ -258,15 +273,42 @@ func (e *MasterClockConsensusEngine) Start() <-chan error { ) challenge = append(challenge, e.pubSub.GetPeerID()...) - ts, proofs, nextDifficultyMetric, err := - e.frameProver.CalculateChallengeProof( - challenge, - parallelism, - skew, - ) - if err != nil { - panic(err) + proofs := make([][]byte, parallelism) + nextMetrics := make([]int64, parallelism) + + wg := sync.WaitGroup{} + wg.Add(int(parallelism)) + + ts := time.Now().UnixMilli() + for i := uint32(0); i < parallelism; i++ { + i := i + go func() { + resp, err := + clients[i].CalculateChallengeProof( + context.Background(), + &protobufs.ChallengeProofRequest{ + Challenge: challenge, + Core: i, + Skew: skew, + NowMs: ts, + }, + ) + if err != nil { + panic(err) + } + + proofs[i], nextMetrics[i] = resp.Output, resp.NextSkew + wg.Done() + }() } + wg.Wait() + nextDifficultySum := uint64(0) + for i := 0; i < int(parallelism); i++ { + nextDifficultySum += uint64(nextMetrics[i]) + } + + nextDifficultyMetric := int64(nextDifficultySum / uint64(parallelism)) + e.logger.Info( "recalibrating difficulty metric", zap.Int64("previous_difficulty_metric", difficultyMetric), @@ -336,6 +378,64 @@ func (e *MasterClockConsensusEngine) Start() <-chan error { return errChan } +func (e *MasterClockConsensusEngine) createParallelDataClients( + paralellism int, +) ([]protobufs.DataIPCServiceClient, error) { + e.logger.Info( + "connecting to data worker processes", + zap.Int("parallelism", paralellism), + ) + + if e.engineConfig.DataWorkerBaseListenMultiaddr == "" { + e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d" + } + + if e.engineConfig.DataWorkerBaseListenPort == 0 { + e.engineConfig.DataWorkerBaseListenPort = 40000 + } + + clients := make([]protobufs.DataIPCServiceClient, paralellism) + + for i := 0; i < paralellism; i++ { + ma, err := multiaddr.NewMultiaddr( + fmt.Sprintf( + e.engineConfig.DataWorkerBaseListenMultiaddr, + int(e.engineConfig.DataWorkerBaseListenPort)+i, + ), + ) + if err != nil { + panic(err) + } + + _, addr, err := mn.DialArgs(ma) + if err != nil { + panic(err) + } + + conn, err := grpc.Dial( + addr, + grpc.WithTransportCredentials( + insecure.NewCredentials(), + ), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(10*1024*1024), + grpc.MaxCallRecvMsgSize(10*1024*1024), + ), + ) + if err != nil { + panic(err) + } + + clients[i] = protobufs.NewDataIPCServiceClient(conn) + } + + e.logger.Info( + "connected to data worker processes", + zap.Int("parallelism", paralellism), + ) + return clients, nil +} + func (e *MasterClockConsensusEngine) PerformValidation( ctx context.Context, msg *protobufs.ValidationMessage, diff --git a/node/crypto/frame_prover.go b/node/crypto/frame_prover.go index 778e233..9c6f4a3 100644 --- a/node/crypto/frame_prover.go +++ b/node/crypto/frame_prover.go @@ -53,9 +53,10 @@ type FrameProver interface { ) bool CalculateChallengeProof( challenge []byte, - parallelism uint32, + core uint32, skew int64, - ) (int64, [][]byte, int64, error) + nowMs int64, + ) ([]byte, int64, error) VerifyChallengeProof( challenge []byte, timestamp int64, diff --git a/node/crypto/wesolowski_frame_prover.go b/node/crypto/wesolowski_frame_prover.go index ede59b5..6b1915d 100644 --- a/node/crypto/wesolowski_frame_prover.go +++ b/node/crypto/wesolowski_frame_prover.go @@ -6,7 +6,6 @@ import ( "crypto/rand" 
"encoding/binary" "math/big" - "sync" "time" "github.com/cloudflare/circl/sign/ed448" @@ -601,43 +600,31 @@ func (w *WesolowskiFrameProver) VerifyWeakRecursiveProof( func (w *WesolowskiFrameProver) CalculateChallengeProof( challenge []byte, - parallelism uint32, + core uint32, skew int64, -) (int64, [][]byte, int64, error) { - now := time.Now() - nowMs := now.UnixMilli() + nowMs int64, +) ([]byte, int64, error) { input := binary.BigEndian.AppendUint64([]byte{}, uint64(nowMs)) input = append(input, challenge...) - outputs := make([][]byte, parallelism) - - wg := sync.WaitGroup{} - wg.Add(int(parallelism)) // 4.5 minutes = 270 seconds, one increment should be ten seconds proofDuration := 270 * 1000 calibratedDifficulty := (int64(proofDuration) * 10000) / skew - for i := uint32(0); i < parallelism; i++ { - i := i - go func() { - instanceInput := binary.BigEndian.AppendUint32([]byte{}, i) - instanceInput = append(instanceInput, input...) - b := sha3.Sum256(instanceInput) - v := vdf.New(uint32(calibratedDifficulty), b) + instanceInput := binary.BigEndian.AppendUint32([]byte{}, core) + instanceInput = append(instanceInput, input...) + b := sha3.Sum256(instanceInput) + v := vdf.New(uint32(calibratedDifficulty), b) - v.Execute() - o := v.GetOutput() + v.Execute() + o := v.GetOutput() - outputs[i] = make([]byte, 516) - copy(outputs[i][:], o[:]) - wg.Done() - }() - } - - wg.Wait() + output := make([]byte, 516) + copy(output[:], o[:]) + now := time.UnixMilli(nowMs) after := time.Since(now) nextSkew := (skew * after.Milliseconds()) / int64(proofDuration) - return nowMs, outputs, nextSkew, nil + return output, nextSkew, nil } func (w *WesolowskiFrameProver) VerifyChallengeProof( diff --git a/node/crypto/wesolowski_frame_prover_test.go b/node/crypto/wesolowski_frame_prover_test.go index 6bc039c..eb57fbc 100644 --- a/node/crypto/wesolowski_frame_prover_test.go +++ b/node/crypto/wesolowski_frame_prover_test.go @@ -30,10 +30,12 @@ func TestMasterProve(t *testing.T) { func TestChallengeProof(t *testing.T) { l, _ := zap.NewProduction() w := crypto.NewWesolowskiFrameProver(l) - now, proofs, nextSkew, err := w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 3, 120000) + now := time.Now().UnixMilli() + proofs, nextSkew, err := w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 0, 120000, now) assert.NoError(t, err) - assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, 100000, proofs)) - now, proofs, _, err = w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 3, nextSkew*12/10) + assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, 100000, [][]byte{proofs})) + now = time.Now().UnixMilli() + proofs, _, err = w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 0, nextSkew*12/10, now) assert.NoError(t, err) - assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, nextSkew, proofs)) + assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, nextSkew, [][]byte{proofs})) } diff --git a/node/main.go b/node/main.go index 9d52c0a..137f556 100644 --- a/node/main.go +++ b/node/main.go @@ -12,9 +12,11 @@ import ( "io/fs" "log" "os" + "os/exec" "os/signal" "path/filepath" "runtime" + rdebug "runtime/debug" "runtime/pprof" "strconv" "strings" @@ -103,6 +105,16 @@ var ( true, "enables or disables signature validation (default true)", ) + core = flag.Int( + "core", + 0, + "specifies the core of the process (defaults to zero, the initial launcher)", + ) + parentProcess = flag.Int( + "parent-process", + 0, + "specifies the parent process pid for a data worker", + ) ) 
var signatories = []string{ @@ -264,7 +276,7 @@ func main() { done := make(chan os.Signal, 1) signal.Notify(done, syscall.SIGINT, syscall.SIGTERM) - if !*dbConsole { + if !*dbConsole && *core == 0 { printLogo() printVersion() fmt.Println(" ") @@ -322,7 +334,58 @@ func main() { return } + if *core != 0 { + runtime.GOMAXPROCS(1) + rdebug.SetGCPercent(9999) + + if nodeConfig.Engine.DataWorkerMemoryLimit == 0 { + nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB + } + + rdebug.SetMemoryLimit(nodeConfig.Engine.DataWorkerMemoryLimit) + + if nodeConfig.Engine.DataWorkerBaseListenMultiaddr == "" { + nodeConfig.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d" + } + + if nodeConfig.Engine.DataWorkerBaseListenPort == 0 { + nodeConfig.Engine.DataWorkerBaseListenPort = 40000 + } + + if *parentProcess == 0 { + panic("parent process pid not specified") + } + + l, err := zap.NewProduction() + if err != nil { + panic(err) + } + + rpcMultiaddr := fmt.Sprintf( + nodeConfig.Engine.DataWorkerBaseListenMultiaddr, + int(nodeConfig.Engine.DataWorkerBaseListenPort)+*core-1, + ) + srv, err := rpc.NewDataWorkerIPCServer( + rpcMultiaddr, + l, + uint32(*core)-1, + qcrypto.NewWesolowskiFrameProver(l), + *parentProcess, + ) + if err != nil { + panic(err) + } + + err = srv.Start() + if err != nil { + panic(err) + } + return + } + fmt.Println("Loading ceremony state and starting node...") + go spawnDataWorkers() + kzg.Init() report := RunSelfTestIfNeeded(*configDirectory, nodeConfig) @@ -365,9 +428,55 @@ func main() { node.Start() <-done + stopDataWorkers() node.Stop() } +var dataWorkers []*exec.Cmd + +func spawnDataWorkers() { + process, err := os.Executable() + if err != nil { + panic(err) + } + + cores := runtime.GOMAXPROCS(0) + dataWorkers = make([]*exec.Cmd, cores-1) + fmt.Printf("Spawning %d data workers...\n", cores-1) + + for i := 1; i <= cores-1; i++ { + i := i + go func() { + args := []string{ + fmt.Sprintf("--core=%d", i), + fmt.Sprintf("--parent-process=%d", os.Getpid()), + } + args = append(args, os.Args[1:]...) + cmd := exec.Command(process, args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Start() + if err != nil { + panic(err) + } + + dataWorkers[i-1] = cmd + }() + } +} + +func stopDataWorkers() { + for i := 0; i < len(dataWorkers); i++ { + err := dataWorkers[i].Process.Signal(os.Kill) + if err != nil { + fmt.Printf( + "fatal: unable to kill worker with pid %d, please kill this process!\n", + dataWorkers[i].Process.Pid, + ) + } + } +} + // Reintroduce at a later date func RunCompaction(clockStore *store.PebbleClockStore) { intrinsicFilter := append( diff --git a/node/node-1.4.18-darwin-arm64.dgst b/node/node-1.4.18-darwin-arm64.dgst index 93db326..c8d67e2 100644 --- a/node/node-1.4.18-darwin-arm64.dgst +++ b/node/node-1.4.18-darwin-arm64.dgst @@ -1 +1 @@ -SHA3-256(node-1.4.18-darwin-arm64)= dc14a02268d88540bb364259775743c536d7541011bf26d4630f7fed425b5986 +SHA3-256(node-1.4.18-darwin-arm64)= 0f666d6c3814f96bebce455034fe6a44b64f223b4e313ac506f42fa2371ac30d diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.1 b/node/node-1.4.18-darwin-arm64.dgst.sig.1 index 2f8bc8a..f2f6a14 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.1 and b/node/node-1.4.18-darwin-arm64.dgst.sig.1 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.12 b/node/node-1.4.18-darwin-arm64.dgst.sig.12 index 6cf3d74..d3137d9 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.12 and b/node/node-1.4.18-darwin-arm64.dgst.sig.12 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.13 b/node/node-1.4.18-darwin-arm64.dgst.sig.13 index b51946d..8e00e26 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.13 and b/node/node-1.4.18-darwin-arm64.dgst.sig.13 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.14 b/node/node-1.4.18-darwin-arm64.dgst.sig.14 index 0be4738..939b099 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.14 and b/node/node-1.4.18-darwin-arm64.dgst.sig.14 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.15 b/node/node-1.4.18-darwin-arm64.dgst.sig.15 new file mode 100644 index 0000000..fff4fac Binary files /dev/null and b/node/node-1.4.18-darwin-arm64.dgst.sig.15 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.16 b/node/node-1.4.18-darwin-arm64.dgst.sig.16 index d6ad923..9807929 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.16 and b/node/node-1.4.18-darwin-arm64.dgst.sig.16 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.17 b/node/node-1.4.18-darwin-arm64.dgst.sig.17 index a36964f..c719e16 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.17 and b/node/node-1.4.18-darwin-arm64.dgst.sig.17 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.2 b/node/node-1.4.18-darwin-arm64.dgst.sig.2 index 7859d1a..9e05512 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.2 and b/node/node-1.4.18-darwin-arm64.dgst.sig.2 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.3 b/node/node-1.4.18-darwin-arm64.dgst.sig.3 index 8f13dc5..901b0ad 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.3 and b/node/node-1.4.18-darwin-arm64.dgst.sig.3 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.4 b/node/node-1.4.18-darwin-arm64.dgst.sig.4 index 0156159..433eedc 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.4 and b/node/node-1.4.18-darwin-arm64.dgst.sig.4 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.5 b/node/node-1.4.18-darwin-arm64.dgst.sig.5 new file mode 100644 index 0000000..0e52649 Binary files /dev/null and b/node/node-1.4.18-darwin-arm64.dgst.sig.5 differ diff --git 
a/node/node-1.4.18-darwin-arm64.dgst.sig.6 b/node/node-1.4.18-darwin-arm64.dgst.sig.6 new file mode 100644 index 0000000..d4d68d9 Binary files /dev/null and b/node/node-1.4.18-darwin-arm64.dgst.sig.6 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.8 b/node/node-1.4.18-darwin-arm64.dgst.sig.8 index 4deec94..cbf75a3 100644 Binary files a/node/node-1.4.18-darwin-arm64.dgst.sig.8 and b/node/node-1.4.18-darwin-arm64.dgst.sig.8 differ diff --git a/node/node-1.4.18-darwin-arm64.dgst.sig.9 b/node/node-1.4.18-darwin-arm64.dgst.sig.9 new file mode 100644 index 0000000..7157385 Binary files /dev/null and b/node/node-1.4.18-darwin-arm64.dgst.sig.9 differ diff --git a/node/node-1.4.18-linux-amd64.dgst b/node/node-1.4.18-linux-amd64.dgst index cbf1d51..f619e63 100644 --- a/node/node-1.4.18-linux-amd64.dgst +++ b/node/node-1.4.18-linux-amd64.dgst @@ -1 +1 @@ -SHA3-256(node-1.4.18-linux-amd64)= e41bf8538990e201637521b0eb278a1aebfc46e5d8de102f4870de30616b44e6 +SHA3-256(node-1.4.18-linux-amd64)= f2da38a5c8d4257767f4a9659ca9c05d4cf312fd7294e6a18847c8923f6815a0 diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.1 b/node/node-1.4.18-linux-amd64.dgst.sig.1 index 3f61252..a7949bc 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.1 and b/node/node-1.4.18-linux-amd64.dgst.sig.1 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.12 b/node/node-1.4.18-linux-amd64.dgst.sig.12 index 2187f57..b4fd9f5 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.12 and b/node/node-1.4.18-linux-amd64.dgst.sig.12 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.13 b/node/node-1.4.18-linux-amd64.dgst.sig.13 index 320de72..bd9c141 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.13 and b/node/node-1.4.18-linux-amd64.dgst.sig.13 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.14 b/node/node-1.4.18-linux-amd64.dgst.sig.14 index 3b45334..206197d 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.14 and b/node/node-1.4.18-linux-amd64.dgst.sig.14 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.15 b/node/node-1.4.18-linux-amd64.dgst.sig.15 new file mode 100644 index 0000000..32ee7f4 Binary files /dev/null and b/node/node-1.4.18-linux-amd64.dgst.sig.15 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.16 b/node/node-1.4.18-linux-amd64.dgst.sig.16 index 81dc5b2..490e99a 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.16 and b/node/node-1.4.18-linux-amd64.dgst.sig.16 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.17 b/node/node-1.4.18-linux-amd64.dgst.sig.17 index a05a3f1..ea5e17f 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.17 and b/node/node-1.4.18-linux-amd64.dgst.sig.17 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.2 b/node/node-1.4.18-linux-amd64.dgst.sig.2 index 20b8108..0e07cb9 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.2 and b/node/node-1.4.18-linux-amd64.dgst.sig.2 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.3 b/node/node-1.4.18-linux-amd64.dgst.sig.3 index 4bdf22e..825eca9 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.3 and b/node/node-1.4.18-linux-amd64.dgst.sig.3 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.4 b/node/node-1.4.18-linux-amd64.dgst.sig.4 index 027bbd4..dc59ed7 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.4 and b/node/node-1.4.18-linux-amd64.dgst.sig.4 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.5 b/node/node-1.4.18-linux-amd64.dgst.sig.5 new file mode 100644 index 0000000..23cf3ed Binary 
files /dev/null and b/node/node-1.4.18-linux-amd64.dgst.sig.5 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.6 b/node/node-1.4.18-linux-amd64.dgst.sig.6 new file mode 100644 index 0000000..3ea21bc Binary files /dev/null and b/node/node-1.4.18-linux-amd64.dgst.sig.6 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.8 b/node/node-1.4.18-linux-amd64.dgst.sig.8 index 5afcbf8..435ff71 100644 Binary files a/node/node-1.4.18-linux-amd64.dgst.sig.8 and b/node/node-1.4.18-linux-amd64.dgst.sig.8 differ diff --git a/node/node-1.4.18-linux-amd64.dgst.sig.9 b/node/node-1.4.18-linux-amd64.dgst.sig.9 new file mode 100644 index 0000000..d39a4f6 Binary files /dev/null and b/node/node-1.4.18-linux-amd64.dgst.sig.9 differ diff --git a/node/node-1.4.18-linux-arm64.dgst b/node/node-1.4.18-linux-arm64.dgst index 0938885..79e41a2 100644 --- a/node/node-1.4.18-linux-arm64.dgst +++ b/node/node-1.4.18-linux-arm64.dgst @@ -1 +1 @@ -SHA3-256(node-1.4.18-linux-arm64)= de488e85acfd5ced235c8b7994d37cb153251826a52d1b0e964034b80d65b478 +SHA3-256(node-1.4.18-linux-arm64)= e12e14a18a491921fcdd4ffe6e9792f1e195abf4950fe56316219f920867c45a diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.1 b/node/node-1.4.18-linux-arm64.dgst.sig.1 index 27daa5a..10dd863 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.1 and b/node/node-1.4.18-linux-arm64.dgst.sig.1 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.12 b/node/node-1.4.18-linux-arm64.dgst.sig.12 index 0c7c533..a5bd10d 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.12 and b/node/node-1.4.18-linux-arm64.dgst.sig.12 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.13 b/node/node-1.4.18-linux-arm64.dgst.sig.13 index c38670b..70d157c 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.13 and b/node/node-1.4.18-linux-arm64.dgst.sig.13 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.14 b/node/node-1.4.18-linux-arm64.dgst.sig.14 index 9d207f7..3ee2e94 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.14 and b/node/node-1.4.18-linux-arm64.dgst.sig.14 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.15 b/node/node-1.4.18-linux-arm64.dgst.sig.15 new file mode 100644 index 0000000..afbc1c5 Binary files /dev/null and b/node/node-1.4.18-linux-arm64.dgst.sig.15 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.16 b/node/node-1.4.18-linux-arm64.dgst.sig.16 index 77b2a17..71c3e87 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.16 and b/node/node-1.4.18-linux-arm64.dgst.sig.16 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.17 b/node/node-1.4.18-linux-arm64.dgst.sig.17 index a7afc99..aacb133 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.17 and b/node/node-1.4.18-linux-arm64.dgst.sig.17 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.2 b/node/node-1.4.18-linux-arm64.dgst.sig.2 index 7759dc4..d7bdcc3 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.2 and b/node/node-1.4.18-linux-arm64.dgst.sig.2 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.3 b/node/node-1.4.18-linux-arm64.dgst.sig.3 index 170a8da..293207a 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.3 and b/node/node-1.4.18-linux-arm64.dgst.sig.3 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.4 b/node/node-1.4.18-linux-arm64.dgst.sig.4 index 9b53b07..c8376d0 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.4 and b/node/node-1.4.18-linux-arm64.dgst.sig.4 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.5 
b/node/node-1.4.18-linux-arm64.dgst.sig.5 new file mode 100644 index 0000000..5985fcf Binary files /dev/null and b/node/node-1.4.18-linux-arm64.dgst.sig.5 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.6 b/node/node-1.4.18-linux-arm64.dgst.sig.6 new file mode 100644 index 0000000..b9c5782 Binary files /dev/null and b/node/node-1.4.18-linux-arm64.dgst.sig.6 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.8 b/node/node-1.4.18-linux-arm64.dgst.sig.8 index 275cf53..2026957 100644 Binary files a/node/node-1.4.18-linux-arm64.dgst.sig.8 and b/node/node-1.4.18-linux-arm64.dgst.sig.8 differ diff --git a/node/node-1.4.18-linux-arm64.dgst.sig.9 b/node/node-1.4.18-linux-arm64.dgst.sig.9 new file mode 100644 index 0000000..8295904 Binary files /dev/null and b/node/node-1.4.18-linux-arm64.dgst.sig.9 differ diff --git a/node/protobufs/data.pb.go b/node/protobufs/data.pb.go new file mode 100644 index 0000000..538d8ab --- /dev/null +++ b/node/protobufs/data.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: data.proto + +package protobufs + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ChallengeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Challenge []byte `protobuf:"bytes,1,opt,name=challenge,proto3" json:"challenge,omitempty"` + Core uint32 `protobuf:"varint,2,opt,name=core,proto3" json:"core,omitempty"` + Skew int64 `protobuf:"varint,3,opt,name=skew,proto3" json:"skew,omitempty"` + NowMs int64 `protobuf:"varint,4,opt,name=now_ms,json=nowMs,proto3" json:"now_ms,omitempty"` +} + +func (x *ChallengeProofRequest) Reset() { + *x = ChallengeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_data_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChallengeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChallengeProofRequest) ProtoMessage() {} + +func (x *ChallengeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_data_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChallengeProofRequest.ProtoReflect.Descriptor instead. 
+func (*ChallengeProofRequest) Descriptor() ([]byte, []int) { + return file_data_proto_rawDescGZIP(), []int{0} +} + +func (x *ChallengeProofRequest) GetChallenge() []byte { + if x != nil { + return x.Challenge + } + return nil +} + +func (x *ChallengeProofRequest) GetCore() uint32 { + if x != nil { + return x.Core + } + return 0 +} + +func (x *ChallengeProofRequest) GetSkew() int64 { + if x != nil { + return x.Skew + } + return 0 +} + +func (x *ChallengeProofRequest) GetNowMs() int64 { + if x != nil { + return x.NowMs + } + return 0 +} + +type ChallengeProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + NextSkew int64 `protobuf:"varint,2,opt,name=next_skew,json=nextSkew,proto3" json:"next_skew,omitempty"` +} + +func (x *ChallengeProofResponse) Reset() { + *x = ChallengeProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_data_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChallengeProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChallengeProofResponse) ProtoMessage() {} + +func (x *ChallengeProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_data_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChallengeProofResponse.ProtoReflect.Descriptor instead. +func (*ChallengeProofResponse) Descriptor() ([]byte, []int) { + return file_data_proto_rawDescGZIP(), []int{1} +} + +func (x *ChallengeProofResponse) GetOutput() []byte { + if x != nil { + return x.Output + } + return nil +} + +func (x *ChallengeProofResponse) GetNextSkew() int64 { + if x != nil { + return x.NextSkew + } + return 0 +} + +var File_data_proto protoreflect.FileDescriptor + +var file_data_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x71, 0x75, + 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x70, 0x62, 0x22, 0x74, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, + 0x73, 0x6b, 0x65, 0x77, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x77, 0x5f, 0x6d, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x77, 0x4d, 0x73, 0x22, 0x4d, 0x0a, 0x16, 0x43, + 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x6b, 0x65, 0x77, 0x32, 0x8c, 0x01, 0x0a, 
0x0e, 0x44, + 0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, + 0x17, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, + 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_data_proto_rawDescOnce sync.Once + file_data_proto_rawDescData = file_data_proto_rawDesc +) + +func file_data_proto_rawDescGZIP() []byte { + file_data_proto_rawDescOnce.Do(func() { + file_data_proto_rawDescData = protoimpl.X.CompressGZIP(file_data_proto_rawDescData) + }) + return file_data_proto_rawDescData +} + +var file_data_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_data_proto_goTypes = []interface{}{ + (*ChallengeProofRequest)(nil), // 0: quilibrium.node.data.pb.ChallengeProofRequest + (*ChallengeProofResponse)(nil), // 1: quilibrium.node.data.pb.ChallengeProofResponse +} +var file_data_proto_depIdxs = []int32{ + 0, // 0: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:input_type -> quilibrium.node.data.pb.ChallengeProofRequest + 1, // 1: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:output_type -> quilibrium.node.data.pb.ChallengeProofResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_data_proto_init() } +func file_data_proto_init() { + if File_data_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_data_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChallengeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_data_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChallengeProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_data_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_data_proto_goTypes, + DependencyIndexes: file_data_proto_depIdxs, + MessageInfos: file_data_proto_msgTypes, + }.Build() + File_data_proto = out.File + 
file_data_proto_rawDesc = nil + file_data_proto_goTypes = nil + file_data_proto_depIdxs = nil +} diff --git a/node/protobufs/data.pb.gw.go b/node/protobufs/data.pb.gw.go new file mode 100644 index 0000000..769dcd8 --- /dev/null +++ b/node/protobufs/data.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: data.proto + +/* +Package protobufs is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package protobufs + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_DataIPCService_CalculateChallengeProof_0(ctx context.Context, marshaler runtime.Marshaler, client DataIPCServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ChallengeProofRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CalculateChallengeProof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_DataIPCService_CalculateChallengeProof_0(ctx context.Context, marshaler runtime.Marshaler, server DataIPCServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ChallengeProofRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CalculateChallengeProof(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterDataIPCServiceHandlerServer registers the http handlers for service DataIPCService to "mux". +// UnaryRPC :call DataIPCServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDataIPCServiceHandlerFromEndpoint instead. 
+func RegisterDataIPCServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DataIPCServiceServer) error { + + mux.Handle("POST", pattern_DataIPCService_CalculateChallengeProof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof", runtime.WithHTTPPathPattern("/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_DataIPCService_CalculateChallengeProof_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_DataIPCService_CalculateChallengeProof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterDataIPCServiceHandlerFromEndpoint is same as RegisterDataIPCServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterDataIPCServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterDataIPCServiceHandler(ctx, mux, conn) +} + +// RegisterDataIPCServiceHandler registers the http handlers for service DataIPCService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterDataIPCServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterDataIPCServiceHandlerClient(ctx, mux, NewDataIPCServiceClient(conn)) +} + +// RegisterDataIPCServiceHandlerClient registers the http handlers for service DataIPCService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DataIPCServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DataIPCServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "DataIPCServiceClient" to call the correct interceptors. 
+func RegisterDataIPCServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DataIPCServiceClient) error { + + mux.Handle("POST", pattern_DataIPCService_CalculateChallengeProof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof", runtime.WithHTTPPathPattern("/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_DataIPCService_CalculateChallengeProof_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_DataIPCService_CalculateChallengeProof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_DataIPCService_CalculateChallengeProof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.data.pb.DataIPCService", "CalculateChallengeProof"}, "")) +) + +var ( + forward_DataIPCService_CalculateChallengeProof_0 = runtime.ForwardResponseMessage +) diff --git a/node/protobufs/data.proto b/node/protobufs/data.proto new file mode 100644 index 0000000..1e959c6 --- /dev/null +++ b/node/protobufs/data.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package quilibrium.node.data.pb; + +option go_package = "source.quilibrium.com/quilibrium/monorepo/node/protobufs"; + +message ChallengeProofRequest { + bytes challenge = 1; + uint32 core = 2; + int64 skew = 3; + int64 now_ms = 4; +} + +message ChallengeProofResponse { + bytes output = 1; + int64 next_skew = 2; +} + +service DataIPCService { + rpc CalculateChallengeProof(ChallengeProofRequest) returns (ChallengeProofResponse); +} \ No newline at end of file diff --git a/node/protobufs/data_grpc.pb.go b/node/protobufs/data_grpc.pb.go new file mode 100644 index 0000000..c1c24e8 --- /dev/null +++ b/node/protobufs/data_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: data.proto + +package protobufs + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + DataIPCService_CalculateChallengeProof_FullMethodName = "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof" +) + +// DataIPCServiceClient is the client API for DataIPCService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type DataIPCServiceClient interface { + CalculateChallengeProof(ctx context.Context, in *ChallengeProofRequest, opts ...grpc.CallOption) (*ChallengeProofResponse, error) +} + +type dataIPCServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDataIPCServiceClient(cc grpc.ClientConnInterface) DataIPCServiceClient { + return &dataIPCServiceClient{cc} +} + +func (c *dataIPCServiceClient) CalculateChallengeProof(ctx context.Context, in *ChallengeProofRequest, opts ...grpc.CallOption) (*ChallengeProofResponse, error) { + out := new(ChallengeProofResponse) + err := c.cc.Invoke(ctx, DataIPCService_CalculateChallengeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DataIPCServiceServer is the server API for DataIPCService service. +// All implementations must embed UnimplementedDataIPCServiceServer +// for forward compatibility +type DataIPCServiceServer interface { + CalculateChallengeProof(context.Context, *ChallengeProofRequest) (*ChallengeProofResponse, error) + mustEmbedUnimplementedDataIPCServiceServer() +} + +// UnimplementedDataIPCServiceServer must be embedded to have forward compatible implementations. +type UnimplementedDataIPCServiceServer struct { +} + +func (UnimplementedDataIPCServiceServer) CalculateChallengeProof(context.Context, *ChallengeProofRequest) (*ChallengeProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CalculateChallengeProof not implemented") +} +func (UnimplementedDataIPCServiceServer) mustEmbedUnimplementedDataIPCServiceServer() {} + +// UnsafeDataIPCServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DataIPCServiceServer will +// result in compilation errors. +type UnsafeDataIPCServiceServer interface { + mustEmbedUnimplementedDataIPCServiceServer() +} + +func RegisterDataIPCServiceServer(s grpc.ServiceRegistrar, srv DataIPCServiceServer) { + s.RegisterService(&DataIPCService_ServiceDesc, srv) +} + +func _DataIPCService_CalculateChallengeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ChallengeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataIPCServiceServer).CalculateChallengeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataIPCService_CalculateChallengeProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataIPCServiceServer).CalculateChallengeProof(ctx, req.(*ChallengeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DataIPCService_ServiceDesc is the grpc.ServiceDesc for DataIPCService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DataIPCService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "quilibrium.node.data.pb.DataIPCService", + HandlerType: (*DataIPCServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CalculateChallengeProof", + Handler: _DataIPCService_CalculateChallengeProof_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "data.proto", +} diff --git a/node/rpc/data_worker_ipc_server.go b/node/rpc/data_worker_ipc_server.go new file mode 100644 index 0000000..068979a --- /dev/null +++ b/node/rpc/data_worker_ipc_server.go @@ -0,0 +1,119 @@ +package rpc + +import ( + "context" + "os" + "runtime" + "syscall" + "time" + + "source.quilibrium.com/quilibrium/monorepo/node/crypto" + + "github.com/multiformats/go-multiaddr" + mn "github.com/multiformats/go-multiaddr/net" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + "source.quilibrium.com/quilibrium/monorepo/node/protobufs" +) + +type DataWorkerIPCServer struct { + protobufs.UnimplementedDataIPCServiceServer + listenAddrGRPC string + logger *zap.Logger + coreId uint32 + prover crypto.FrameProver + parentProcessId int +} + +// GetFrameInfo implements protobufs.NodeServiceServer. +func (r *DataWorkerIPCServer) CalculateChallengeProof( + ctx context.Context, + req *protobufs.ChallengeProofRequest, +) (*protobufs.ChallengeProofResponse, error) { + if r.coreId != req.Core { + return nil, errors.Wrap( + errors.New("invalid core id"), + "calculate challenge proof", + ) + } + + proof, nextSkew, err := r.prover.CalculateChallengeProof( + req.Challenge, + uint32(r.coreId), + req.Skew, + req.NowMs, + ) + if err != nil { + return nil, errors.Wrap(err, "calculate challenge proof") + } + + return &protobufs.ChallengeProofResponse{ + Output: proof, + NextSkew: nextSkew, + }, nil +} + +func NewDataWorkerIPCServer( + listenAddrGRPC string, + logger *zap.Logger, + coreId uint32, + prover crypto.FrameProver, + parentProcessId int, +) (*DataWorkerIPCServer, error) { + return &DataWorkerIPCServer{ + listenAddrGRPC: listenAddrGRPC, + logger: logger, + coreId: coreId, + prover: prover, + parentProcessId: parentProcessId, + }, nil +} + +func (r *DataWorkerIPCServer) Start() error { + s := grpc.NewServer( + grpc.MaxRecvMsgSize(10*1024*1024), + grpc.MaxSendMsgSize(10*1024*1024), + ) + protobufs.RegisterDataIPCServiceServer(s, r) + reflection.Register(s) + + mg, err := multiaddr.NewMultiaddr(r.listenAddrGRPC) + if err != nil { + return errors.Wrap(err, "start") + } + + lis, err := mn.Listen(mg) + if err != nil { + return errors.Wrap(err, "start") + } + + go r.monitorParent() + + if err := s.Serve(mn.NetListener(lis)); err != nil { + panic(err) + } + + return nil +} + +func (r *DataWorkerIPCServer) monitorParent() { + for { + time.Sleep(1 * time.Second) + proc, err := os.FindProcess(r.parentProcessId) + if err != nil { + r.logger.Error("parent process not found, terminating") + os.Exit(1) + } + + // Windows returns an error if the process is dead, nobody else does + if runtime.GOOS != "windows" { + err := proc.Signal(syscall.Signal(0)) + if err != nil { + r.logger.Error("parent process not found, terminating") + os.Exit(1) + } + } + } +} diff --git a/node/rpc/rpc_server.go b/node/rpc/node_rpc_server.go similarity index 100% rename from node/rpc/rpc_server.go rename to node/rpc/node_rpc_server.go