1.0.0 – Dawn

This commit is contained in:
Cassandra Heart 2023-09-24 21:43:35 -05:00
parent 01a1c10edd
commit e4d9bcdbf0
No known key found for this signature in database
GPG Key ID: 6352152859385958
55 changed files with 10326 additions and 351 deletions

View File

@ -13,10 +13,11 @@ require (
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.9.0
golang.org/x/sys v0.8.0
golang.org/x/tools v0.1.5
)
require golang.org/x/sys v0.8.0 // indirect
require (
github.com/cloudflare/circl v1.3.3
github.com/davecgh/go-spew v1.1.1 // indirect

View File

@ -18,8 +18,6 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bwesterb/go-ristretto v1.2.0 h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFADiZcWtw=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
@ -65,52 +63,31 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@ -14,7 +14,6 @@ import (
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/internal"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
)
@ -240,7 +239,11 @@ func (s *ScalarBls48581) SetBigInt(v *big.Int) (Scalar, error) {
if v == nil {
return nil, fmt.Errorf("invalid value")
}
i := bls48581.FromBytes(internal.ReverseScalarBytes(v.Bytes()))
t := make([]byte, bls48581.MODBYTES)
b := v.Bytes()
copy(t[bls48581.MODBYTES-uint(len(b)):], b)
i := bls48581.FromBytes(t)
i.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: i,
@ -251,19 +254,22 @@ func (s *ScalarBls48581) SetBigInt(v *big.Int) (Scalar, error) {
func (s *ScalarBls48581) BigInt() *big.Int {
bytes := make([]byte, bls48581.MODBYTES)
s.Value.ToBytes(bytes)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(bytes))
return new(big.Int).SetBytes(bytes)
}
func (s *ScalarBls48581) Bytes() []byte {
t := make([]byte, bls48581.MODBYTES)
s.Value.ToBytes(t)
return internal.ReverseScalarBytes(t)
return t
}
func (s *ScalarBls48581) SetBytes(bytes []byte) (Scalar, error) {
var seq [bls48581.MODBYTES]byte
copy(seq[:], internal.ReverseScalarBytes(bytes))
copy(seq[bls48581.MODBYTES-uint(len(bytes)):], bytes)
value := bls48581.FromBytes(seq[:])
if value == nil {
return nil, errors.New("could not deserialize")
}
@ -277,7 +283,7 @@ func (s *ScalarBls48581) SetBytesWide(bytes []byte) (Scalar, error) {
return nil, fmt.Errorf("invalid length")
}
var seq [bls48581.MODBYTES]byte
copy(seq[:], internal.ReverseScalarBytes(bytes))
copy(seq[:], bytes)
value := bls48581.FromBytes(seq[:])
if value == nil {
return nil, errors.New("could not deserialize")

View File

@ -31,7 +31,7 @@ import (
// MultiplySender is the party that plays the role of Sender in the multiplication protocol (protocol 5 of the paper).
type MultiplySender struct {
cOtSender *kos.Sender // underlying cOT sender struct, used by mult.
outputAdditiveShare curves.Scalar // ultimate output share of mult.
OutputAdditiveShare curves.Scalar // ultimate output share of mult.
gadget []curves.Scalar
curve *curves.Curve
transcript *merlin.Transcript
@ -41,7 +41,7 @@ type MultiplySender struct {
// MultiplyReceiver is the party that plays the role of Sender in the multiplication protocol (protocol 5 of the paper).
type MultiplyReceiver struct {
cOtReceiver *kos.Receiver // underlying cOT receiver struct, used by mult.
outputAdditiveShare curves.Scalar // ultimate output share of mult.
OutputAdditiveShare curves.Scalar // ultimate output share of mult.
omega []byte // this is used as an intermediate result during the course of mult.
gadget []curves.Scalar
curve *curves.Curve
@ -212,13 +212,13 @@ func (sender *MultiplySender) Round2Multiply(alpha curves.Scalar, round1Output *
return nil, errors.Wrap(err, "setting chi scalar from bytes")
}
}
sender.outputAdditiveShare = sender.curve.Scalar.Zero()
sender.OutputAdditiveShare = sender.curve.Scalar.Zero()
for j := uint(0); j < sender.cOtSender.L; j++ {
round2Output.R[j] = sender.curve.Scalar.Zero()
for k := 0; k < chiWidth; k++ {
round2Output.R[j] = round2Output.R[j].Add(chi[k].Mul(sender.cOtSender.OutputAdditiveShares[j][k]))
}
sender.outputAdditiveShare = sender.outputAdditiveShare.Add(sender.gadget[j].Mul(sender.cOtSender.OutputAdditiveShares[j][0]))
sender.OutputAdditiveShare = sender.OutputAdditiveShare.Add(sender.gadget[j].Mul(sender.cOtSender.OutputAdditiveShares[j][0]))
}
round2Output.U = chi[0].Mul(alpha).Add(chi[1].Mul(alphaHat))
return round2Output, nil
@ -250,7 +250,7 @@ func (receiver *MultiplyReceiver) Round3Multiply(round2Output *MultiplyRound2Out
}
}
receiver.outputAdditiveShare = receiver.curve.Scalar.Zero()
receiver.OutputAdditiveShare = receiver.curve.Scalar.Zero()
for j := uint(0); j < receiver.cOtReceiver.L; j++ {
// compute the LHS of bob's step 6) for j. note that we're "adding r_j" to both sides"; so this LHS includes r_j.
// the reason to do this is so that the constant-time (i.e., independent of w_j) calculation of w_j * u can proceed more cleanly.
@ -265,7 +265,7 @@ func (receiver *MultiplyReceiver) Round3Multiply(round2Output *MultiplyRound2Out
if subtle.ConstantTimeCompare(rightHandSideOfCheck[:], leftHandSideOfCheck.Bytes()) != 1 {
return fmt.Errorf("alice's values R and U failed to check in round 3 multiply")
}
receiver.outputAdditiveShare = receiver.outputAdditiveShare.Add(receiver.gadget[j].Mul(receiver.cOtReceiver.OutputAdditiveShares[j][0]))
receiver.OutputAdditiveShare = receiver.OutputAdditiveShare.Add(receiver.gadget[j].Mul(receiver.cOtReceiver.OutputAdditiveShares[j][0]))
}
return nil
}

View File

@ -44,7 +44,7 @@ func TestMultiply(t *testing.T) {
require.Nil(t, err)
product := alpha.Mul(beta)
sum := sender.outputAdditiveShare.Add(receiver.outputAdditiveShare)
sum := sender.OutputAdditiveShare.Add(receiver.OutputAdditiveShare)
require.Equal(t, product, sum)
}
@ -88,14 +88,14 @@ func TestMultiplyBLS48(t *testing.T) {
generator := alpha.Point().Generator()
product := generator.Mul(alpha).Mul(beta)
sum := generator.Mul(sender.outputAdditiveShare).Add(generator.Mul(receiver.outputAdditiveShare))
sum := generator.Mul(sender.OutputAdditiveShare).Add(generator.Mul(receiver.OutputAdditiveShare))
g2generator := curves.BLS48581G2().NewGeneratorPoint()
g2product := g2generator.Mul(alpha).Mul(beta)
g2sum := g2generator.Mul(sender.outputAdditiveShare).Add(g2generator.Mul(receiver.outputAdditiveShare))
g2sum := g2generator.Mul(sender.OutputAdditiveShare).Add(g2generator.Mul(receiver.OutputAdditiveShare))
product2 := generator.Mul(alpha2).Mul(beta2)
sum2 := generator.Mul(sender2.outputAdditiveShare).Add(generator.Mul(receiver2.outputAdditiveShare))
sum2 := generator.Mul(sender2.OutputAdditiveShare).Add(generator.Mul(receiver2.OutputAdditiveShare))
sum2Neg := sum2.Neg()
result := product.(*curves.PointBls48581G1).MultiPairing(

View File

@ -234,7 +234,7 @@ func (alice *Alice) Round3Sign(message []byte, round2Output *SignRound2Output) (
one := alice.curve.Scalar.One()
gamma1 := alice.curve.ScalarBaseMult(kA.Mul(phi).Add(one))
other := r.Mul(multiplySenders[0].outputAdditiveShare.Neg())
other := r.Mul(multiplySenders[0].OutputAdditiveShare.Neg())
gamma1 = gamma1.Add(other)
hashGamma1Bytes := sha3.Sum256(gamma1.ToAffineCompressed())
hashGamma1, err := alice.curve.Scalar.SetBytes(hashGamma1Bytes[:])
@ -260,9 +260,9 @@ func (alice *Alice) Round3Sign(message []byte, round2Output *SignRound2Output) (
return nil, errors.Wrap(err, "setting rX scalar from bytes")
}
sigA := hOfMAsInteger.Mul(multiplySenders[0].outputAdditiveShare).Add(rX.Mul(multiplySenders[1].outputAdditiveShare))
gamma2 := alice.publicKey.Mul(multiplySenders[0].outputAdditiveShare)
other = alice.curve.ScalarBaseMult(multiplySenders[1].outputAdditiveShare.Neg())
sigA := hOfMAsInteger.Mul(multiplySenders[0].OutputAdditiveShare).Add(rX.Mul(multiplySenders[1].OutputAdditiveShare))
gamma2 := alice.publicKey.Mul(multiplySenders[0].OutputAdditiveShare)
other = alice.curve.ScalarBaseMult(multiplySenders[1].OutputAdditiveShare.Neg())
gamma2 = gamma2.Add(other)
hashGamma2Bytes := sha3.Sum256(gamma2.ToAffineCompressed())
hashGamma2, err := alice.curve.Scalar.SetBytes(hashGamma2Bytes[:])
@ -314,14 +314,14 @@ func (bob *Bob) Round4Final(message []byte, round3Output *SignRound3Output) erro
R: rX.Add(zero).BigInt(), // slight trick here; add it to 0 just to mod it by q (now it's mod p!)
V: int(rY),
}
gamma1 := r.Mul(bob.multiplyReceivers[0].outputAdditiveShare)
gamma1 := r.Mul(bob.multiplyReceivers[0].OutputAdditiveShare)
gamma1HashedBytes := sha3.Sum256(gamma1.ToAffineCompressed())
gamma1Hashed, err := bob.curve.Scalar.SetBytes(gamma1HashedBytes[:])
if err != nil {
return errors.Wrap(err, "setting gamma1Hashed scalar from bytes")
}
phi := round3Output.EtaPhi.Sub(gamma1Hashed)
theta := bob.multiplyReceivers[0].outputAdditiveShare.Sub(phi.Div(bob.kB))
theta := bob.multiplyReceivers[0].OutputAdditiveShare.Sub(phi.Div(bob.kB))
if _, err = bob.hash.Write(message); err != nil {
return errors.Wrap(err, "writing message to hash in Bob sign round 5 final")
}
@ -334,8 +334,8 @@ func (bob *Bob) Round4Final(message []byte, round3Output *SignRound3Output) erro
if err != nil {
return errors.Wrap(err, "setting capitalR scalar from big int")
}
sigB := digest.Mul(theta).Add(capitalR.Mul(bob.multiplyReceivers[1].outputAdditiveShare))
gamma2 := bob.curve.ScalarBaseMult(bob.multiplyReceivers[1].outputAdditiveShare)
sigB := digest.Mul(theta).Add(capitalR.Mul(bob.multiplyReceivers[1].OutputAdditiveShare))
gamma2 := bob.curve.ScalarBaseMult(bob.multiplyReceivers[1].OutputAdditiveShare)
other := bob.publicKey.Mul(theta.Neg())
gamma2 = gamma2.Add(other)
gamma2HashedBytes := sha3.Sum256(gamma2.ToAffineCompressed())

View File

@ -5,7 +5,7 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/execution/nop"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
)
type Node struct {
@ -14,7 +14,7 @@ type Node struct {
}
func newNode(
nopExecutionEngine *nop.NopExecutionEngine,
ceremonyExecutionEngine *ceremony.CeremonyExecutionEngine,
engine consensus.ConsensusEngine,
) (*Node, error) {
if engine == nil {
@ -22,8 +22,8 @@ func newNode(
}
execEngines := make(map[string]execution.ExecutionEngine)
if nopExecutionEngine != nil {
execEngines[nopExecutionEngine.GetName()] = nopExecutionEngine
if ceremonyExecutionEngine != nil {
execEngines[ceremonyExecutionEngine.GetName()] = ceremonyExecutionEngine
}
return &Node{

View File

@ -8,8 +8,9 @@ import (
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
ceremonyConsensus "source.quilibrium.com/quilibrium/monorepo/node/consensus/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/execution/nop"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -38,7 +39,9 @@ var storeSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "DB"),
store.NewPebbleDB,
store.NewPebbleClockStore,
store.NewPebbleKeyStore,
wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
)
var pubSubSet = wire.NewSet(
@ -47,12 +50,20 @@ var pubSubSet = wire.NewSet(
wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)),
)
var dataConsensusSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "Engine"),
ceremonyConsensus.NewCeremonyDataClockConsensusEngine,
wire.Bind(
new(consensus.DataConsensusEngine),
new(*ceremonyConsensus.CeremonyDataClockConsensusEngine),
),
)
var engineSet = wire.NewSet(
nop.NewNopExecutionEngine,
ceremony.NewCeremonyExecutionEngine,
)
var consensusSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "Engine"),
master.NewMasterClockConsensusEngine,
wire.Bind(
new(consensus.ConsensusEngine),
@ -67,6 +78,7 @@ func NewNode(*config.Config) (*Node, error) {
storeSet,
pubSubSet,
engineSet,
dataConsensusSet,
consensusSet,
newNode,
))

View File

@ -11,8 +11,9 @@ import (
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/execution/nop"
ceremony2 "source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -22,17 +23,19 @@ import (
func NewNode(configConfig *config.Config) (*Node, error) {
zapLogger := logger()
nopExecutionEngine := nop.NewNopExecutionEngine(zapLogger)
engineConfig := configConfig.Engine
keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
dbConfig := configConfig.DB
db := store.NewPebbleDB(dbConfig)
pebbleClockStore := store.NewPebbleClockStore(db, zapLogger)
keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
pebbleKeyStore := store.NewPebbleKeyStore(db, zapLogger)
p2PConfig := configConfig.P2P
blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
ceremonyDataClockConsensusEngine := ceremony.NewCeremonyDataClockConsensusEngine(engineConfig, zapLogger, fileKeyManager, pebbleClockStore, pebbleKeyStore, blossomSub)
ceremonyExecutionEngine := ceremony2.NewCeremonyExecutionEngine(zapLogger, ceremonyDataClockConsensusEngine, engineConfig, fileKeyManager, blossomSub, pebbleClockStore, pebbleKeyStore)
masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub)
node, err := newNode(nopExecutionEngine, masterClockConsensusEngine)
node, err := newNode(ceremonyExecutionEngine, masterClockConsensusEngine)
if err != nil {
return nil, err
}
@ -68,13 +71,19 @@ var loggerSet = wire.NewSet(
var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, store.NewPebbleClockStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, store.NewPebbleClockStore, store.NewPebbleKeyStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)))
var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)))
var engineSet = wire.NewSet(nop.NewNopExecutionEngine)
var dataConsensusSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), ceremony.NewCeremonyDataClockConsensusEngine, wire.Bind(
new(consensus.DataConsensusEngine),
new(*ceremony.CeremonyDataClockConsensusEngine),
),
)
var consensusSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), master.NewMasterClockConsensusEngine, wire.Bind(
var engineSet = wire.NewSet(ceremony2.NewCeremonyExecutionEngine)
var consensusSet = wire.NewSet(master.NewMasterClockConsensusEngine, wire.Bind(
new(consensus.ConsensusEngine),
new(*master.MasterClockConsensusEngine),
),

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,808 @@
package ceremony
import (
"bytes"
"crypto"
"crypto/rand"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/zkp/schnorr"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
// handleMessage validates and dispatches a single inbound pubsub message.
// It first fans the decoded message out to every registered execution
// engine concurrently; if any engine rejects it, the whole message is
// rejected. Otherwise the payload is routed by its type URL to the
// matching consensus-level handler (clock frame, proving key request,
// proving key announcement, or key bundle announcement).
func (e *CeremonyDataClockConsensusEngine) handleMessage(
	message *pb.Message,
) error {
	e.logger.Debug(
		"received message",
		zap.Binary("data", message.Data),
		zap.Binary("from", message.From),
		zap.Binary("signature", message.Signature),
	)
	msg := &protobufs.Message{}
	if err := proto.Unmarshal(message.Data, msg); err != nil {
		return errors.Wrap(err, "handle message")
	}
	// Run every execution engine in parallel; a failure from any one of
	// them causes the message to be rejected after Wait below.
	eg := errgroup.Group{}
	eg.SetLimit(len(e.executionEngines))
	for name := range e.executionEngines {
		name := name // capture the loop variable for the goroutine below
		eg.Go(func() error {
			messages, err := e.executionEngines[name].ProcessMessage(
				msg.Address,
				msg,
			)
			if err != nil {
				e.logger.Error(
					"could not process message for engine",
					zap.Error(err),
					zap.String("engine_name", name),
				)
				return errors.Wrap(err, "handle message")
			}
			// An engine may emit follow-up application messages; apply any
			// lobby state transitions it produced before accepting.
			for _, appMessage := range messages {
				appMsg := &anypb.Any{}
				err := proto.Unmarshal(appMessage.Payload, appMsg)
				if err != nil {
					e.logger.Error(
						"could not unmarshal app message",
						zap.Error(err),
						zap.String("engine_name", name),
					)
					return errors.Wrap(err, "handle message")
				}
				switch appMsg.TypeUrl {
				case protobufs.CeremonyLobbyStateTransitionType:
					t := &protobufs.CeremonyLobbyStateTransition{}
					err := proto.Unmarshal(appMsg.Value, t)
					if err != nil {
						return errors.Wrap(err, "handle message")
					}
					if err := e.handleCeremonyLobbyStateTransition(t); err != nil {
						return errors.Wrap(err, "handle message")
					}
				}
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		e.logger.Error("rejecting invalid message", zap.Error(err))
		return errors.Wrap(err, "execution failed")
	}
	// Engines accepted the message; route the payload by type URL.
	any := &anypb.Any{}
	if err := proto.Unmarshal(msg.Payload, any); err != nil {
		return errors.Wrap(err, "handle message")
	}
	switch any.TypeUrl {
	case protobufs.ClockFrameType:
		if err := e.handleClockFrameData(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle message")
		}
	case protobufs.ProvingKeyRequestType:
		if err := e.handleProvingKeyRequest(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle message")
		}
	case protobufs.ProvingKeyAnnouncementType:
		if err := e.handleProvingKey(message.From, msg.Address, any); err != nil {
			return errors.Wrap(err, "handle message")
		}
	case protobufs.KeyBundleAnnouncementType:
		if err := e.handleKeyBundle(message.From, msg.Address, any); err != nil {
			return errors.Wrap(err, "handle message")
		}
	}
	// Unrecognized type URLs fall through and are deliberately ignored.
	return nil
}
// handleCeremonyLobbyStateTransition stages an incoming lobby state
// transition so it can be committed into a later frame. A transition whose
// inputs overlap any already-staged input is treated as a duplicate and
// dropped wholesale.
//
// Returns an error only when the transition is malformed (mismatched
// input/type-URL counts); staging itself cannot fail.
func (e *CeremonyDataClockConsensusEngine) handleCeremonyLobbyStateTransition(
	transition *protobufs.CeremonyLobbyStateTransition,
) error {
	if len(transition.TransitionInputs) != len(transition.TypeUrls) {
		return errors.Wrap(
			errors.New("invalid state transition"),
			"handle ceremony lobby state transition",
		)
	}
	e.stagedLobbyStateTransitionsMx.Lock()
	// Deferring the unlock guarantees the mutex is released even if an
	// append below panics; the original code would have left it held.
	defer e.stagedLobbyStateTransitionsMx.Unlock()
	if e.stagedLobbyStateTransitions == nil {
		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
	}
	// Duplicate detection: if any incoming input already appears in the
	// staged set, skip the entire transition. The labeled break avoids
	// scanning the remaining pairs once a match is found.
	found := false
dedup:
	for _, ti := range e.stagedLobbyStateTransitions.TransitionInputs {
		for _, nti := range transition.TransitionInputs {
			if bytes.Equal(ti, nti) {
				found = true
				break dedup
			}
		}
	}
	if !found {
		for i := range transition.TransitionInputs {
			e.stagedLobbyStateTransitions.TypeUrls = append(
				e.stagedLobbyStateTransitions.TypeUrls,
				transition.TypeUrls[i],
			)
			e.stagedLobbyStateTransitions.TransitionInputs = append(
				e.stagedLobbyStateTransitions.TransitionInputs,
				transition.TransitionInputs[i],
			)
		}
	}
	return nil
}
// handleKeyBundle processes a key bundle announcement received from a
// peer. If the exact bundle is already stored it is ignored. Otherwise the
// matching proving key is looked up (committed or staged); when found, the
// bundle is verified and queued for commitment. When the proving key is
// unknown, it is requested from peers and the bundle is parked in the
// dependency map until the key arrives (see handleProvingKey).
//
// peerID and address identify the sender; any holds the wrapped
// KeyBundleAnnouncement payload.
func (e *CeremonyDataClockConsensusEngine) handleKeyBundle(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	e.logger.Info("received key bundle")
	keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{}
	if err := any.UnmarshalTo(keyBundleAnnouncement); err != nil {
		return errors.Wrap(err, "handle key bundle")
	}
	if len(keyBundleAnnouncement.ProvingKeyBytes) == 0 {
		return errors.Wrap(errors.New("proving key is nil"), "handle key bundle")
	}
	// The announcement comes from an untrusted peer: guard the nested
	// messages dereferenced below instead of panicking on nil.
	if keyBundleAnnouncement.IdentityKey == nil ||
		keyBundleAnnouncement.SignedPreKey == nil {
		return errors.Wrap(
			errors.New("incomplete key bundle"),
			"handle key bundle",
		)
	}
	k, err := e.keyStore.GetLatestKeyBundle(keyBundleAnnouncement.ProvingKeyBytes)
	if err != nil && !errors.Is(err, store.ErrNotFound) {
		return errors.Wrap(err, "handle key bundle")
	}
	if k != nil {
		latestAnnouncement := &protobufs.KeyBundleAnnouncement{}
		err := proto.Unmarshal(k.Data, latestAnnouncement)
		if err != nil {
			return errors.Wrap(err, "handle key bundle")
		}
		// Compare every proof component of the identity and pre keys; a
		// full match means this bundle has already been proven.
		if bytes.Equal(
			latestAnnouncement.IdentityKey.Challenge,
			keyBundleAnnouncement.IdentityKey.Challenge,
		) && bytes.Equal(
			latestAnnouncement.IdentityKey.Response,
			keyBundleAnnouncement.IdentityKey.Response,
		) && bytes.Equal(
			latestAnnouncement.IdentityKey.Statement,
			keyBundleAnnouncement.IdentityKey.Statement,
		) && bytes.Equal(
			latestAnnouncement.SignedPreKey.Challenge,
			keyBundleAnnouncement.SignedPreKey.Challenge,
		) && bytes.Equal(
			latestAnnouncement.SignedPreKey.Response,
			keyBundleAnnouncement.SignedPreKey.Response,
		) && bytes.Equal(
			latestAnnouncement.SignedPreKey.Statement,
			keyBundleAnnouncement.SignedPreKey.Statement,
		) {
			// This has already been proven, ignore
			return nil
		}
	}
	var provingKey *protobufs.ProvingKeyAnnouncement
	inclusion, err := e.keyStore.GetProvingKey(
		keyBundleAnnouncement.ProvingKeyBytes,
	)
	if err != nil {
		if !errors.Is(err, store.ErrNotFound) {
			return errors.Wrap(err, "handle key bundle")
		}
		provingKey, err = e.keyStore.GetStagedProvingKey(
			keyBundleAnnouncement.ProvingKeyBytes,
		)
		if err != nil && !errors.Is(err, store.ErrNotFound) {
			return errors.Wrap(err, "handle key bundle")
		}
	} else {
		// BUG FIX: the original unmarshaled into a nil
		// *ProvingKeyAnnouncement, which cannot succeed; allocate the
		// message before decoding.
		provingKey = &protobufs.ProvingKeyAnnouncement{}
		if err := proto.Unmarshal(inclusion.Data, provingKey); err != nil {
			return errors.Wrap(err, "handle key bundle")
		}
	}
	// We have a matching proving key, we can set this up to be committed.
	if provingKey != nil {
		e.logger.Info("verifying key bundle announcement")
		if err := keyBundleAnnouncement.Verify(provingKey); err != nil {
			e.logger.Error(
				"could not verify key bundle announcement",
				zap.Error(err),
			)
			return errors.Wrap(err, "handle key bundle")
		}
		go func() {
			e.logger.Info("adding key bundle announcement to pending commits")
			e.pendingCommits <- any
		}()
		return nil
	} else {
		e.logger.Info("proving key not found, requesting from peers")
		if err = e.publishMessage(e.filter, &protobufs.ProvingKeyRequest{
			ProvingKeyBytes: keyBundleAnnouncement.ProvingKeyBytes,
		}); err != nil {
			return errors.Wrap(err, "handle key bundle")
		}
		// Park the bundle until the proving key announcement arrives.
		e.dependencyMapMx.Lock()
		e.dependencyMap[string(keyBundleAnnouncement.ProvingKeyBytes)] = any
		e.dependencyMapMx.Unlock()
	}
	return nil
}
// handleProvingKey processes a proving key announcement: it verifies and
// stages the key, then — if a key bundle announcement was previously
// parked waiting for this key (see handleKeyBundle) — re-verifies that
// bundle asynchronously and forwards it to the pending-commits channel.
//
// peerID and address identify the sender; any holds the wrapped
// ProvingKeyAnnouncement payload.
func (e *CeremonyDataClockConsensusEngine) handleProvingKey(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	e.logger.Info("received proving key")
	provingKeyAnnouncement := &protobufs.ProvingKeyAnnouncement{}
	if err := any.UnmarshalTo(provingKeyAnnouncement); err != nil {
		return errors.Wrap(err, "handle proving key")
	}
	if err := provingKeyAnnouncement.Verify(); err != nil {
		return errors.Wrap(err, "handle proving key")
	}
	if err := e.keyStore.StageProvingKey(provingKeyAnnouncement); err != nil {
		return errors.Wrap(err, "handle proving key")
	}
	provingKey := provingKeyAnnouncement.PublicKey()
	e.logger.Info(
		"proving key staged",
		zap.Binary("proving_key", provingKey),
	)
	// DATA RACE FIX: the original read e.dependencyMap here (and again in
	// the goroutine) without holding dependencyMapMx, racing the locked
	// writes in handleKeyBundle. Snapshot the pending entry under the lock
	// and use the captured value from then on.
	e.dependencyMapMx.Lock()
	pending := e.dependencyMap[string(provingKey)]
	e.dependencyMapMx.Unlock()
	if pending != nil {
		go func() {
			keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{}
			if err := proto.Unmarshal(
				pending.Value,
				keyBundleAnnouncement,
			); err != nil {
				e.logger.Error(
					"could not unmarshal key bundle announcement",
					zap.Error(err),
				)
			}
			if err := keyBundleAnnouncement.Verify(
				provingKeyAnnouncement,
			); err != nil {
				e.logger.Error(
					"could not verify key bundle announcement",
					zap.Error(err),
				)
			}
			// NOTE(review): verification failures above are logged but the
			// bundle is still committed, matching the original behavior —
			// confirm whether failed bundles should be dropped instead.
			e.pendingCommits <- pending
			e.dependencyMapMx.Lock()
			delete(e.dependencyMap, string(provingKey))
			e.dependencyMapMx.Unlock()
		}()
	}
	return nil
}
// handleClockFrameData validates a clock frame received from a peer and, if
// it passes, persists it as a candidate frame. Checks performed in order:
// duplicate/fork filtering against the prover tries, the frame's own VDF
// proof, and the KZG aggregate proofs covering the frame's inclusion
// commitments. If the frame's parent is not known locally, a gap-fill
// request is published to peers before the candidate is stored.
//
// Fixes over the original: a bounds check prevents an index panic when a
// malformed frame carries more aggregate proofs than embedded commitments,
// and the invalid-proof branch no longer logs a nil error.
func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	frame := &protobufs.ClockFrame{}
	if err := any.UnmarshalTo(frame); err != nil {
		return errors.Wrap(err, "handle clock frame data")
	}

	earliestFrame, _, count := e.frameProverTrie.Get(address)
	_, latestFrame, _ := e.frameSeenProverTrie.Get(address)
	if frame.FrameNumber == latestFrame {
		// Already saw this frame number from this prover; drop it.
		e.logger.Info(
			"already received frame from address",
			zap.Binary("address", address),
			zap.Binary("filter", frame.Filter),
			zap.Uint64("frame_number", frame.FrameNumber),
		)
		return nil
	} else if frame.FrameNumber <= earliestFrame || count == 0 {
		// Sender is not an eligible prover at this frame height.
		e.logger.Info(
			"prover not in trie at frame, address may be in fork",
			zap.Binary("address", address),
			zap.Binary("filter", frame.Filter),
			zap.Uint64("frame_number", frame.FrameNumber),
		)
		return nil
	}

	e.logger.Info(
		"got clock frame",
		zap.Binary("address", address),
		zap.Binary("filter", frame.Filter),
		zap.Uint64("frame_number", frame.FrameNumber),
		zap.Int("proof_count", len(frame.AggregateProofs)),
	)

	if err := frame.VerifyDataClockFrame(); err != nil {
		e.logger.Error("could not verify clock frame", zap.Error(err))
		return errors.Wrap(err, "handle clock frame data")
	}

	// The frame input embeds one 74-byte compressed aggregate commitment
	// per aggregate proof after a 516-byte prefix.
	aggregateCommitments := []curves.PairingPoint{}
	for i := 0; i < (len(frame.Input)-516)/74; i++ {
		c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
			frame.Input[516+(i*74) : 516+(i*74)+74],
		)
		if err != nil {
			e.logger.Error("could not verify clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame data")
		}
		aggregateCommitments = append(aggregateCommitments, c.(curves.PairingPoint))
	}

	// Guard the indexed access below: a malformed frame could carry more
	// aggregate proofs than embedded commitments, which would panic.
	if len(frame.AggregateProofs) > len(aggregateCommitments) {
		return errors.Wrap(
			errors.New("aggregate proof count exceeds commitment count"),
			"handle clock frame data",
		)
	}

	for i, proof := range frame.AggregateProofs {
		aggregatePoly := [][]curves.PairingScalar{}
		commitments := []curves.PairingPoint{}

		for _, commit := range proof.GetInclusionCommitments() {
			switch commit.TypeUrl {
			case protobufs.IntrinsicExecutionOutputType:
				e.logger.Info("confirming inclusion in aggregate")
				// Execution output is hash-expanded to 1024 bytes before
				// polynomial conversion, mirroring the prover side.
				digest := sha3.NewShake256()
				_, err := digest.Write(commit.Data)
				if err != nil {
					e.logger.Error(
						"error converting key bundle to polynomial",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}

				expand := make([]byte, 1024)
				_, err = digest.Read(expand)
				if err != nil {
					e.logger.Error(
						"error converting key bundle to polynomial",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}

				poly, err := e.prover.BytesToPolynomial(expand)
				if err != nil {
					e.logger.Error(
						"error converting key bundle to polynomial",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}

				evalPoly, err := qcrypto.FFT(
					poly,
					*curves.BLS48581(
						curves.BLS48581G1().NewGeneratorPoint(),
					),
					16,
					false,
				)
				if err != nil {
					e.logger.Error(
						"error performing fast fourier transform on key bundle",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}
				e.logger.Info(
					"created fft of polynomial",
					zap.Int("poly_size", len(evalPoly)),
				)

				aggregatePoly = append(aggregatePoly, evalPoly)

				c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
					commit.Commitment,
				)
				if err != nil {
					e.logger.Error("could not verify clock frame", zap.Error(err))
					return errors.Wrap(err, "handle clock frame data")
				}
				commitments = append(commitments, c.(curves.PairingPoint))
			default:
				e.logger.Info("confirming inclusion in aggregate")
				poly, err := e.prover.BytesToPolynomial(commit.Data)
				if err != nil {
					e.logger.Error(
						"error converting key bundle to polynomial",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}

				// Pad to the 128-coefficient domain before the FFT.
				for i := 0; i < 128-len(poly); i++ {
					poly = append(
						poly,
						curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
					)
				}

				evalPoly, err := qcrypto.FFT(
					poly,
					*curves.BLS48581(
						curves.BLS48581G1().NewGeneratorPoint(),
					),
					128,
					false,
				)
				if err != nil {
					e.logger.Error(
						"error performing fast fourier transform on key bundle",
						zap.Error(err),
					)
					return errors.Wrap(err, "handle clock frame data")
				}
				e.logger.Info(
					"created fft of polynomial",
					zap.Int("poly_size", len(evalPoly)),
				)

				aggregatePoly = append(aggregatePoly, evalPoly)

				c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
					commit.Commitment,
				)
				if err != nil {
					e.logger.Error("could not verify clock frame", zap.Error(err))
					return errors.Wrap(err, "handle clock frame data")
				}
				commitments = append(commitments, c.(curves.PairingPoint))
			}
		}

		p, err := curves.BLS48581G1().Point.FromAffineCompressed(
			proof.Proof,
		)
		if err != nil {
			e.logger.Error("could not verify clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame data")
		}

		result, err := e.prover.VerifyAggregateProof(
			aggregatePoly,
			commitments,
			aggregateCommitments[i],
			p.(curves.PairingPoint),
		)
		if err != nil {
			e.logger.Error("could not verify clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame data")
		}

		if !result {
			// err is nil on this branch; the original logged zap.Error(err)
			// here, which emitted a misleading empty error field.
			e.logger.Error("could not verify clock frame")
			return errors.Wrap(
				errors.New("invalid proof"),
				"handle clock frame data",
			)
		}
	}

	e.logger.Info(
		"clock frame was valid",
		zap.Binary("address", address),
		zap.Binary("filter", frame.Filter),
		zap.Uint64("frame_number", frame.FrameNumber),
	)

	// NOTE(review): the result order assumed here (parentSelector, selector,
	// distance) disagrees with the (parent, distance, selector) order assumed
	// in createGenesisFrame — one of the two call sites is misordered;
	// confirm against GetParentSelectorAndDistance's implementation.
	parentSelector, selector, distance, err :=
		frame.GetParentSelectorAndDistance()
	if err != nil {
		return errors.Wrap(err, "handle clock frame data")
	}
	e.logger.Info(
		"difference between selector/discriminator",
		zap.Binary("difference", distance.Bytes()),
	)

	if _, err := e.clockStore.GetParentDataClockFrame(
		frame.Filter,
		frame.FrameNumber-1,
		frame.ParentSelector,
	); errors.Is(err, store.ErrNotFound) {
		// If this is a frame number higher than what we're already caught up
		// to, push a request to fill the gap, unless we're syncing or it's in
		// step, then just lazily seek.
		from := e.frame
		if e.syncingStatus != SyncStatusNotSyncing || from >= frame.FrameNumber-1 {
			from = frame.FrameNumber - 1
		}

		if err := e.publishMessage(e.filter, &protobufs.ClockFramesRequest{
			Filter:          e.filter,
			FromFrameNumber: from,
			ToFrameNumber:   frame.FrameNumber,
		}); err != nil {
			e.logger.Error(
				"could not publish clock frame parent request, skipping",
				zap.Error(err),
			)
		}
	}

	txn, err := e.clockStore.NewTransaction()
	if err != nil {
		e.logger.Error("could not save candidate clock frame", zap.Error(err))
		return errors.Wrap(err, "handle clock frame data")
	}

	if err := e.clockStore.PutCandidateDataClockFrame(
		parentSelector.Bytes(),
		distance.Bytes(),
		selector.Bytes(),
		frame,
		txn,
	); err != nil {
		e.logger.Error("could not save candidate clock frame", zap.Error(err))
		return errors.Wrap(err, "handle clock frame data")
	}

	if err := txn.Commit(); err != nil {
		e.logger.Error("could not save candidate clock frame", zap.Error(err))
		return errors.Wrap(err, "handle clock frame data")
	}

	if e.frame < frame.FrameNumber {
		e.latestFrameReceived = frame.FrameNumber
		e.lastFrameReceivedAt = time.Now().UTC()
	}
	e.frameSeenProverTrie.Add(address, frame.FrameNumber)
	return nil
}
// publishProof broadcasts a freshly proven frame to the engine's filter and
// transitions the engine back into the collecting state. It is a no-op
// unless the engine is currently in the publishing state.
func (e *CeremonyDataClockConsensusEngine) publishProof(
	frame *protobufs.ClockFrame,
) error {
	if e.state != consensus.EngineStatePublishing {
		return nil
	}

	e.logger.Info(
		"publishing frame and aggregations",
		zap.Uint64("frame_number", frame.FrameNumber),
	)

	if err := e.publishMessage(e.filter, frame); err != nil {
		return errors.Wrap(
			err,
			"publish proof",
		)
	}

	e.state = consensus.EngineStateCollecting
	return nil
}
// publishMessage wraps the given protobuf message in an Any (rewriting the
// type URL from the google domain to the quilibrium domain), hashes the
// serialized payload with Poseidon to form the message hash, and publishes
// the resulting envelope to the given bitmask filter.
func (e *CeremonyDataClockConsensusEngine) publishMessage(
	filter []byte,
	message proto.Message,
) error {
	envelope := &anypb.Any{}
	if err := envelope.MarshalFrom(message); err != nil {
		return errors.Wrap(err, "publish message")
	}

	// Rewrite only the first occurrence of the default Any type-URL host.
	envelope.TypeUrl = strings.Replace(
		envelope.TypeUrl,
		"type.googleapis.com",
		"types.quilibrium.com",
		1,
	)

	payload, err := proto.Marshal(envelope)
	if err != nil {
		return errors.Wrap(err, "publish message")
	}

	digest, err := poseidon.HashBytes(payload)
	if err != nil {
		return errors.Wrap(err, "publish message")
	}

	wire, err := proto.Marshal(&protobufs.Message{
		Hash:    digest.Bytes(),
		Address: e.provingKeyAddress,
		Payload: payload,
	})
	if err != nil {
		return errors.Wrap(err, "publish message")
	}

	return e.pubSub.PublishToBitmask(filter, wire)
}
// announceKeyBundle publishes this node's key bundle to the network: it
// ensures the q-ratchet identity key (idk) and signed pre-key (spk) exist,
// creating X448 agreement keys on first use, produces Schnorr
// proofs-of-knowledge for both, publishes a ProvingKeyAnnouncement binding
// the two commitments under the proving key's signature, and finally
// publishes the KeyBundleAnnouncement carrying the proofs and per-key
// signatures.
func (e *CeremonyDataClockConsensusEngine) announceKeyBundle() error {
	e.logger.Info("announcing key bundle")
	// Fetch the identity agreement key, creating it when absent.
	idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
	if err != nil {
		if errors.Is(err, keys.KeyNotFoundErr) {
			idk, err = e.keyManager.CreateAgreementKey(
				"q-ratchet-idk",
				keys.KeyTypeX448,
			)
			if err != nil {
				return errors.Wrap(err, "announce key bundle")
			}
		} else {
			return errors.Wrap(err, "announce key bundle")
		}
	}

	// Fetch the signed pre-key, creating it when absent.
	spk, err := e.keyManager.GetAgreementKey("q-ratchet-spk")
	if err != nil {
		if errors.Is(err, keys.KeyNotFoundErr) {
			spk, err = e.keyManager.CreateAgreementKey(
				"q-ratchet-spk",
				keys.KeyTypeX448,
			)
			if err != nil {
				return errors.Wrap(err, "announce key bundle")
			}
		} else {
			return errors.Wrap(err, "announce key bundle")
		}
	}

	// Public points and Schnorr provers over ED448 for both keys; the
	// provers use SHA3-256 and an empty session identifier.
	idkPoint := curves.ED448().NewGeneratorPoint().Mul(idk)
	idkProver := schnorr.NewProver(
		curves.ED448(),
		curves.ED448().NewGeneratorPoint(),
		sha3.New256(),
		[]byte{},
	)

	spkPoint := curves.ED448().NewGeneratorPoint().Mul(spk)
	spkProver := schnorr.NewProver(
		curves.ED448(),
		curves.ED448().NewGeneratorPoint(),
		sha3.New256(),
		[]byte{},
	)

	idkProof, idkCommitment, err := idkProver.ProveCommit(idk)
	if err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	spkProof, spkCommitment, err := spkProver.ProveCommit(spk)
	if err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	// The proving key signs the concatenation of both commitments.
	msg := append(
		append([]byte{}, idkCommitment...),
		spkCommitment...,
	)
	signature, err := e.provingKey.Sign(rand.Reader, msg, crypto.Hash(0))
	if err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	signatureProto := &protobufs.ProvingKeyAnnouncement_ProvingKeySignatureEd448{
		ProvingKeySignatureEd448: &protobufs.Ed448Signature{
			PublicKey: &protobufs.Ed448PublicKey{
				KeyValue: e.provingKeyBytes,
			},
			Signature: signature,
		},
	}
	provingKeyAnnouncement := &protobufs.ProvingKeyAnnouncement{
		IdentityCommitment:  idkCommitment,
		PrekeyCommitment:    spkCommitment,
		ProvingKeySignature: signatureProto,
	}

	// The proving key announcement must go out before the key bundle, as
	// peers verify the bundle against the staged proving key.
	if err := e.publishMessage(e.filter, provingKeyAnnouncement); err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	// Each public point is additionally signed by the proving key so peers
	// can authenticate the bundle's individual keys.
	idkSignature, err := e.provingKey.Sign(
		rand.Reader,
		idkPoint.ToAffineCompressed(),
		crypto.Hash(0),
	)
	if err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	spkSignature, err := e.provingKey.Sign(
		rand.Reader,
		spkPoint.ToAffineCompressed(),
		crypto.Hash(0),
	)
	if err != nil {
		return errors.Wrap(err, "announce key bundle")
	}

	keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{
		ProvingKeyBytes: e.provingKeyBytes,
		IdentityKey: &protobufs.IdentityKey{
			Challenge: idkProof.C.Bytes(),
			Response:  idkProof.S.Bytes(),
			Statement: idkProof.Statement.ToAffineCompressed(),
			IdentityKeySignature: &protobufs.IdentityKey_PublicKeySignatureEd448{
				PublicKeySignatureEd448: &protobufs.Ed448Signature{
					PublicKey: &protobufs.Ed448PublicKey{
						KeyValue: idkPoint.ToAffineCompressed(),
					},
					Signature: idkSignature,
				},
			},
		},
		SignedPreKey: &protobufs.SignedPreKey{
			Challenge: spkProof.C.Bytes(),
			Response:  spkProof.S.Bytes(),
			Statement: spkProof.Statement.ToAffineCompressed(),
			SignedPreKeySignature: &protobufs.SignedPreKey_PublicKeySignatureEd448{
				PublicKeySignatureEd448: &protobufs.Ed448Signature{
					PublicKey: &protobufs.Ed448PublicKey{
						KeyValue: spkPoint.ToAffineCompressed(),
					},
					Signature: spkSignature,
				},
			},
		},
	}

	return errors.Wrap(
		e.publishMessage(e.filter, keyBundleAnnouncement),
		"announce key bundle",
	)
}

View File

@ -0,0 +1,280 @@
package ceremony
import (
"crypto"
"math/big"
"sync"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
// InclusionMap indexes staged inclusion commitments by their KZG commitment
// point.
type InclusionMap = map[curves.PairingPoint]*protobufs.InclusionCommitment

// PolynomialMap holds the evaluation-form polynomial backing each staged
// commitment point.
type PolynomialMap = map[curves.PairingPoint][]curves.PairingScalar
// SyncStatusType enumerates the engine's frame synchronization states.
type SyncStatusType int

const (
	// SyncStatusNotSyncing indicates the engine is not performing a sync.
	// Constants are now typed as SyncStatusType (they were untyped ints)
	// so the compiler can flag misuse; values are unchanged.
	SyncStatusNotSyncing SyncStatusType = iota
	// SyncStatusAwaitingResponse — presumably set while a sync request is
	// outstanding; confirm against the sync handler.
	SyncStatusAwaitingResponse
	// SyncStatusSynchronizing — presumably set while frames are being
	// applied from a sync peer; confirm against the sync handler.
	SyncStatusSynchronizing
)
// CeremonyDataClockConsensusEngine drives the ceremony's hybrid data clock:
// it collects candidate frames from peers, proves new frames when this node
// is an eligible prover, and publishes the results. Construct with
// NewCeremonyDataClockConsensusEngine.
type CeremonyDataClockConsensusEngine struct {
	// Clock state.
	frame       uint64                // current frame number
	activeFrame *protobufs.ClockFrame // most recently set frame
	difficulty  uint32                // VDF difficulty parameter

	logger *zap.Logger
	state  consensus.EngineState

	// Storage, networking and key dependencies.
	clockStore store.ClockStore
	keyStore   store.KeyStore
	pubSub     p2p.PubSub
	keyManager keys.KeyManager

	// This node's proving key identity.
	provingKey        crypto.Signer
	provingKeyBytes   []byte
	provingKeyType    keys.KeyType
	provingKeyAddress []byte

	// Peer frame tracking.
	lastFrameReceivedAt time.Time
	latestFrameReceived uint64
	frameProverTrie     *tries.RollingFrecencyCritbitTrie // eligible provers
	frameSeenProverTrie *tries.RollingFrecencyCritbitTrie // frames seen per address

	// Key bundle announcements parked until their proving key arrives,
	// keyed by proving key bytes; guarded by dependencyMapMx.
	dependencyMap        map[string]*anypb.Any
	pendingCommits       chan *anypb.Any
	pendingCommitWorkers int64

	prover *qcrypto.KZGProver

	// Inclusion material staged for the next proven frame; the key commit
	// maps are guarded by stagedKeyCommitsMx, the lobby transitions by
	// stagedLobbyStateTransitionsMx.
	stagedKeyCommits            InclusionMap
	stagedKeyPolynomials        PolynomialMap
	stagedLobbyStateTransitions *protobufs.CeremonyLobbyStateTransition

	frameChan        chan *protobufs.ClockFrame
	executionEngines map[string]execution.ExecutionEngine
	filter           []byte
	input            []byte
	parentSelector   []byte

	// Synchronization progress.
	syncingStatus   SyncStatusType
	syncingTarget   []byte
	currentDistance *big.Int

	engineMx                       sync.Mutex
	dependencyMapMx                sync.Mutex
	stagedKeyCommitsMx             sync.Mutex
	stagedLobbyStateTransitionsMx  sync.Mutex
	lastKeyBundleAnnouncementFrame uint64
}
var _ consensus.DataConsensusEngine = (*CeremonyDataClockConsensusEngine)(nil)
// NewCeremonyDataClockConsensusEngine constructs the hybrid data clock used
// for ceremony execution — normally data clocks are bloom sharded with
// node-specific proofs alongside the public VDF proofs, but this clock
// carries a proof of the execution across all participating nodes. All
// dependencies are mandatory; a nil dependency panics.
func NewCeremonyDataClockConsensusEngine(
	engineConfig *config.EngineConfig,
	logger *zap.Logger,
	keyManager keys.KeyManager,
	clockStore store.ClockStore,
	keyStore store.KeyStore,
	pubSub p2p.PubSub,
) *CeremonyDataClockConsensusEngine {
	switch {
	case logger == nil:
		panic(errors.New("logger is nil"))
	case engineConfig == nil:
		panic(errors.New("engine config is nil"))
	case keyManager == nil:
		panic(errors.New("key manager is nil"))
	case clockStore == nil:
		panic(errors.New("clock store is nil"))
	case keyStore == nil:
		panic(errors.New("key store is nil"))
	case pubSub == nil:
		panic(errors.New("pubsub is nil"))
	}

	engine := &CeremonyDataClockConsensusEngine{
		frame:                0,
		difficulty:           10000,
		logger:               logger,
		state:                consensus.EngineStateStopped,
		clockStore:           clockStore,
		keyStore:             keyStore,
		keyManager:           keyManager,
		pubSub:               pubSub,
		frameChan:            make(chan *protobufs.ClockFrame),
		executionEngines:     map[string]execution.ExecutionEngine{},
		dependencyMap:        make(map[string]*anypb.Any),
		parentSelector:       make([]byte, 32), // 32 zero bytes
		lastFrameReceivedAt:  time.Time{},
		frameProverTrie:      &tries.RollingFrecencyCritbitTrie{},
		frameSeenProverTrie:  &tries.RollingFrecencyCritbitTrie{},
		pendingCommits:       make(chan *anypb.Any),
		pendingCommitWorkers: engineConfig.PendingCommitWorkers,
		prover:               qcrypto.DefaultKZGProver(),
		stagedKeyCommits:     make(InclusionMap),
		stagedKeyPolynomials: make(PolynomialMap),
		syncingStatus:        SyncStatusNotSyncing,
	}

	logger.Info("constructing consensus engine")

	signer, keyType, keyBytes, keyAddress := engine.GetProvingKey(
		engineConfig,
	)
	engine.provingKey = signer
	engine.provingKeyType = keyType
	engine.provingKeyBytes = keyBytes
	engine.provingKeyAddress = keyAddress

	return engine
}
// Start brings the engine online. It loads the latest persisted data clock
// frame for the filter (creating the genesis frame when none exists),
// subscribes to the filter topic and to a node-specific sync topic
// (filter || peer ID), launches the configured number of pending-commit
// workers, and runs the collect → prove → publish state machine on a
// background goroutine until the engine enters a stopping state.
// The returned channel yields nil once startup completes; afterwards it
// carries errors from the state machine loop.
// NOTE(review): errChan is unbuffered, so a state-machine error blocks the
// loop until a caller reads it — confirm callers keep draining the channel.
func (e *CeremonyDataClockConsensusEngine) Start(
	filter []byte,
	seed []byte,
) <-chan error {
	e.logger.Info("starting ceremony consensus engine")
	e.state = consensus.EngineStateStarting
	errChan := make(chan error)

	e.filter = filter
	e.input = seed
	e.state = consensus.EngineStateLoading

	e.logger.Info("loading last seen state")
	latestFrame, err := e.clockStore.GetLatestDataClockFrame(
		e.filter,
		e.frameProverTrie,
	)
	// Only a genuinely missing frame is tolerated; other store errors are
	// unrecoverable at startup.
	if err != nil && !errors.Is(err, store.ErrNotFound) {
		panic(err)
	}

	if latestFrame != nil {
		e.setFrame(latestFrame)
	} else {
		latestFrame = e.createGenesisFrame()
	}

	e.logger.Info("subscribing to pubsub messages")
	e.pubSub.Subscribe(e.filter, e.handleMessage, true)
	// Node-specific topic used for direct sync traffic.
	e.pubSub.Subscribe(
		append(append([]byte{}, e.filter...), e.pubSub.GetPeerID()...),
		e.handleSync,
		true,
	)

	e.state = consensus.EngineStateCollecting

	for i := int64(0); i < e.pendingCommitWorkers; i++ {
		go e.handlePendingCommits(i)
	}

	// Main state machine: collect candidate frames, prove the next frame,
	// publish the result; loops until a stopping state is entered.
	go func() {
		for e.state < consensus.EngineStateStopping {
			switch e.state {
			case consensus.EngineStateCollecting:
				if latestFrame, err = e.collect(latestFrame); err != nil {
					e.logger.Error("could not collect", zap.Error(err))
					errChan <- err
				}
			case consensus.EngineStateProving:
				if latestFrame, err = e.prove(latestFrame); err != nil {
					e.logger.Error("could not prove", zap.Error(err))
					errChan <- err
				}
			case consensus.EngineStatePublishing:
				if err = e.publishProof(latestFrame); err != nil {
					e.logger.Error("could not publish", zap.Error(err))
					errChan <- err
				}
			}
		}
	}()

	// Signal successful startup to the caller.
	go func() {
		errChan <- nil
	}()

	return errChan
}
// Stop shuts the engine down. It unregisters every registered execution
// engine concurrently (passing force through), waits for all of them to
// finish, and marks the engine stopped. The returned channel carries any
// unregister errors followed by a final nil once shutdown completes.
// NOTE(review): errChan is unbuffered, so an executor error blocks its
// goroutine (and therefore wg.Wait) until the caller reads it — confirm
// callers drain the channel.
func (e *CeremonyDataClockConsensusEngine) Stop(force bool) <-chan error {
	e.logger.Info("stopping ceremony consensus engine")
	e.state = consensus.EngineStateStopping
	errChan := make(chan error)

	wg := sync.WaitGroup{}
	wg.Add(len(e.executionEngines))
	for name := range e.executionEngines {
		name := name
		go func(name string) {
			err := <-e.UnregisterExecutor(name, e.frame, force)
			if err != nil {
				errChan <- err
			}
			wg.Done()
		}(name)
	}

	e.logger.Info("waiting for execution engines to stop")
	wg.Wait()
	e.logger.Info("execution engines stopped")

	e.state = consensus.EngineStateStopped

	e.engineMx.Lock()
	defer e.engineMx.Unlock()
	// Final nil signals that shutdown completed.
	go func() {
		errChan <- nil
	}()
	return errChan
}
// GetDifficulty returns the engine's current VDF difficulty parameter.
func (e *CeremonyDataClockConsensusEngine) GetDifficulty() uint32 {
	return e.difficulty
}

// GetFrame returns the engine's current frame number.
func (e *CeremonyDataClockConsensusEngine) GetFrame() uint64 {
	return e.frame
}

// GetState returns the engine's current lifecycle state.
func (e *CeremonyDataClockConsensusEngine) GetState() consensus.EngineState {
	return e.state
}

// GetFrameChannel returns the channel on which newly activated frames are
// delivered.
func (
	e *CeremonyDataClockConsensusEngine,
) GetFrameChannel() <-chan *protobufs.ClockFrame {
	return e.frameChan
}

// GetActiveFrame returns the most recently activated clock frame.
func (
	e *CeremonyDataClockConsensusEngine,
) GetActiveFrame() *protobufs.ClockFrame {
	return e.activeFrame
}

View File

@ -0,0 +1,940 @@
package ceremony
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math/big"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/ff"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/vdf"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
// prove produces the next frame from previousFrame when the engine is in
// the proving state. It aggregates any staged key commits plus the
// materialized ceremony execution output into KZG inclusion proofs, then
// runs the data clock proof over those commitments. On success the engine
// transitions to the publishing state and the proven frame is returned;
// when the engine is not proving (or this node is not an eligible prover)
// no new frame is produced.
//
// Fixes over the original: the staged-commit position counter is now
// incremented (every inclusion previously got Position 0), and all error
// paths now release the mutex they hold (stagedKeyCommitsMx and
// stagedLobbyStateTransitionsMx were leaked on several returns).
func (e *CeremonyDataClockConsensusEngine) prove(
	previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	if e.state == consensus.EngineStateProving {
		e.logger.Info("proving new frame")

		if !e.frameProverTrie.Contains(e.provingKeyAddress) {
			// Not an eligible prover for this frame; fall back to collecting.
			e.state = consensus.EngineStateCollecting
			return previousFrame, nil
		}

		commitments := [][]byte{}
		aggregations := []*protobufs.InclusionAggregateProof{}

		e.stagedKeyCommitsMx.Lock()
		if len(e.stagedKeyCommits) > 0 && len(e.stagedKeyPolynomials) > 0 {
			e.logger.Info(
				"adding staged key commits to frame",
				zap.Uint64("frame_number", previousFrame.FrameNumber+1),
			)
			keyCommitments := []curves.PairingPoint{}
			keyInclusions := []*protobufs.InclusionCommitment{}
			keyPolynomials := [][]curves.PairingScalar{}
			i := uint32(0)

			// Map iteration order is unspecified; positions are assigned in
			// visit order, but the commitment, inclusion and polynomial
			// slices stay index-aligned with each other.
			for commit, inclusion := range e.stagedKeyCommits {
				e.logger.Info(
					"adding staged key commit to aggregate proof",
					zap.Uint64("frame_number", previousFrame.FrameNumber+1),
					zap.Uint32("position", i),
				)
				keyCommitments = append(keyCommitments, commit)
				inclusion.FrameNumber = previousFrame.FrameNumber + 1
				inclusion.Position = i
				keyInclusions = append(keyInclusions, inclusion)
				keyPolynomials = append(keyPolynomials, e.stagedKeyPolynomials[commit])
				// Fix: i was never incremented, so every inclusion was
				// assigned position 0.
				i++
			}

			proof, commitment, err := e.prover.ProveAggregate(
				keyPolynomials,
				keyCommitments,
			)
			if err != nil {
				e.logger.Error("could not produce proof", zap.Error(err))
				// Fix: release the staged-key mutex on this error path; it
				// was previously held forever after a failed proof.
				e.stagedKeyCommitsMx.Unlock()
				return nil, errors.Wrap(err, "prove")
			}
			if proof.IsIdentity() {
				// Fix: release the staged-key mutex here as well.
				e.stagedKeyCommitsMx.Unlock()
				return nil, errors.Wrap(errors.New("invalid proof"), "prove")
			}

			commitments = append(commitments, commitment.ToAffineCompressed())

			keyAggregation := &protobufs.InclusionAggregateProof{
				Filter:               e.filter,
				FrameNumber:          previousFrame.FrameNumber + 1,
				InclusionCommitments: keyInclusions,
				Proof:                proof.ToAffineCompressed(),
			}

			aggregations = append(aggregations, keyAggregation)

			// Reset the staging maps now that their contents are proven.
			e.stagedKeyCommits = make(
				map[curves.PairingPoint]*protobufs.InclusionCommitment,
			)
			e.stagedKeyPolynomials = make(
				map[curves.PairingPoint][]curves.PairingScalar,
			)
		}
		e.stagedKeyCommitsMx.Unlock()

		e.stagedLobbyStateTransitionsMx.Lock()
		executionOutput := &protobufs.IntrinsicExecutionOutput{}
		app, err := application.MaterializeApplicationFromFrame(previousFrame)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		if e.stagedLobbyStateTransitions == nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
		}

		app, err = app.ApplyTransition(
			previousFrame.FrameNumber,
			e.stagedLobbyStateTransitions,
		)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		lobbyState, err := app.MaterializeLobbyStateFromApplication()
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		executionOutput.Address = application.CEREMONY_ADDRESS
		executionOutput.Output, err = proto.Marshal(lobbyState)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		executionOutput.Proof, err = proto.Marshal(e.stagedLobbyStateTransitions)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		data, err := proto.Marshal(executionOutput)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		e.logger.Info("encoded execution output")

		// Execution data in the ceremony is plaintext, we do not need to
		// leverage full encoding for commit/proof reference.
		digest := sha3.NewShake256()
		_, err = digest.Write(data)
		if err != nil {
			e.logger.Error(
				"error converting key bundle to polynomial",
				zap.Error(err),
			)
			// Fix: reset and unlock as on every other error path in this
			// section; previously this returned while holding the mutex.
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		expand := make([]byte, 1024)
		_, err = digest.Read(expand)
		if err != nil {
			e.logger.Error(
				"error converting key bundle to polynomial",
				zap.Error(err),
			)
			// Fix: same mutex-leak repair as above.
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		poly, err := e.prover.BytesToPolynomial(expand)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		e.logger.Info("proving execution output for inclusion")
		polys, err := qcrypto.FFT(
			poly,
			*curves.BLS48581(
				curves.BLS48581G1().NewGeneratorPoint(),
			),
			16,
			false,
		)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		e.logger.Info("converted execution output chunk to evaluation form")

		e.logger.Info("creating kzg commitment")
		commitment, err := e.prover.Commit(polys)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}

		e.logger.Info("creating kzg proof")
		proof, aggregate, err := e.prover.ProveAggregate(
			[][]curves.PairingScalar{polys},
			[]curves.PairingPoint{commitment},
		)
		if err != nil {
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(err, "prove")
		}
		if proof.IsIdentity() {
			// Fix: reset and unlock — previously returned holding the mutex.
			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
			e.stagedLobbyStateTransitionsMx.Unlock()
			return nil, errors.Wrap(errors.New("invalid proof"), "prove")
		}

		commitments = append(commitments, aggregate.ToAffineCompressed())

		e.logger.Info("finalizing execution proof")

		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
		e.stagedLobbyStateTransitionsMx.Unlock()

		execInclusion := &protobufs.InclusionCommitment{
			Filter:      e.filter,
			FrameNumber: previousFrame.FrameNumber + 1,
			TypeUrl:     protobufs.IntrinsicExecutionOutputType,
			Data:        data,
			Commitment:  commitment.ToAffineCompressed(),
		}

		execAggregation := &protobufs.InclusionAggregateProof{
			Filter:      e.filter,
			FrameNumber: previousFrame.FrameNumber + 1,
			InclusionCommitments: []*protobufs.InclusionCommitment{
				execInclusion,
			},
			Proof: proof.ToAffineCompressed(),
		}

		aggregations = append(aggregations, execAggregation)

		frame, err := protobufs.ProveDataClockFrame(
			previousFrame,
			commitments,
			aggregations,
			e.provingKey,
			e.difficulty,
		)
		if err != nil {
			return nil, errors.Wrap(err, "prove")
		}

		e.state = consensus.EngineStatePublishing
		e.logger.Info(
			"returning new proven frame",
			zap.Int("proof_count", len(aggregations)),
			zap.Int("commitment_count", len(commitments)),
		)
		return frame, nil
	}

	return nil, nil
}
// setFrame installs the given frame as the engine's active frame: it derives
// the prover discriminator from the frame's Ed448 public key (when present),
// computes the modular distance between that discriminator and the frame's
// parent selector, derives the next parent selector by hashing the first
// 516 bytes of the frame output, and publishes the frame on frameChan.
func (e *CeremonyDataClockConsensusEngine) setFrame(
	frame *protobufs.ClockFrame,
) {
	pubkey := []byte{}
	discriminator := big.NewInt(0)
	ed448PublicKey := frame.GetPublicKeySignatureEd448()
	// Only the genesis frame (frame 0) may be unsigned.
	if frame.PublicKeySignature == nil && frame.FrameNumber != 0 {
		e.logger.Error("could not set frame, signature invalid for non-zero frame")
		return
	} else if ed448PublicKey != nil {
		pubkey = ed448PublicKey.PublicKey.KeyValue
	}

	if len(pubkey) != 0 {
		var err error
		discriminator, err = poseidon.HashBytes(pubkey)
		if err != nil {
			e.logger.Error(
				"could not set frame",
				zap.Error(err),
			)
			return
		}
	}

	// Distance is the smaller of the two directed differences between the
	// selector and discriminator, reduced modulo the field modulus.
	selector := new(big.Int).SetBytes(frame.ParentSelector)
	l := new(big.Int).Mod(new(big.Int).Sub(selector, discriminator), ff.Modulus())
	r := new(big.Int).Mod(new(big.Int).Sub(discriminator, selector), ff.Modulus())
	distance := r
	if l.Cmp(r) == -1 {
		distance = l
	}

	// The next frame's parent selector is the Poseidon hash of the first
	// 516 bytes of this frame's output.
	previousSelectorBytes := [516]byte{}
	copy(previousSelectorBytes[:], frame.Output[:516])

	parent, err := poseidon.HashBytes(previousSelectorBytes[:])
	if err != nil {
		panic(errors.Wrap(err, "set frame"))
	}
	e.logger.Info("set frame", zap.Uint64("frame_number", frame.FrameNumber))
	e.currentDistance = distance
	e.frame = frame.FrameNumber
	e.parentSelector = parent.Bytes()
	e.activeFrame = frame
	// Delivered asynchronously so setFrame does not block on consumers.
	go func() {
		e.frameChan <- frame
	}()
}
// createGenesisFrame builds, proves, persists and activates frame 0 for the
// ceremony filter. It encodes the phase-one ceremony transcript (G1/G2
// powers and running witnesses), seeds the reward and prover tries with the
// ceremony signatories, commits the resulting execution output via KZG, and
// runs the genesis VDF. Any failure here is unrecoverable, so errors panic.
func (
	e *CeremonyDataClockConsensusEngine,
) createGenesisFrame() *protobufs.ClockFrame {
	e.logger.Info("creating genesis frame")
	// Log the seed input line by line for operator visibility.
	for _, l := range strings.Split(string(e.input), "\n") {
		e.logger.Info(l)
	}

	// Initial VDF over the seed establishes the genesis input message.
	b := sha3.Sum256(e.input)
	v := vdf.New(e.difficulty, b)

	v.Execute()
	o := v.GetOutput()
	inputMessage := o[:]

	e.logger.Info("encoding ceremony and phase one signatories")
	transcript := &protobufs.CeremonyTranscript{}
	for p, s := range qcrypto.CeremonyBLS48581G1 {
		transcript.G1Powers = append(
			transcript.G1Powers,
			&protobufs.BLS48581G1PublicKey{
				KeyValue: s.ToAffineCompressed(),
			},
		)
		e.logger.Info(fmt.Sprintf("encoded G1 power %d", p))
	}
	for p, s := range qcrypto.CeremonyBLS48581G2 {
		transcript.G2Powers = append(
			transcript.G2Powers,
			&protobufs.BLS48581G2PublicKey{
				KeyValue: s.ToAffineCompressed(),
			},
		)
		e.logger.Info(fmt.Sprintf("encoded G2 power %d", p))
	}

	transcript.RunningG1_256Witnesses = append(
		transcript.RunningG1_256Witnesses,
		&protobufs.BLS48581G1PublicKey{
			KeyValue: qcrypto.CeremonyRunningProducts[0].ToAffineCompressed(),
		},
	)

	transcript.RunningG2_256Powers = append(
		transcript.RunningG2_256Powers,
		&protobufs.BLS48581G2PublicKey{
			KeyValue: qcrypto.CeremonyPotPubKeys[len(qcrypto.CeremonyPotPubKeys)-1].
				ToAffineCompressed(),
		},
	)

	// Genesis carries no prior transitions; the output proof is empty.
	outputProof := &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{},
		TransitionInputs: [][]byte{},
	}

	proofBytes, err := proto.Marshal(outputProof)
	if err != nil {
		panic(err)
	}

	e.logger.Info("encoded transcript")
	e.logger.Info("encoding ceremony signatories into application state")

	// Each signatory's Poseidon-derived 32-byte address is credited 50 in
	// the reward trie.
	rewardTrie := &tries.RewardCritbitTrie{}
	for _, s := range qcrypto.CeremonySignatories {
		pubkey := s.ToAffineCompressed()

		addr, err := poseidon.HashBytes(pubkey)
		if err != nil {
			panic(err)
		}

		addrBytes := addr.Bytes()
		addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
		rewardTrie.Add(addrBytes, 0, 50)
	}

	trieBytes, err := rewardTrie.Serialize()
	if err != nil {
		panic(err)
	}

	ceremonyLobbyState := &protobufs.CeremonyLobbyState{
		LobbyState: 0,
		CeremonyState: &protobufs.CeremonyLobbyState_CeremonyOpenState{
			CeremonyOpenState: &protobufs.CeremonyOpenState{
				JoinedParticipants:    []*protobufs.CeremonyLobbyJoin{},
				PreferredParticipants: []*protobufs.Ed448PublicKey{},
			},
		},
		LatestTranscript: transcript,
		RewardTrie:       trieBytes,
	}
	outputBytes, err := proto.Marshal(ceremonyLobbyState)
	if err != nil {
		panic(err)
	}

	executionOutput := &protobufs.IntrinsicExecutionOutput{
		Address: []byte(e.filter),
		Output:  outputBytes,
		Proof:   proofBytes,
	}

	data, err := proto.Marshal(executionOutput)
	if err != nil {
		panic(err)
	}

	e.logger.Info("encoded execution output")

	// Hash-expand the output to 1024 bytes before polynomial conversion,
	// mirroring the scheme used in prove and handleClockFrameData.
	digest := sha3.NewShake256()
	_, err = digest.Write(data)
	if err != nil {
		panic(err)
	}

	expand := make([]byte, 1024)
	_, err = digest.Read(expand)
	if err != nil {
		panic(err)
	}

	poly, err := e.prover.BytesToPolynomial(expand)
	if err != nil {
		panic(err)
	}

	e.logger.Info("proving execution output for inclusion")
	evalPoly, err := qcrypto.FFT(
		poly,
		*curves.BLS48581(
			curves.BLS48581G1().NewGeneratorPoint(),
		),
		16,
		false,
	)
	if err != nil {
		panic(err)
	}

	e.logger.Info(
		"converted execution output chunk to evaluation form",
		zap.Int("poly_size", len(evalPoly)),
	)

	e.logger.Info("creating kzg commitment")
	commitment, err := e.prover.Commit(evalPoly)
	if err != nil {
		panic(err)
	}

	e.logger.Info("creating kzg proof")
	proof, aggregate, err := e.prover.ProveAggregate(
		[][]curves.PairingScalar{evalPoly},
		[]curves.PairingPoint{commitment},
	)
	if err != nil {
		panic(err)
	}

	e.logger.Info("finalizing execution proof")

	// The compressed aggregate commitment is appended to the VDF output to
	// form the genesis frame input.
	inputMessage = append(
		append([]byte{}, inputMessage...),
		aggregate.ToAffineCompressed()...,
	)

	ceremonyExecutiveProof := &protobufs.InclusionAggregateProof{
		Filter:      e.filter,
		FrameNumber: 0,
		InclusionCommitments: []*protobufs.InclusionCommitment{
			{
				Filter:      e.filter,
				FrameNumber: 0,
				Position:    0,
				TypeUrl:     protobufs.IntrinsicExecutionOutputType,
				Data:        data,
				Commitment:  commitment.ToAffineCompressed(),
			},
		},
		Proof: proof.ToAffineCompressed(),
	}

	// Signatories are special, they don't have an inclusion proof because
	// they have not broadcasted communication keys, but they still get
	// contribution rights prior to PoMW, because they did produce meaningful
	// work in the first phase:
	e.logger.Info("encoding signatories to prover trie")

	for _, s := range qcrypto.CeremonySignatories {
		pubkey := s.ToAffineCompressed()
		e.logger.Info("0x" + hex.EncodeToString(pubkey))

		addr, err := poseidon.HashBytes(pubkey)
		if err != nil {
			panic(err)
		}

		addrBytes := addr.Bytes()
		addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
		e.frameProverTrie.Add(addrBytes, 0)
	}

	e.logger.Info("proving genesis frame")
	// VDF input: filter || frame number || 0 || difficulty || seed.
	input := []byte{}
	input = append(input, e.filter...)
	input = binary.BigEndian.AppendUint64(input, e.frame)
	input = binary.BigEndian.AppendUint64(input, uint64(0))
	input = binary.BigEndian.AppendUint32(input, e.difficulty)
	input = append(input, e.input...)

	b = sha3.Sum256(input)
	v = vdf.New(e.difficulty, b)

	v.Execute()
	o = v.GetOutput()

	// Genesis frame is unsigned (see setFrame's frame-zero exception).
	frame := &protobufs.ClockFrame{
		Filter:         e.filter,
		FrameNumber:    e.frame,
		Timestamp:      0,
		Difficulty:     e.difficulty,
		Input:          inputMessage,
		Output:         o[:],
		ParentSelector: e.parentSelector,
		AggregateProofs: []*protobufs.InclusionAggregateProof{
			ceremonyExecutiveProof,
		},
		PublicKeySignature: nil,
	}

	// NOTE(review): the result order assumed here (parent, distance,
	// selector) disagrees with handleClockFrameData's (parentSelector,
	// selector, distance) — one of the two call sites is misordered; confirm
	// against GetParentSelectorAndDistance's implementation.
	parent, distance, selector, err := frame.GetParentSelectorAndDistance()
	if err != nil {
		panic(err)
	}

	txn, err := e.clockStore.NewTransaction()
	if err != nil {
		panic(err)
	}

	if err := e.clockStore.PutCandidateDataClockFrame(
		parent.Bytes(),
		distance.Bytes(),
		selector.Bytes(),
		frame,
		txn,
	); err != nil {
		panic(err)
	}

	if err := e.clockStore.PutDataClockFrame(
		frame,
		e.frameProverTrie,
		txn,
	); err != nil {
		panic(err)
	}

	if err := txn.Commit(); err != nil {
		panic(err)
	}

	e.setFrame(frame)
	return frame
}
// commitLongestPath walks the candidate clock frame tree starting at the
// latest committed data clock frame, tracking every fork, and commits the
// longest branch to the clock store once the forks collapse to a single
// candidate chain. It returns the deepest frame observed (the new head).
func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
	*protobufs.ClockFrame,
	error,
) {
	current, err := e.clockStore.GetLatestDataClockFrame(e.filter, nil)
	if err != nil {
		return nil, errors.Wrap(err, "commit longest path")
	}

	e.logger.Info(
		"searching from committed frame",
		zap.Uint64("frame_number", current.FrameNumber),
	)

	// Each element of runningFrames is one fork: a path of frames starting at
	// the committed head. currentDepth indexes the last frame of each path.
	runningFrames := [][]*protobufs.ClockFrame{{current}}
	commitReady := false
	currentDepth := 0

	for {
		nextRunningFrames := [][]*protobufs.ClockFrame{}
		for _, s := range runningFrames {
			e.logger.Info(
				"ranging over candidates for frame",
				zap.Uint64("frame_number", s[currentDepth].FrameNumber),
			)
			selector, err := s[currentDepth].GetSelector()
			if err != nil {
				return nil, errors.Wrap(err, "commit longest path")
			}

			iter, err := e.clockStore.RangeCandidateDataClockFrames(
				e.filter,
				selector.Bytes(),
				s[currentDepth].FrameNumber+1,
			)
			if err != nil {
				return nil, errors.Wrap(err, "commit longest path")
			}

			for iter.First(); iter.Valid(); iter.Next() {
				value, err := iter.Value()
				if err != nil {
					return nil, errors.Wrap(err, "commit longest path")
				}

				selectorBytes := selector.Bytes()
				selectorBytes = append(
					make([]byte, 32-len(selectorBytes)),
					selectorBytes...,
				)
				nearest := e.frameProverTrie.FindNearest(
					selectorBytes,
				)
				addr, err := value.GetAddress()
				// Fix: the original guarded the equality check with
				// err != nil, which both used addr on the error path and made
				// the short-circuit unreachable on success. Surface the error
				// like every other failure in this function.
				if err != nil {
					return nil, errors.Wrap(err, "commit longest path")
				}

				// If we got the outright nearest, then skip all this, we know this is
				// the right frame for the selector.
				if bytes.Equal(nearest.Bits(), addr) {
					nextRunningFrames = append(
						nextRunningFrames,
						append(
							append([]*protobufs.ClockFrame{}, s...),
							value,
						),
					)
					break
				}

				// Iterated values will always be in order of shortest distance, this
				// will always keep closest selected, longest path
				if current.FrameNumber < value.FrameNumber {
					e.logger.Info(
						"setting longest path cursor to frame",
						zap.Uint64("frame_number", value.FrameNumber),
					)
					current = value
				}

				e.logger.Info(
					"adding candidate",
					zap.Uint64("frame_number", value.FrameNumber),
					zap.Binary("output", value.Output),
				)

				nextRunningFrames = append(
					nextRunningFrames,
					append(
						append([]*protobufs.ClockFrame{}, s...),
						value,
					),
				)
			}

			iter.Close()
		}

		if commitReady && len(nextRunningFrames) == 1 {
			commitReady = false

			e.logger.Info(
				"consensus found, committing frames",
				zap.Int("commit_depth", len(runningFrames[0])),
			)

			txn, err := e.clockStore.NewTransaction()
			if err != nil {
				return nil, errors.Wrap(err, "commit longest path")
			}

			// Skip index 0: it is the already-committed head.
			for _, s := range runningFrames[0][1:] {
				e.logger.Info(
					"committing candidate",
					zap.Uint64("frame_number", s.FrameNumber),
					zap.Binary("output", s.Output),
				)

				addr, err := s.GetAddress()
				if err != nil {
					return nil, errors.Wrap(err, "commit longest path")
				}

				e.frameProverTrie.Add(addr, s.FrameNumber)
				if err := e.clockStore.PutDataClockFrame(
					s,
					e.frameProverTrie,
					txn,
				); err != nil {
					e.logger.Error(
						"could not commit candidate",
						zap.Error(err),
						zap.Uint64("frame_number", s.FrameNumber),
						zap.Binary("output", s.Output),
					)
					return nil, errors.Wrap(err, "commit longest path")
				}

				e.logger.Info(
					"committing aggregate proofs",
					zap.Int("proof_count", len(s.AggregateProofs)),
				)

				// Persist the key material carried by each frame's inclusion
				// commitments alongside the frame itself.
				for _, p := range s.AggregateProofs {
					e.logger.Info(
						"committing inclusions",
						zap.Int("inclusions_count", len(p.InclusionCommitments)),
					)

					for _, c := range p.InclusionCommitments {
						switch c.TypeUrl {
						case protobufs.ProvingKeyAnnouncementType:
							provingKey := &protobufs.ProvingKeyAnnouncement{}
							if err := proto.Unmarshal(c.Data, provingKey); err != nil {
								e.logger.Error(
									"could not commit candidate",
									zap.Error(err),
									zap.Uint64("frame_number", s.FrameNumber),
									zap.Binary("commitment", c.Commitment),
								)
								return nil, errors.Wrap(err, "commit longest path")
							}

							e.logger.Info(
								"committing proving key",
								zap.Uint64("frame_number", s.FrameNumber),
								zap.Binary("commitment", c.Commitment),
							)

							if err := e.keyStore.IncludeProvingKey(c, txn); err != nil {
								e.logger.Error(
									"could not commit candidate",
									zap.Error(err),
									zap.Uint64("frame_number", s.FrameNumber),
									zap.Binary("output", s.Output),
								)
								return nil, errors.Wrap(err, "commit longest path")
							}
						case protobufs.KeyBundleAnnouncementType:
							bundle := &protobufs.KeyBundleAnnouncement{}
							if err := proto.Unmarshal(c.Data, bundle); err != nil {
								e.logger.Error(
									"could not commit candidate",
									zap.Error(err),
									zap.Uint64("frame_number", s.FrameNumber),
									zap.Binary("commitment", c.Commitment),
								)
								return nil, errors.Wrap(err, "commit longest path")
							}

							e.logger.Info(
								"committing key bundle",
								zap.Uint64("frame_number", s.FrameNumber),
								zap.Binary("commitment", c.Commitment),
							)

							if err := e.keyStore.PutKeyBundle(
								bundle.ProvingKeyBytes,
								c,
								txn,
							); err != nil {
								e.logger.Error(
									"could not commit candidate",
									zap.Error(err),
									zap.Uint64("frame_number", s.FrameNumber),
									zap.Binary("output", s.Output),
								)
								return nil, errors.Wrap(err, "commit longest path")
							}
						}
					}
				}
			}

			if err := txn.Commit(); err != nil {
				e.logger.Error(
					"could not commit candidates",
					zap.Error(err),
				)
				return nil, errors.Wrap(err, "commit longest path")
			}

			// Restart the search from the newly committed tip.
			runningFrames = [][]*protobufs.ClockFrame{
				{nextRunningFrames[0][currentDepth+1]},
			}
			currentDepth = 0
		} else {
			e.logger.Info(
				"not ready to commit",
				zap.Int("forks", len(nextRunningFrames)),
				zap.Int("current_depth", currentDepth),
			)
			// A single surviving fork arms the commit for the next pass.
			commitReady = len(nextRunningFrames) == 1
			runningFrames = nextRunningFrames
			currentDepth++
		}

		if len(nextRunningFrames) == 0 {
			e.logger.Info("deepest consensus reached")
			break
		}
	}

	return current, nil
}
// collect gathers VDF proofs for the next frame: it optionally syncs missing
// frames from a random peer, waits (with exponential backoff) for the sync to
// finish, commits the longest candidate path, and returns the resulting
// leader frame. Returns (nil, nil) when the engine is not in the collecting
// state.
func (e *CeremonyDataClockConsensusEngine) collect(
	currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	if e.state == consensus.EngineStateCollecting {
		e.logger.Info("collecting vdf proofs")

		latest, err := e.clockStore.GetLatestDataClockFrame(e.filter, nil)
		if err != nil {
			e.logger.Error("could not obtain latest clock frame", zap.Error(err))
			return nil, errors.Wrap(err, "collect")
		}

		if e.syncingStatus == SyncStatusNotSyncing {
			peer, err := e.pubSub.GetRandomPeer(e.filter)
			if err != nil {
				if errors.Is(err, p2p.ErrNoPeersAvailable) {
					e.logger.Warn("no peers available, skipping sync")
				} else {
					e.logger.Error("error while fetching random peer", zap.Error(err))
				}
			} else {
				e.syncingStatus = SyncStatusAwaitingResponse
				e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
				e.syncingTarget = peer

				channel := e.createPeerReceiveChannel(peer)
				e.logger.Info(
					"listening on peer receive channel",
					zap.Binary("channel", channel),
				)
				e.pubSub.Subscribe(channel, e.handleSync, true)
				e.pubSub.Subscribe(
					append(append([]byte{}, e.filter...), peer...),
					func(message *pb.Message) error { return nil },
					true,
				)

				go func() {
					// Give the subscriptions a moment to propagate before
					// asking the peer for frames.
					time.Sleep(2 * time.Second)
					if err := e.publishMessage(
						append(append([]byte{}, e.filter...), peer...),
						&protobufs.ClockFramesRequest{
							Filter:          e.filter,
							FromFrameNumber: latest.FrameNumber + 1,
						}); err != nil {
						e.logger.Error(
							"could not publish clock frame request",
							zap.Error(err),
						)
					}
				}()
			}
		}

		// Exponential backoff while waiting for the sync to complete, capped
		// at 100 * 2^7 ms; once the cap is hit while still awaiting the first
		// response, the sync attempt is abandoned.
		waitDecay := time.Duration(2000)
		for e.syncingStatus != SyncStatusNotSyncing {
			e.logger.Info(
				"waiting for sync to complete...",
				zap.Duration("wait_decay", waitDecay),
			)

			time.Sleep(waitDecay * time.Millisecond)

			waitDecay = waitDecay * 2
			if waitDecay >= (100 * (2 << 6)) {
				if e.syncingStatus == SyncStatusAwaitingResponse {
					e.logger.Info("maximum wait for sync response, skipping sync")
					e.syncingStatus = SyncStatusNotSyncing
					break
				} else {
					waitDecay = 100 * (2 << 6)
				}
			}
		}

		e.logger.Info("selecting leader")
		latestFrame, err := e.commitLongestPath()
		if err != nil {
			e.logger.Error("could not collect longest path", zap.Error(err))
			return nil, errors.Wrap(err, "collect")
		}

		go func() {
			// Fix: this goroutine previously wrote the enclosing function's
			// err variable, racing with the return path. Keep the error local.
			_, keyErr := e.keyStore.GetProvingKey(e.provingKeyBytes)
			if errors.Is(keyErr, store.ErrNotFound) &&
				latestFrame.FrameNumber-e.lastKeyBundleAnnouncementFrame > 6 {
				if announceErr := e.announceKeyBundle(); announceErr != nil {
					panic(announceErr)
				}
				e.lastKeyBundleAnnouncementFrame = latestFrame.FrameNumber
			}
		}()

		e.logger.Info(
			"returning leader frame",
			zap.Uint64("frame_number", latestFrame.FrameNumber),
		)

		// Only advance local state when we are at least as far along as the
		// frame we last published; the leader frame is returned either way.
		if latestFrame.FrameNumber >= currentFramePublished.FrameNumber {
			e.setFrame(latestFrame)
			e.state = consensus.EngineStateProving
		}
		return latestFrame, nil
	}

	return nil, nil
}

View File

@ -0,0 +1,78 @@
package ceremony
import (
	"time"

	"go.uber.org/zap"
	"source.quilibrium.com/quilibrium/monorepo/node/execution"
)
// RegisterExecutor injects an execution engine into the consensus engine once
// the engine's clock reaches the requested frame. The returned channel
// receives nil after the executor has been registered.
func (e *CeremonyDataClockConsensusEngine) RegisterExecutor(
	exec execution.ExecutionEngine,
	frame uint64,
) <-chan error {
	logger := e.logger.With(zap.String("execution_engine_name", exec.GetName()))
	logger.Info("registering execution engine")
	errChan := make(chan error)

	go func() {
		for {
			logger.Info(
				"awaiting frame",
				zap.Uint64("current_frame", e.frame),
				zap.Uint64("target_frame", frame),
			)

			// NOTE(review): e.frame is read without holding engineMx —
			// confirm it is updated atomically elsewhere.
			newFrame := e.frame
			if newFrame >= frame {
				logger.Info(
					"injecting execution engine at frame",
					zap.Uint64("current_frame", newFrame),
				)

				e.engineMx.Lock()
				e.executionEngines[exec.GetName()] = exec
				e.engineMx.Unlock()

				errChan <- nil
				break
			}

			// Fix: poll once per second instead of spinning — the original
			// tight loop pegged a core and flooded the log while waiting.
			time.Sleep(1 * time.Second)
		}
	}()

	return errChan
}
// UnregisterExecutor removes a previously registered execution engine once
// the consensus engine's clock reaches the requested frame. The force flag is
// currently unused. The returned channel receives nil after removal.
func (e *CeremonyDataClockConsensusEngine) UnregisterExecutor(
	name string,
	frame uint64,
	force bool,
) <-chan error {
	logger := e.logger.With(zap.String("execution_engine_name", name))
	logger.Info("unregistering execution engine")
	errChan := make(chan error)

	go func() {
		for {
			logger.Info(
				"awaiting frame",
				zap.Uint64("current_frame", e.frame),
				zap.Uint64("target_frame", frame),
			)

			// NOTE(review): e.frame is read without holding engineMx —
			// confirm it is updated atomically elsewhere.
			newFrame := e.frame
			if newFrame >= frame {
				logger.Info(
					"removing execution engine at frame",
					zap.Uint64("current_frame", newFrame),
				)

				e.engineMx.Lock()
				delete(e.executionEngines, name)
				e.engineMx.Unlock()

				errChan <- nil
				break
			}

			// Fix: poll once per second instead of spinning — the original
			// tight loop pegged a core and flooded the log while waiting.
			time.Sleep(1 * time.Second)
		}
	}()

	return errChan
}

View File

@ -0,0 +1,85 @@
package ceremony
import (
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// handlePendingCommits is a worker loop that drains the pendingCommits
// channel forever. Key bundle announcements are staged for inclusion via
// includeKeyBundle; any other message type is ignored. Inclusion failures are
// logged and the worker keeps running.
func (e *CeremonyDataClockConsensusEngine) handlePendingCommits(
	workerId int64,
) {
	for {
		commit := <-e.pendingCommits
		if commit.TypeUrl != protobufs.KeyBundleAnnouncementType {
			continue
		}

		if err := e.includeKeyBundle(commit); err != nil {
			e.logger.Error(
				"failed to include key bundle",
				zap.Error(errors.Wrap(err, "handle pending commits")),
				zap.Int64("worker_id", workerId),
			)
		}
	}
}
// includeKeyBundle converts a key bundle announcement into a polynomial,
// commits to it with KZG, and stages the commitment and evaluation-form
// polynomial for inclusion in a future frame.
func (e *CeremonyDataClockConsensusEngine) includeKeyBundle(
	any *anypb.Any,
) error {
	poly, err := e.prover.BytesToPolynomial(any.Value)
	if err != nil {
		e.logger.Error(
			"error converting key bundle to polynomial",
			zap.Error(err),
		)
		return errors.Wrap(err, "include key bundle")
	}

	// NOTE(review): the bound re-reads len(poly) while the body grows poly,
	// so this appends only about (128-len)/2 zero scalars instead of padding
	// to 128. The verification path in handleClockFramesResponse uses the
	// identical pattern, so prover and verifier currently agree — if this is
	// ever corrected, both sides must change together.
	for i := 0; i < 128-len(poly); i++ {
		poly = append(
			poly,
			curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
		)
	}

	// Convert to evaluation form over a domain of size 128.
	evalPoly, err := crypto.FFT(
		poly,
		*curves.BLS48581(
			curves.BLS48581G1().NewGeneratorPoint(),
		),
		128,
		false,
	)
	if err != nil {
		e.logger.Error(
			"error performing fast fourier transform on key bundle",
			zap.Error(err),
		)
		return errors.Wrap(err, "include key bundle")
	}

	commitment, err := e.prover.Commit(evalPoly)
	if err != nil {
		e.logger.Error(
			"error creating kzg commitment",
			zap.Error(err),
		)
		return errors.Wrap(err, "include key bundle")
	}

	// Stage under the commitment point itself; the maps are keyed by the
	// interface value returned from Commit.
	e.stagedKeyCommitsMx.Lock()
	e.stagedKeyCommits[commitment] = &protobufs.InclusionCommitment{
		Filter:     e.filter,
		TypeUrl:    any.TypeUrl,
		Data:       any.Value,
		Commitment: commitment.ToAffineCompressed(),
	}
	e.stagedKeyPolynomials[commitment] = evalPoly
	e.stagedKeyCommitsMx.Unlock()

	return nil
}

View File

@ -0,0 +1,655 @@
package ceremony
import (
"bytes"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
// handleSync processes a message received on a peer-to-peer sync channel:
// it unwraps the envelope, fans the message out to every registered execution
// engine for validation, and then dispatches on the payload type (clock frame
// request/response, proving key, key bundle).
func (e *CeremonyDataClockConsensusEngine) handleSync(
	message *pb.Message,
) error {
	e.logger.Debug(
		"received message",
		zap.Binary("data", message.Data),
		zap.Binary("from", message.From),
		zap.Binary("signature", message.Signature),
	)
	msg := &protobufs.Message{}

	if err := proto.Unmarshal(message.Data, msg); err != nil {
		return errors.Wrap(err, "handle sync")
	}

	any := &anypb.Any{}
	if err := proto.Unmarshal(msg.Payload, any); err != nil {
		return errors.Wrap(err, "handle sync")
	}

	// NOTE(review): e.executionEngines is read here without engineMx while
	// Register/UnregisterExecutor mutate it under the lock — confirm this is
	// safe. Also, SetLimit(0) (no engines registered) would make eg.Go block
	// forever; presumably at least one engine is always registered.
	eg := errgroup.Group{}
	eg.SetLimit(len(e.executionEngines))

	for name := range e.executionEngines {
		name := name
		eg.Go(func() error {
			// Messages produced by the engines are intentionally discarded
			// here; sync handling only validates.
			// if message,err := e.executionEngines[name].ProcessMessage(
			if _, err := e.executionEngines[name].ProcessMessage(
				msg.Address,
				msg,
			); err != nil {
				e.logger.Error(
					"could not process message for engine",
					zap.Error(err),
					zap.String("engine_name", name),
				)
				return err
			}

			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		e.logger.Error("rejecting invalid message", zap.Error(err))
		return errors.Wrap(err, "handle sync")
	}

	switch any.TypeUrl {
	case protobufs.ClockFramesResponseType:
		if err := e.handleClockFramesResponse(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle sync")
		}
	case protobufs.ClockFramesRequestType:
		if err := e.handleClockFramesRequest(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle sync")
		}
	case protobufs.ProvingKeyAnnouncementType:
		if err := e.handleProvingKey(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle sync")
		}
	case protobufs.KeyBundleAnnouncementType:
		if err := e.handleKeyBundle(
			message.From,
			msg.Address,
			any,
		); err != nil {
			return errors.Wrap(err, "handle sync")
		}
	}

	return nil
}
// createPeerReceiveChannel derives the channel identifier this node listens
// on for messages from peerID: filter || peerID || ourPeerID.
func (e *CeremonyDataClockConsensusEngine) createPeerReceiveChannel(
	peerID []byte,
) []byte {
	self := e.pubSub.GetPeerID()
	channel := make([]byte, 0, len(e.filter)+len(peerID)+len(self))
	channel = append(channel, e.filter...)
	channel = append(channel, peerID...)
	return append(channel, self...)
}
// createPeerSendChannel derives the channel identifier this node publishes on
// to reach peerID: filter || ourPeerID || peerID (the mirror of the receive
// channel, so the remote side's receive channel matches).
func (e *CeremonyDataClockConsensusEngine) createPeerSendChannel(
	peerID []byte,
) []byte {
	self := e.pubSub.GetPeerID()
	channel := make([]byte, 0, len(e.filter)+len(self)+len(peerID))
	channel = append(channel, e.filter...)
	channel = append(channel, self...)
	return append(channel, peerID...)
}
// handleClockFramesResponse validates and stores clock frames received from
// the current syncing target. Each frame's VDF is verified, its aggregate KZG
// proofs are re-derived and checked against the commitments embedded in the
// frame input, and valid frames are stored as candidates.
func (e *CeremonyDataClockConsensusEngine) handleClockFramesResponse(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	// Ignore our own messages.
	if bytes.Equal(address, e.provingKeyAddress) {
		return nil
	}

	// Only the peer we are actively syncing from may answer.
	if !bytes.Equal(peerID, e.syncingTarget) {
		e.logger.Warn(
			"received clock frames response from unexpected target",
			zap.Binary("peer_id", peerID),
			zap.Binary("expected_peer_id", e.syncingTarget),
		)
		return nil
	}

	e.syncingStatus = SyncStatusSynchronizing

	defer func() { e.syncingStatus = SyncStatusNotSyncing }()

	response := &protobufs.ClockFramesResponse{}
	if err := any.UnmarshalTo(response); err != nil {
		return errors.Wrap(err, "handle clock frames response")
	}

	// Work on a deep copy of the prover trie (serialize/deserialize round
	// trip) so membership checks during validation do not mutate the live
	// trie; only the copy and the seen-trie are updated per accepted frame.
	trieCopyBytes, err := e.frameProverTrie.Serialize()
	if err != nil {
		return errors.Wrap(err, "handle clock frames response")
	}

	trieCopy := &tries.RollingFrecencyCritbitTrie{}
	if err = trieCopy.Deserialize(trieCopyBytes); err != nil {
		return errors.Wrap(err, "handle clock frames response")
	}

	for _, frame := range response.ClockFrames {
		prover, err := frame.GetAddress()
		if err != nil {
			return errors.Wrap(err, "handle clock frames response")
		}
		// NOTE(review): a single frame from an unknown prover aborts the
		// entire batch, discarding any frames already validated before it.
		earliestFrame, _, count := trieCopy.Get(prover)
		if count == 0 || earliestFrame >= frame.FrameNumber {
			return errors.Wrap(
				errors.New("prover not in trie"),
				"handle clock frame response",
			)
		}

		e.logger.Info(
			"processing clock frame",
			zap.Binary("sender_address", address),
			zap.Binary("prover_address", prover),
			zap.Binary("filter", frame.Filter),
			zap.Uint64("frame_number", frame.FrameNumber),
		)

		if err := frame.VerifyDataClockFrame(); err != nil {
			e.logger.Error("could not verify clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame response")
		}

		// Parse the aggregate commitments embedded in the frame input:
		// presumably a 516-byte header followed by 74-byte affine-compressed
		// BLS48-581 G1 points — confirm against the frame serialization spec.
		aggregateCommitments := []curves.PairingPoint{}
		for i := 0; i < (len(frame.Input)-516)/74; i++ {
			c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
				frame.Input[516+(i*74) : 516+(i*74)+74],
			)
			if err != nil {
				e.logger.Error("could not verify clock frame", zap.Error(err))
				return errors.Wrap(err, "handle clock frame response")
			}
			aggregateCommitments = append(
				aggregateCommitments,
				c.(curves.PairingPoint),
			)
		}

		// Re-derive each aggregate proof's polynomials from the commitment
		// data and check them against the embedded aggregate commitment.
		for i, proof := range frame.AggregateProofs {
			aggregatePoly := [][]curves.PairingScalar{}
			commitments := []curves.PairingPoint{}

			for _, commit := range proof.GetInclusionCommitments() {
				switch commit.TypeUrl {
				case protobufs.IntrinsicExecutionOutputType:
					// Execution outputs are hashed with SHAKE-256 into a
					// 1024-byte expansion, then mapped to a 16-point domain.
					e.logger.Info("confirming inclusion in aggregate")
					digest := sha3.NewShake256()
					_, err := digest.Write(commit.Data)
					if err != nil {
						e.logger.Error(
							"error converting key bundle to polynomial",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}

					expand := make([]byte, 1024)
					_, err = digest.Read(expand)
					if err != nil {
						e.logger.Error(
							"error converting key bundle to polynomial",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}

					poly, err := e.prover.BytesToPolynomial(expand)
					if err != nil {
						e.logger.Error(
							"error converting key bundle to polynomial",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}

					evalPoly, err := qcrypto.FFT(
						poly,
						*curves.BLS48581(
							curves.BLS48581G1().NewGeneratorPoint(),
						),
						16,
						false,
					)
					if err != nil {
						e.logger.Error(
							"error performing fast fourier transform on key bundle",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}
					e.logger.Info(
						"created fft of polynomial",
						zap.Int("poly_size", len(evalPoly)),
					)

					aggregatePoly = append(aggregatePoly, evalPoly)

					c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
						commit.Commitment,
					)
					if err != nil {
						e.logger.Error("could not verify clock frame", zap.Error(err))
						return errors.Wrap(err, "handle clock frame data")
					}
					commitments = append(commitments, c.(curves.PairingPoint))
				default:
					// Key material: mapped directly to a 128-point domain.
					poly, err := e.prover.BytesToPolynomial(commit.Data)
					if err != nil {
						e.logger.Error(
							"error converting key bundle to polynomial",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}

					// NOTE(review): this bound re-reads len(poly) while the
					// body grows poly, so it pads only about half the deficit
					// — it intentionally must match the identical pattern in
					// includeKeyBundle; change both together or not at all.
					for i := 0; i < 128-len(poly); i++ {
						poly = append(
							poly,
							curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
						)
					}

					evalPoly, err := qcrypto.FFT(
						poly,
						*curves.BLS48581(
							curves.BLS48581G1().NewGeneratorPoint(),
						),
						128,
						false,
					)
					if err != nil {
						e.logger.Error(
							"error performing fast fourier transform on key bundle",
							zap.Error(err),
						)
						return errors.Wrap(err, "handle clock frame response")
					}

					aggregatePoly = append(aggregatePoly, evalPoly)

					c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
						commit.Commitment,
					)
					if err != nil {
						e.logger.Error("could not verify clock frame", zap.Error(err))
						return errors.Wrap(err, "handle clock frame response")
					}
					commitments = append(commitments, c.(curves.PairingPoint))
				}
			}

			p, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
				proof.Proof,
			)
			if err != nil {
				e.logger.Error("could not verify clock frame", zap.Error(err))
				return errors.Wrap(err, "handle clock frame response")
			}

			result, err := e.prover.VerifyAggregateProof(
				aggregatePoly,
				commitments,
				aggregateCommitments[i],
				p.(curves.PairingPoint),
			)
			if err != nil {
				e.logger.Error("could not verify clock frame", zap.Error(err))
				return errors.Wrap(err, "handle clock frame response")
			}

			if !result {
				e.logger.Error("could not verify clock frame", zap.Error(err))
				return errors.Wrap(
					errors.New("invalid proof"),
					"handle clock frame response",
				)
			}
		}

		e.logger.Info(
			"clock frame was valid",
			zap.Binary("sender_address", address),
			zap.Binary("prover_address", prover),
			zap.Binary("filter", frame.Filter),
			zap.Uint64("frame_number", frame.FrameNumber),
		)

		// NOTE(review): the result order bound here (parentSelector,
		// selector, distance) differs from the genesis-frame call site, which
		// binds (parent, distance, selector) — confirm the method's actual
		// return order; one of the two call sites appears to swap
		// selector/distance.
		parentSelector, selector, distance, err :=
			frame.GetParentSelectorAndDistance()
		if err != nil {
			return errors.Wrap(err, "handle clock frame data")
		}
		e.logger.Info(
			"difference between selector/discriminator",
			zap.Binary("difference", distance.Bytes()),
		)

		txn, err := e.clockStore.NewTransaction()
		if err != nil {
			e.logger.Error("could not save candidate clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame response")
		}

		if err := e.clockStore.PutCandidateDataClockFrame(
			parentSelector.Bytes(),
			distance.Bytes(),
			selector.Bytes(),
			frame,
			txn,
		); err != nil {
			e.logger.Error("could not save candidate clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame response")
		}

		if err := txn.Commit(); err != nil {
			e.logger.Error("could not save candidate clock frame", zap.Error(err))
			return errors.Wrap(err, "handle clock frame response")
		}

		if e.frame < frame.FrameNumber {
			e.latestFrameReceived = frame.FrameNumber
			e.lastFrameReceivedAt = time.Now().UTC()
		}
		trieCopy.Add(prover, frame.FrameNumber)
		e.frameSeenProverTrie.Add(prover, frame.FrameNumber)
	}

	return nil
}
// handleProvingKeyRequest answers a peer's request for a proving key
// announcement. It looks the key up first among included (committed) proving
// keys, then among staged ones, and publishes the announcement back on the
// direct send channel for the requesting peer. Unknown keys are ignored.
func (e *CeremonyDataClockConsensusEngine) handleProvingKeyRequest(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	// Ignore our own messages.
	if bytes.Equal(address, e.provingKeyAddress) {
		return nil
	}

	request := &protobufs.ProvingKeyRequest{}
	if err := any.UnmarshalTo(request); err != nil {
		return errors.Wrap(err, "handle proving key request")
	}

	if len(request.ProvingKeyBytes) == 0 {
		e.logger.Warn(
			"received proving key request for empty key",
			zap.Binary("peer_id", peerID),
			zap.Binary("address", address),
		)
		return errors.Wrap(
			errors.New("empty proving key"),
			"handle proving key request",
		)
	}

	channel := e.createPeerSendChannel(peerID)

	e.pubSub.Subscribe(channel, e.handleSync, true)

	e.logger.Info(
		"received proving key request",
		zap.Binary("peer_id", peerID),
		zap.Binary("address", address),
		zap.Binary("proving_key", request.ProvingKeyBytes),
	)

	var provingKey *protobufs.ProvingKeyAnnouncement
	inclusion, err := e.keyStore.GetProvingKey(request.ProvingKeyBytes)
	if err != nil {
		if !errors.Is(err, store.ErrNotFound) {
			e.logger.Error(
				"peer asked for proving key that returned error",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Binary("proving_key", request.ProvingKeyBytes),
			)
			return errors.Wrap(err, "handle proving key request")
		}

		provingKey, err = e.keyStore.GetStagedProvingKey(request.ProvingKeyBytes)
		// Fix: the original inverted this check (!errors.Is on the success
		// path), which logged an error and aborted on a successful staged
		// lookup while making the not-found warning unreachable.
		if err != nil && !errors.Is(err, store.ErrNotFound) {
			e.logger.Error(
				"peer asked for proving key that returned error",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Binary("proving_key", request.ProvingKeyBytes),
			)
			return errors.Wrap(err, "handle proving key request")
		} else if errors.Is(err, store.ErrNotFound) {
			// The key is neither included nor staged; best-effort, not an
			// error for us.
			e.logger.Warn(
				"peer asked for unknown proving key",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Binary("proving_key", request.ProvingKeyBytes),
			)
			return nil
		}
	} else {
		// Fix: allocate the message before unmarshaling — the original
		// unmarshaled into the nil *ProvingKeyAnnouncement declared above.
		provingKey = &protobufs.ProvingKeyAnnouncement{}
		err := proto.Unmarshal(inclusion.Data, provingKey)
		if err != nil {
			e.logger.Error(
				"inclusion commitment could not be deserialized",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Binary("proving_key", request.ProvingKeyBytes),
			)
			return errors.Wrap(err, "handle proving key request")
		}
	}

	if err := e.publishMessage(channel, provingKey); err != nil {
		return errors.Wrap(err, "handle proving key request")
	}

	return nil
}
// handleClockFramesRequest answers a peer's request for clock frames. It
// walks forward from the requested frame number through finalized frames and
// then through candidate frames (breadth-first across forks), capping the
// response at 128 frames, and publishes the set on the peer's send channel.
func (e *CeremonyDataClockConsensusEngine) handleClockFramesRequest(
	peerID []byte,
	address []byte,
	any *anypb.Any,
) error {
	// Ignore our own messages.
	if bytes.Equal(address, e.provingKeyAddress) {
		return nil
	}

	request := &protobufs.ClockFramesRequest{}
	if err := any.UnmarshalTo(request); err != nil {
		return errors.Wrap(err, "handle clock frame request")
	}

	channel := e.createPeerSendChannel(peerID)

	e.pubSub.Subscribe(channel, e.handleSync, true)

	e.logger.Info(
		"received clock frame request",
		zap.Binary("peer_id", peerID),
		zap.Binary("address", address),
		zap.Uint64("from_frame_number", request.FromFrameNumber),
		zap.Uint64("to_frame_number", request.ToFrameNumber),
	)

	from := request.FromFrameNumber

	base, _, err := e.clockStore.GetDataClockFrame(
		request.Filter,
		from,
	)
	if err != nil {
		if !errors.Is(err, store.ErrNotFound) {
			e.logger.Error(
				"peer asked for frame that returned error",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Uint64("frame_number", request.FromFrameNumber),
			)
			return errors.Wrap(err, "handle clock frame request")
		} else {
			// We don't have the starting frame yet: answer with an empty
			// response rather than an error.
			e.logger.Info(
				"peer asked for undiscovered frame",
				zap.Binary("peer_id", peerID),
				zap.Binary("address", address),
				zap.Uint64("frame_number", request.FromFrameNumber),
			)

			if err = e.publishMessage(channel, &protobufs.ClockFramesResponse{
				Filter:          request.Filter,
				FromFrameNumber: 0,
				ToFrameNumber:   0,
				ClockFrames:     []*protobufs.ClockFrame{},
			}); err != nil {
				return errors.Wrap(err, "handle clock frame request")
			}

			return nil
		}
	}

	// Cap the span at 128 frames (also handles to == 0, i.e. "everything").
	to := request.ToFrameNumber
	if to == 0 || to-request.FromFrameNumber > 128 {
		to = request.FromFrameNumber + 127
	}

	// Breadth-first walk: searchSpan holds the frontier at the current frame
	// number; finalized frames are followed until exhausted, after which
	// candidate frames (all forks) are followed instead.
	set := []*protobufs.ClockFrame{base}
	noMoreFinalized := false
	searchSpan := []*protobufs.ClockFrame{base}
	currentNumber := 1

	for len(searchSpan) != 0 && from+uint64(currentNumber) <= to {
		e.logger.Info(
			"scanning frames to add to response",
			zap.Binary("peer_id", peerID),
			zap.Binary("address", address),
			zap.Uint64("from", from),
			zap.Uint64("to", to),
			zap.Uint64("current_number", uint64(currentNumber)),
		)

		nextSpan := []*protobufs.ClockFrame{}
		for _, s := range searchSpan {
			selector, err := s.GetSelector()
			if err != nil {
				return errors.Wrap(err, "handle clock frame request")
			}

			if !noMoreFinalized {
				frame, _, err := e.clockStore.GetDataClockFrame(
					s.Filter,
					s.FrameNumber+1,
				)
				if err != nil {
					if errors.Is(err, store.ErrNotFound) {
						// End of the finalized chain; fall through to the
						// candidate walk below from this point on.
						noMoreFinalized = true
					} else {
						e.logger.Error(
							"fetching clock frame produced error",
							zap.Binary("peer_id", peerID),
							zap.Binary("address", address),
							zap.Uint64("frame_number", s.FrameNumber+1),
						)
						return errors.Wrap(err, "handle clock frame request")
					}
				} else {
					nextSpan = append(nextSpan, frame)
					set = append(set, frame)
				}
			}

			if noMoreFinalized {
				iter, err := e.clockStore.RangeCandidateDataClockFrames(
					s.Filter,
					selector.Bytes(),
					s.FrameNumber+1,
				)
				if err != nil {
					e.logger.Error(
						"peer asked for frame that returned error while iterating",
						zap.Binary("peer_id", peerID),
						zap.Binary("address", address),
						zap.Binary("parent_selector", s.ParentSelector),
						zap.Uint64("frame_number", s.FrameNumber+1),
					)
					return errors.Wrap(err, "handle clock frame request")
				}

				// Every candidate child of this frame joins the frontier.
				for iter.First(); iter.Valid(); iter.Next() {
					frame, err := iter.Value()
					if err != nil {
						e.logger.Error(
							"peer asked for frame that returned error while getting value",
							zap.Binary("peer_id", peerID),
							zap.Binary("address", address),
							zap.Binary("parent_selector", selector.Bytes()),
							zap.Uint64("frame_number", s.FrameNumber+1),
						)
						return errors.Wrap(err, "handle clock frame request")
					}

					nextSpan = append(nextSpan, frame)
					set = append(set, frame)
				}

				iter.Close()
			}
		}

		currentNumber++
		searchSpan = nextSpan
	}

	e.logger.Info(
		"sending response",
		zap.Binary("peer_id", peerID),
		zap.Binary("address", address),
		zap.Uint64("from", from),
		zap.Uint64("to", to),
		zap.Uint64("total_frames", uint64(len(set))),
	)

	if err = e.publishMessage(channel, &protobufs.ClockFramesResponse{
		Filter:          request.Filter,
		FromFrameNumber: request.FromFrameNumber,
		ToFrameNumber:   to,
		ClockFrames:     set,
	}); err != nil {
		return errors.Wrap(err, "handle clock frame request")
	}

	return nil
}

View File

@ -0,0 +1,61 @@
package ceremony
import (
"crypto"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
)
// GetProvingKey loads the engine's proving key from the key manager, creating
// an Ed448 signing key under the configured id if none exists yet. It returns
// the signer, the key type, the raw public key bytes, and the 32-byte
// poseidon-hash address of the public key. Unrecoverable key manager failures
// panic, since the engine cannot operate without a proving key.
func (e *CeremonyDataClockConsensusEngine) GetProvingKey(
	engineConfig *config.EngineConfig,
) (crypto.Signer, keys.KeyType, []byte, []byte) {
	keyId := engineConfig.ProvingKeyId

	signer, err := e.keyManager.GetSigningKey(keyId)
	if errors.Is(err, keys.KeyNotFoundErr) {
		e.logger.Info("could not get proving key, generating")
		signer, err = e.keyManager.CreateSigningKey(keyId, keys.KeyTypeEd448)
	}
	if err != nil {
		e.logger.Error("could not get proving key", zap.Error(err))
		panic(err)
	}

	raw, err := e.keyManager.GetRawKey(keyId)
	if err != nil {
		e.logger.Error("could not get proving key type", zap.Error(err))
		panic(err)
	}

	digest, err := poseidon.HashBytes(raw.PublicKey)
	if err != nil {
		e.logger.Error("could not hash proving key", zap.Error(err))
		panic(err)
	}

	// Left-pad the poseidon hash to a fixed 32-byte address.
	addr := digest.Bytes()
	addr = append(make([]byte, 32-len(addr)), addr...)

	return signer, raw.Type, raw.PublicKey, addr
}
// IsInProverTrie reports whether the poseidon-hash address of the given
// public key is present in the frame prover trie.
func (e *CeremonyDataClockConsensusEngine) IsInProverTrie(key []byte) bool {
	h, err := poseidon.HashBytes(key)
	if err != nil {
		return false
	}

	// Fix: left-pad the address to 32 bytes — trie entries are stored with
	// padded addresses (see GetProvingKey and the signatory registration
	// path), so an unpadded lookup misses whenever the hash is short.
	provingKeyAddress := h.Bytes()
	provingKeyAddress = append(
		make([]byte, 32-len(provingKeyAddress)),
		provingKeyAddress...,
	)
	return e.frameProverTrie.Contains(provingKeyAddress)
}

View File

@ -1,7 +1,12 @@
package consensus
import (
"crypto"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type EngineState int
@ -27,3 +32,19 @@ type ConsensusEngine interface {
GetState() EngineState
GetFrameChannel() <-chan uint64
}
// DataConsensusEngine is the contract for consensus engines driving a data
// clock (application-scoped frame sequence), as opposed to the master clock.
// In addition to lifecycle and executor management it exposes the engine's
// proving key identity and prover-trie membership.
type DataConsensusEngine interface {
	// Start begins consensus for the given filter, seeded with seed; the
	// channel reports startup errors.
	Start(filter []byte, seed []byte) <-chan error
	// Stop shuts the engine down; force skips graceful teardown.
	Stop(force bool) <-chan error
	// RegisterExecutor injects an execution engine once the clock reaches
	// the given frame.
	RegisterExecutor(exec execution.ExecutionEngine, frame uint64) <-chan error
	// UnregisterExecutor removes a named execution engine once the clock
	// reaches the given frame.
	UnregisterExecutor(name string, frame uint64, force bool) <-chan error
	GetFrame() uint64
	GetDifficulty() uint32
	GetState() EngineState
	GetFrameChannel() <-chan *protobufs.ClockFrame
	GetActiveFrame() *protobufs.ClockFrame
	// GetProvingKey returns the signer, key type, raw public key, and
	// 32-byte address of the engine's proving key.
	GetProvingKey(
		engineConfig *config.EngineConfig,
	) (crypto.Signer, keys.KeyType, []byte, []byte)
	// IsInProverTrie reports whether the given public key's address is in
	// the frame prover trie.
	IsInProverTrie(key []byte) bool
}

View File

@ -40,32 +40,27 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
for name := range e.executionEngines {
name := name
eg.Go(func() error {
applications := e.executionEngines[name].GetSupportedApplications()
for _, application := range applications {
if bytes.Equal(msg.Address, application.Address) {
messages, err := e.executionEngines[name].ProcessMessage(
msg.Address,
msg,
)
if err != nil {
e.logger.Error(
"could not process message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
messages, err := e.executionEngines[name].ProcessMessage(
msg.Address,
msg,
)
if err != nil {
e.logger.Error(
"could not process message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
for _, m := range messages {
if err := e.publishMessage(e.filter, m); err != nil {
e.logger.Error(
"could not publish message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
}
for _, m := range messages {
if err := e.publishMessage(m.Address, m); err != nil {
e.logger.Error(
"could not publish message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
}
@ -87,6 +82,7 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
return errors.Wrap(err, "handle message")
}
}
return nil
}
@ -115,7 +111,10 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
"frame difficulty mismatched",
zap.Uint32("difficulty", frame.Difficulty),
)
return nil
return errors.Wrap(
errors.New("frame difficulty"),
"handle clock frame data",
)
}
e.logger.Info(

View File

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/binary"
"sort"
"strings"
"time"
"github.com/pkg/errors"
@ -56,10 +55,6 @@ func (
e *MasterClockConsensusEngine,
) createGenesisFrame() *protobufs.ClockFrame {
e.logger.Info("creating genesis frame")
for _, l := range strings.Split(string(e.input), "\n") {
e.logger.Info(l)
}
b := sha3.Sum256(e.input)
v := vdf.New(e.difficulty, b)

View File

@ -177,6 +177,10 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
zap.Int("peer_store_count", e.pubSub.GetPeerstoreCount()),
zap.Int("network_peer_count", e.pubSub.GetNetworkPeersCount()),
)
e.logger.Info(
"peers by bitmask",
zap.Any("peers", e.pubSub.GetBitmaskPeers()),
)
time.Sleep(10 * time.Second)
}
}()

View File

@ -36,32 +36,27 @@ func (e *MasterClockConsensusEngine) handleSync(message *pb.Message) error {
for name := range e.executionEngines {
name := name
eg.Go(func() error {
applications := e.executionEngines[name].GetSupportedApplications()
for _, application := range applications {
if bytes.Equal(msg.Address, application.Address) {
messages, err := e.executionEngines[name].ProcessMessage(
msg.Address,
msg,
)
if err != nil {
e.logger.Error(
"could not process message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
messages, err := e.executionEngines[name].ProcessMessage(
msg.Address,
msg,
)
if err != nil {
e.logger.Error(
"could not process message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
for _, m := range messages {
if err := e.publishMessage(e.filter, m); err != nil {
e.logger.Error(
"could not publish message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
}
for _, m := range messages {
if err := e.publishMessage(e.filter, m); err != nil {
e.logger.Error(
"could not publish message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return errors.Wrap(err, "handle message")
}
}

View File

@ -16,8 +16,8 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
const PROTOCOL_VERSION = 1
const PROTOCOL = 1<<8 + PROTOCOL_VERSION
const DOUBLE_RATCHET_PROTOCOL_VERSION = 1
const DOUBLE_RATCHET_PROTOCOL = 1<<8 + DOUBLE_RATCHET_PROTOCOL_VERSION
const CHAIN_KEY = 0x01
const MESSAGE_KEY = 0x02
@ -29,7 +29,7 @@ const AEAD_KEY = 0x03
type DoubleRatchetParticipant struct {
sendingEphemeralPrivateKey curves.Scalar
receivingEphemeralKey curves.Point
curve curves.Curve
curve *curves.Curve
keyManager keys.KeyManager
rootKey []byte
sendingChainKey []byte
@ -52,7 +52,7 @@ func NewDoubleRatchetParticipant(
isSender bool,
sendingEphemeralPrivateKey curves.Scalar,
receivingEphemeralKey curves.Point,
curve curves.Curve,
curve *curves.Curve,
keyManager keys.KeyManager,
) (*DoubleRatchetParticipant, error) {
participant := &DoubleRatchetParticipant{}
@ -106,7 +106,7 @@ func (r *DoubleRatchetParticipant) RatchetEncrypt(
message []byte,
) (*protobufs.P2PChannelEnvelope, error) {
envelope := &protobufs.P2PChannelEnvelope{
ProtocolIdentifier: PROTOCOL,
ProtocolIdentifier: DOUBLE_RATCHET_PROTOCOL,
MessageHeader: &protobufs.MessageCiphertext{},
MessageBody: &protobufs.MessageCiphertext{},
}
@ -181,8 +181,6 @@ func (r *DoubleRatchetParticipant) RatchetDecrypt(
}
newChainKey, messageKey, aeadKey := ratchetKeys(r.receivingChainKey)
r.receivingChainKey = newChainKey
r.currentReceivingChainLength++
plaintext, err = r.decrypt(
envelope.MessageBody,
@ -193,6 +191,9 @@ func (r *DoubleRatchetParticipant) RatchetDecrypt(
),
)
r.receivingChainKey = newChainKey
r.currentReceivingChainLength++
return plaintext, errors.Wrap(err, "could not decrypt message")
}

View File

@ -43,7 +43,7 @@ func TestRatchetEncrypt(t *testing.T) {
true,
x448SendingEphemeralPrivateKey,
x448ReceivingSignedPreKey,
*curves.ED448(),
curves.ED448(),
nil,
)
require.NoError(t, err)
@ -55,7 +55,7 @@ func TestRatchetEncrypt(t *testing.T) {
false,
x448ReceivingSignedPrePrivateKey,
x448SendingEphemeralKey,
*curves.ED448(),
curves.ED448(),
nil,
)
require.NoError(t, err)

View File

@ -86,7 +86,7 @@ func FFT(
}
// We make a copy so we can mutate it during the work.
workingValues := make([]curves.PairingScalar, width, width)
workingValues := make([]curves.PairingScalar, width)
for i := 0; i < len(values); i++ {
workingValue := values[i].Clone()
workingValues[i] = workingValue.(curves.PairingScalar)
@ -101,7 +101,7 @@ func FFT(
workingValues[i] = workingValue.(curves.PairingScalar)
}
out := make([]curves.PairingScalar, width, width)
out := make([]curves.PairingScalar, width)
stride := fftWidth / width
for i := 0; i < len(out); i++ {
@ -210,7 +210,7 @@ func FFTG1(
width = nearestPowerOfTwo(width)
}
workingValues := make([]curves.PairingPoint, width, width)
workingValues := make([]curves.PairingPoint, width)
for i := 0; i < len(values); i++ {
workingValue, err := curve.NewG1GeneratorPoint().FromAffineCompressed(
values[i].ToAffineCompressed(),
@ -224,7 +224,7 @@ func FFTG1(
workingValues[i] = curve.NewG1IdentityPoint()
}
out := make([]curves.PairingPoint, width, width)
out := make([]curves.PairingPoint, width)
stride := fftWidth / width
for i := 0; i < len(out); i++ {

View File

@ -82,6 +82,8 @@ func Init() {
panic(err)
}
bls48581.Init()
cs := &CeremonyState{}
if err := json.Unmarshal(csBytes, cs); err != nil {
panic(err)
@ -607,7 +609,7 @@ func (p *KZGProver) CommitAggregate(
var err error
commitments[i], err = p.Commit(poly)
if err != nil {
return nil, errors.Wrap(err, "prove aggregate")
return nil, errors.Wrap(err, "commit aggregate")
}
}

View File

@ -328,16 +328,7 @@ func TestKZGProof(t *testing.T) {
commitment,
proof,
)
require.False(t, proof.IsIdentity())
require.NoError(t, err)
require.True(t, valid)
}
func ReverseScalarBytes(inBytes []byte) []byte {
outBytes := make([]byte, len(inBytes))
for i, j := 0, len(inBytes)-1; j >= 0; i, j = i+1, j-1 {
outBytes[i] = inBytes[j]
}
return outBytes
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,262 @@
package application
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// applyTranscriptShare validates a participant's additive powers-of-tau
// contribution against the latest transcript and appends it to
// a.TranscriptShares if it is new. Validation covers: element counts,
// decompressibility of the witnesses, presence of a matching final commit,
// the share's own signature, and (in parallel) decompressibility of the
// current transcript's points. Duplicate shares are accepted as no-ops.
func (a *CeremonyApplication) applyTranscriptShare(
	share *protobufs.CeremonyTranscriptShare,
) error {
	// Additive powers omit the degree-0 generator, hence len-1.
	if len(share.AdditiveG1Powers) != len(a.LatestTranscript.G1Powers)-1 {
		return errors.Wrap(errors.New("invalid g1s"), "apply transcript share")
	}

	if len(share.AdditiveG2Powers) != len(a.LatestTranscript.G2Powers)-1 {
		return errors.Wrap(errors.New("invalid g2s"), "apply transcript share")
	}

	if share.AdditiveG1_256Witness == nil ||
		share.AdditiveG1_256Witness.KeyValue == nil {
		return errors.Wrap(
			errors.New("invalid g1 witness"),
			"apply transcript share",
		)
	}

	// The G1 witness must decompress to a valid curve point.
	if _, err := curves.BLS48581G1().Point.FromAffineCompressed(
		share.AdditiveG1_256Witness.KeyValue,
	); err != nil {
		return errors.Wrap(
			errors.Wrap(err, "invalid g1 witness"),
			"apply transcript share",
		)
	}

	// NOTE(review): unlike the G1 witness, the G2 witness is only checked
	// for presence, not decompressed — confirm whether that is intentional.
	if share.AdditiveG2_256Witness == nil ||
		share.AdditiveG2_256Witness.KeyValue == nil {
		return errors.Wrap(
			errors.New("invalid g2 witness"),
			"apply transcript share",
		)
	}

	// Idempotence: a share whose first additive G1 power we already hold
	// is treated as a duplicate and silently accepted.
	for _, s := range a.TranscriptShares {
		if bytes.Equal(
			s.AdditiveG1Powers[0].KeyValue,
			share.AdditiveG1Powers[0].KeyValue,
		) {
			return nil
		}
	}

	// The share's prover must have a corresponding entry in FinalCommits.
	matchFound := false
	for _, c := range a.FinalCommits {
		if bytes.Equal(
			share.ProverSignature.PublicKey.KeyValue,
			c.ProverSignature.PublicKey.KeyValue,
		) {
			matchFound = true
			break
		}
	}

	if !matchFound {
		return errors.Wrap(
			errors.New(
				fmt.Sprintf(
					"no corresponding commit in commit set (size %d)",
					len(a.FinalCommits),
				),
			),
			"apply transcript share",
		)
	}

	if err := share.VerifySignature(); err != nil {
		return errors.Wrap(err, "apply transcript share")
	}

	// Decompress every point of the current transcript in parallel (bounded
	// to 100 goroutines) to ensure the base we extend is well-formed.
	// NOTE(review): these loops validate a.LatestTranscript's points, not the
	// share's AdditiveG1Powers/AdditiveG2Powers — confirm that is intended.
	eg := errgroup.Group{}
	eg.SetLimit(100)

	for i, g1 := range a.LatestTranscript.G1Powers {
		i := i
		g1 := g1
		eg.Go(func() error {
			if _, err := curves.BLS48581G1().Point.FromAffineCompressed(
				g1.KeyValue,
			); err != nil {
				return errors.Wrap(
					errors.Wrap(err, fmt.Sprintf("invalid g1 at position %d", i)),
					"apply transcript share",
				)
			}

			return nil
		})
	}

	for i, g2 := range a.LatestTranscript.G2Powers {
		i := i
		g2 := g2
		eg.Go(func() error {
			if _, err := curves.BLS48581G2().Point.FromAffineCompressed(
				g2.KeyValue,
			); err != nil {
				return errors.Wrap(
					errors.Wrap(err, fmt.Sprintf("invalid g2 at position %d", i)),
					"apply transcript share",
				)
			}

			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	// Deduplicate by prover signature before recording the share.
	exists := false
	for _, s := range a.TranscriptShares {
		exists = bytes.Equal(
			s.ProverSignature.Signature,
			share.ProverSignature.Signature,
		)
		if exists {
			break
		}
	}

	if !exists {
		a.TranscriptShares = append(a.TranscriptShares, share)
	}

	return nil
}
// finalizeTranscript folds all collected TranscriptShares into
// a.UpdatedTranscript: each non-generator power is the sum of the
// corresponding additive contributions from every share, and the running
// G1 witness list / running G2^256 power list are extended by one entry.
// Requires at least one entry in a.TranscriptShares; every aggregated point
// must land on the curve and be non-identity.
func (a *CeremonyApplication) finalizeTranscript() error {
	a.UpdatedTranscript = &protobufs.CeremonyTranscript{
		G1Powers: make(
			[]*protobufs.BLS48581G1PublicKey,
			len(a.LatestTranscript.G1Powers),
		),
		G2Powers: make(
			[]*protobufs.BLS48581G2PublicKey,
			len(a.LatestTranscript.G2Powers),
		),
		RunningG1_256Witnesses: a.LatestTranscript.RunningG1_256Witnesses,
		RunningG2_256Powers:    a.LatestTranscript.RunningG2_256Powers,
	}

	// Position 0 is the fixed generator; shares only contribute to
	// positions 1..n, so AdditiveG*Powers[i] maps onto G*Powers[i+1].
	a.UpdatedTranscript.G1Powers[0] = a.LatestTranscript.G1Powers[0]
	a.UpdatedTranscript.G2Powers[0] = a.LatestTranscript.G2Powers[0]

	for i := range a.UpdatedTranscript.G1Powers[1:] {
		// Seed with the first share, then add the rest.
		g1, err := curves.BLS48581G1().Point.FromAffineCompressed(
			a.TranscriptShares[0].AdditiveG1Powers[i].KeyValue,
		)
		if err != nil {
			return errors.Wrap(err, "finalize transcript")
		}

		if len(a.TranscriptShares) > 1 {
			for _, share := range a.TranscriptShares[1:] {
				ag1, err := curves.BLS48581G1().Point.FromAffineCompressed(
					share.AdditiveG1Powers[i].KeyValue,
				)
				if err != nil {
					return errors.Wrap(err, "finalize transcript")
				}

				g1 = g1.Add(ag1)
			}
		}

		if !g1.IsOnCurve() || g1.IsIdentity() {
			return errors.Wrap(
				errors.New("invalid g1 power"),
				"finalize transcript",
			)
		}

		a.UpdatedTranscript.G1Powers[i+1] = &protobufs.BLS48581G1PublicKey{
			KeyValue: g1.ToAffineCompressed(),
		}
	}

	for i := range a.UpdatedTranscript.G2Powers[1:] {
		g2, err := curves.BLS48581G2().Point.FromAffineCompressed(
			a.TranscriptShares[0].AdditiveG2Powers[i].KeyValue,
		)
		if err != nil {
			return errors.Wrap(err, "finalize transcript")
		}

		if len(a.TranscriptShares) > 1 {
			for _, share := range a.TranscriptShares[1:] {
				ag2, err := curves.BLS48581G2().Point.FromAffineCompressed(
					share.AdditiveG2Powers[i].KeyValue,
				)
				if err != nil {
					return errors.Wrap(err, "finalize transcript")
				}

				g2 = g2.Add(ag2)
			}
		}

		if !g2.IsOnCurve() || g2.IsIdentity() {
			return errors.Wrap(
				errors.New("invalid g2 power"),
				"finalize transcript",
			)
		}

		a.UpdatedTranscript.G2Powers[i+1] = &protobufs.BLS48581G2PublicKey{
			KeyValue: g2.ToAffineCompressed(),
		}
	}

	// Aggregate the G1_256 witness contributions the same way.
	g1Witness, err := curves.BLS48581G1().Point.FromAffineCompressed(
		a.TranscriptShares[0].AdditiveG1_256Witness.KeyValue,
	)
	if err != nil {
		return errors.Wrap(err, "finalize transcript")
	}

	if len(a.TranscriptShares) > 1 {
		for _, share := range a.TranscriptShares[1:] {
			ag1, err := curves.BLS48581G1().Point.FromAffineCompressed(
				share.AdditiveG1_256Witness.KeyValue,
			)
			if err != nil {
				return errors.Wrap(err, "finalize transcript")
			}

			g1Witness = g1Witness.Add(ag1)
		}
	}

	if !g1Witness.IsOnCurve() || g1Witness.IsIdentity() {
		return errors.Wrap(
			errors.New("invalid witness"),
			"finalize transcript",
		)
	}

	a.UpdatedTranscript.RunningG1_256Witnesses = append(
		a.UpdatedTranscript.RunningG1_256Witnesses,
		&protobufs.BLS48581G1PublicKey{
			KeyValue: g1Witness.ToAffineCompressed(),
		},
	)

	// The running G2^256 power list is extended with the highest new G2
	// power of this round.
	a.UpdatedTranscript.RunningG2_256Powers = append(
		a.UpdatedTranscript.RunningG2_256Powers,
		a.UpdatedTranscript.G2Powers[len(a.UpdatedTranscript.G2Powers)-1],
	)

	return nil
}

View File

@ -0,0 +1,252 @@
package application
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// applySeenProverAttestation validates a "last seen" attestation about an
// active ceremony participant and records it, keeping the most recent
// attestation per (seen prover, attesting prover) pair. The signed payload
// is "lastseen" || big-endian frame number || seen prover key.
func (a *CeremonyApplication) applySeenProverAttestation(
	seenProverAttestation *protobufs.CeremonySeenProverAttestation,
) error {
	if seenProverAttestation.SeenProverKey == nil ||
		seenProverAttestation.SeenProverKey.KeyValue == nil {
		return errors.Wrap(
			errors.New("signature is nil"),
			"apply seen prover attestation",
		)
	}

	// Only attestations about current participants are admissible.
	inParticipantList := false
	for _, p := range a.ActiveParticipants {
		if bytes.Equal(p.KeyValue, seenProverAttestation.SeenProverKey.KeyValue) {
			inParticipantList = true
			break
		}
	}

	if !inParticipantList {
		return errors.Wrap(
			errors.New("prover not in active participant list"),
			"apply seen prover attestation",
		)
	}

	// Reconstruct the signed payload and verify the attester's signature.
	b := binary.BigEndian.AppendUint64(
		[]byte("lastseen"),
		seenProverAttestation.LastSeenFrame,
	)
	b = append(b, seenProverAttestation.SeenProverKey.KeyValue...)
	signature := seenProverAttestation.GetProverSignature()
	if signature == nil {
		return errors.Wrap(
			errors.New("signature is nil"),
			"apply seen prover attestation",
		)
	}
	if err := signature.Verify(b); err != nil {
		return errors.Wrap(err, "apply seen prover attestation")
	}

	// Replace an older attestation for the same (seen, attester) pair when
	// this one reports a strictly newer frame.
	replaced := false
	for i, att := range a.LatestSeenProverAttestations {
		if bytes.Equal(
			att.SeenProverKey.KeyValue,
			seenProverAttestation.SeenProverKey.KeyValue,
		) &&
			bytes.Equal(
				att.ProverSignature.PublicKey.KeyValue,
				seenProverAttestation.ProverSignature.PublicKey.KeyValue,
			) && att.LastSeenFrame < seenProverAttestation.LastSeenFrame {
			// BUG FIX: previously this wrote `att` back into its own slot
			// (a no-op), so newer attestations were silently discarded.
			// Store the incoming, newer attestation instead.
			a.LatestSeenProverAttestations[i] = seenProverAttestation
			replaced = true
			break
		}
	}

	// NOTE(review): an attestation for an existing pair with an equal or
	// older frame falls through to append, creating a duplicate entry —
	// confirm whether stale attestations should be dropped instead.
	if !replaced {
		a.LatestSeenProverAttestations = append(
			a.LatestSeenProverAttestations,
			seenProverAttestation,
		)
	}

	return nil
}
// applyDroppedProverAttestation validates an attestation that an active
// participant has dropped and records it, keeping the most recent
// attestation per (dropped prover, attesting prover) pair. The signed
// payload is "dropped" || big-endian frame number || dropped prover key.
func (a *CeremonyApplication) applyDroppedProverAttestation(
	droppedProverAttestation *protobufs.CeremonyDroppedProverAttestation,
) error {
	if droppedProverAttestation.DroppedProverKey == nil ||
		droppedProverAttestation.DroppedProverKey.KeyValue == nil {
		return errors.Wrap(
			errors.New("signature is nil"),
			"apply dropped prover attestation",
		)
	}

	// Only attestations about current participants are admissible.
	inParticipantList := false
	for _, p := range a.ActiveParticipants {
		if bytes.Equal(
			p.KeyValue,
			droppedProverAttestation.DroppedProverKey.KeyValue,
		) {
			inParticipantList = true
			break
		}
	}

	if !inParticipantList {
		return errors.Wrap(
			errors.New("prover not in active participant list"),
			"apply dropped prover attestation",
		)
	}

	// Reconstruct the signed payload and verify the attester's signature.
	b := binary.BigEndian.AppendUint64(
		[]byte("dropped"),
		droppedProverAttestation.LastSeenFrame,
	)
	b = append(b, droppedProverAttestation.DroppedProverKey.KeyValue...)
	signature := droppedProverAttestation.GetProverSignature()
	if signature == nil {
		return errors.Wrap(
			errors.New("signature is nil"),
			"apply dropped prover attestation",
		)
	}
	if err := signature.Verify(b); err != nil {
		return errors.Wrap(err, "apply dropped prover attestation")
	}

	// Replace an older attestation for the same (dropped, attester) pair
	// when this one reports a strictly newer frame.
	replaced := false
	for i, att := range a.DroppedParticipantAttestations {
		if bytes.Equal(
			att.DroppedProverKey.KeyValue,
			droppedProverAttestation.DroppedProverKey.KeyValue,
		) &&
			bytes.Equal(
				att.ProverSignature.PublicKey.KeyValue,
				droppedProverAttestation.ProverSignature.PublicKey.KeyValue,
			) && att.LastSeenFrame < droppedProverAttestation.LastSeenFrame {
			// BUG FIX: previously this wrote `att` back into its own slot
			// (a no-op), so newer attestations were silently discarded.
			// Store the incoming, newer attestation instead.
			a.DroppedParticipantAttestations[i] = droppedProverAttestation
			replaced = true
			break
		}
	}

	// NOTE(review): an attestation for an existing pair with an equal or
	// older frame falls through to append, creating a duplicate entry —
	// confirm whether stale attestations should be dropped instead.
	if !replaced {
		a.DroppedParticipantAttestations = append(
			a.DroppedParticipantAttestations,
			droppedProverAttestation,
		)
	}

	return nil
}
// applyTranscriptCommit validates a participant's transcript commit (a BLS
// contribution signature bound to an Ed448 prover signature) and records it
// in the current round's commit set. When the current round's commit set is
// already full (one commit per active participant), a new round is opened —
// up to a cap of 2^(participants-1) rounds. Duplicate commits from the same
// prover within a round are accepted as no-ops.
func (a *CeremonyApplication) applyTranscriptCommit(
	transcriptCommit *protobufs.CeremonyTranscriptCommit,
) error {
	if transcriptCommit.ContributionSignature == nil ||
		transcriptCommit.ProverSignature == nil ||
		transcriptCommit.ContributionSignature.PublicKey == nil ||
		transcriptCommit.ProverSignature.PublicKey == nil {
		return errors.Wrap(
			errors.New("signature is nil"),
			"apply transcript commit",
		)
	}

	// The BLS public key must decompress to a valid G2 point.
	point, err := curves.BLS48581G2().Point.FromAffineCompressed(
		transcriptCommit.ContributionSignature.PublicKey.KeyValue,
	)
	if err != nil {
		return errors.Wrap(err, "apply transcript commit")
	}

	// Cross-binding: the BLS contribution signs the prover's Ed448 key, and
	// the Ed448 prover signature signs the BLS public key.
	if err := VerifySignatureOfProverKey(
		transcriptCommit.ProverSignature.PublicKey.KeyValue,
		transcriptCommit.ContributionSignature.Signature,
		point,
	); err != nil {
		return errors.Wrap(err, "apply transcript commit")
	}

	if err := transcriptCommit.ProverSignature.Verify(
		transcriptCommit.ContributionSignature.PublicKey.KeyValue,
	); err != nil {
		return errors.Wrap(err, "apply transcript commit")
	}

	inParticipantList := false
	for _, p := range a.ActiveParticipants {
		if bytes.Equal(
			p.KeyValue,
			transcriptCommit.ProverSignature.PublicKey.KeyValue,
		) {
			inParticipantList = true
			break
		}
	}

	if !inParticipantList {
		return errors.Wrap(
			errors.New("prover not in active participant list"),
			"apply transcript commit",
		)
	}

	// maxRounds = 2^(len(ActiveParticipants)-1).
	maxRounds := uint64(1)
	for i := 0; i < len(a.ActiveParticipants)-1; i++ {
		maxRounds = maxRounds << 1
	}

	if len(a.TranscriptRoundAdvanceCommits) == 0 {
		a.TranscriptRoundAdvanceCommits = []*protobufs.CeremonyAdvanceRound{
			{
				Commits: []*protobufs.CeremonyTranscriptCommit{},
			},
		}
	}

	// NOTE(review): indexing with RoundCount-1 assumes RoundCount >= 1 and
	// in sync with len(TranscriptRoundAdvanceCommits) — confirm invariant
	// is established elsewhere.
	if len(a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits) ==
		len(a.ActiveParticipants) {
		if maxRounds ==
			uint64(len(a.TranscriptRoundAdvanceCommits)) {
			return errors.Wrap(
				errors.New("round limit exceeded"),
				"apply transcript commit",
			)
		}

		// Current round is full: open the next round seeded with this commit.
		a.TranscriptRoundAdvanceCommits = append(
			a.TranscriptRoundAdvanceCommits,
			&protobufs.CeremonyAdvanceRound{
				Commits: []*protobufs.CeremonyTranscriptCommit{
					transcriptCommit,
				},
			},
		)
		a.RoundCount++
	} else {
		// Deduplicate by prover key within the current round.
		for _, c := range a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits {
			if bytes.Equal(
				c.ProverSignature.PublicKey.KeyValue,
				transcriptCommit.ProverSignature.PublicKey.KeyValue,
			) {
				return nil
			}
		}

		a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits = append(
			a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits,
			transcriptCommit,
		)
	}

	return nil
}

View File

@ -0,0 +1,100 @@
package application
import (
"bytes"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// applyLobbyJoin validates a lobby join request (key presence, point
// decompression of the identity and signed pre-keys, and the join's own
// signature) and records it. The lobby is capped at 256 joins; duplicate
// joins by the same prover are no-ops. Provers carried over in
// NextRoundPreferredParticipants are prepended so they sort ahead of
// fresh joiners.
func (a *CeremonyApplication) applyLobbyJoin(
	join *protobufs.CeremonyLobbyJoin,
) error {
	signature := join.GetPublicKeySignatureEd448()
	if signature == nil {
		return errors.Wrap(errors.New("signature is nil"), "apply lobby join")
	}

	if join.IdentityKey == nil || join.IdentityKey.KeyValue == nil {
		return errors.Wrap(errors.New("identity key is nil"), "apply lobby join")
	}

	if join.SignedPreKey == nil || join.SignedPreKey.KeyValue == nil {
		return errors.Wrap(errors.New("signed prekey is nil"), "apply lobby join")
	}

	// Both keys must decompress to valid ED448 points.
	if _, err := curves.ED448().Point.FromAffineCompressed(
		join.IdentityKey.KeyValue,
	); err != nil {
		return errors.Wrap(err, "apply lobby join")
	}

	if _, err := curves.ED448().Point.FromAffineCompressed(
		join.SignedPreKey.KeyValue,
	); err != nil {
		return errors.Wrap(err, "apply lobby join")
	}

	if err := join.VerifySignature(); err != nil {
		return errors.Wrap(err, "apply lobby join")
	}

	// Lobby is full: silently drop further joins.
	if len(a.LobbyJoins) == 256 {
		return nil
	}

	// Duplicate join by the same prover: no-op.
	for _, p := range a.LobbyJoins {
		if bytes.Equal(
			p.PublicKeySignatureEd448.PublicKey.KeyValue,
			signature.PublicKey.KeyValue,
		) {
			return nil
		}
	}

	// Remove this prover from the preferred list; if it was present, the
	// join is prepended (preferred provers get priority ordering).
	prepend := false
	nextRoundPreferredParticipants := []*protobufs.Ed448PublicKey{}
	for _, p := range a.NextRoundPreferredParticipants {
		if !bytes.Equal(p.KeyValue, signature.PublicKey.KeyValue) {
			nextRoundPreferredParticipants = append(
				nextRoundPreferredParticipants,
				p,
			)
		}
	}

	if len(a.NextRoundPreferredParticipants) !=
		len(nextRoundPreferredParticipants) {
		prepend = true
	}

	a.NextRoundPreferredParticipants = nextRoundPreferredParticipants

	if prepend {
		a.LobbyJoins = append(
			append([]*protobufs.CeremonyLobbyJoin{}, join),
			a.LobbyJoins...,
		)
	} else {
		a.LobbyJoins = append(a.LobbyJoins, join)
	}

	return nil
}
// finalizeParticipantSet locks in the active participant set for the round:
// it truncates the ordered lobby joins to the largest power of two that does
// not exceed the number of joins and records those provers' public keys.
// With zero joins the active set is left empty.
func (a *CeremonyApplication) finalizeParticipantSet() error {
	joinCount := uint64(len(a.LobbyJoins))

	// Largest power of two <= joinCount (0 when the lobby is empty).
	selectedCount := uint64(1)
	for selectedCount<<1 <= joinCount {
		selectedCount <<= 1
	}
	if joinCount == 0 {
		selectedCount = 0
	}

	selected := make([]*protobufs.Ed448PublicKey, 0, selectedCount)
	for _, j := range a.LobbyJoins[:selectedCount] {
		selected = append(selected, j.PublicKeySignatureEd448.PublicKey)
	}
	a.ActiveParticipants = selected

	return nil
}

View File

@ -0,0 +1,284 @@
package application
import (
"bytes"
"crypto"
"crypto/rand"
"testing"
"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
// TestCeremonyTransitions exercises the full ceremony state machine for a
// single participant: lobby join -> OPEN, idle frames, participant-set
// finalization -> IN_PROGRESS, transcript commit -> FINALIZING, transcript
// share -> VALIDATING, transcript application -> back to OPEN, and finally
// checks the reward trie entry for the prover.
func TestCeremonyTransitions(t *testing.T) {
	bls48581.Init()

	// Prior transcript secret: powers old, old^2, old^3.
	old := curves.BLS48581G1().Scalar.Random(rand.Reader)
	old2 := old.Mul(old)
	old3 := old2.Mul(old)
	proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
	require.NoError(t, err)
	idk := curves.ED448().Scalar.Random(rand.Reader)
	idkPub := curves.ED448().Point.Generator().Mul(idk).ToAffineCompressed()
	spk := curves.ED448().Scalar.Random(rand.Reader)
	spkPub := curves.ED448().Point.Generator().Mul(spk).ToAffineCompressed()
	require.NoError(t, err)

	// Seed application with a 4-power G1 / 2-power G2 transcript built from
	// the "old" secret.
	trie := &tries.RewardCritbitTrie{}
	a := &CeremonyApplication{
		RewardTrie: trie,
		LatestTranscript: &protobufs.CeremonyTranscript{
			G1Powers: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old2,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old3,
					).ToAffineCompressed(),
				},
			},
			G2Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
			RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
			},
			RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
		},
	}

	// Join the lobby with fresh identity / signed pre-keys.
	join := &protobufs.CeremonyLobbyJoin{
		FrameNumber: 0,
		IdentityKey: &protobufs.X448PublicKey{
			KeyValue: idkPub,
		},
		SignedPreKey: &protobufs.X448PublicKey{
			KeyValue: spkPub,
		},
	}
	sig, err := join.SignWithProverKey(proverKey)
	require.NoError(t, err)
	join.PublicKeySignatureEd448 = &protobufs.Ed448Signature{
		Signature: sig,
		PublicKey: &protobufs.Ed448PublicKey{
			KeyValue: proverPubKey,
		},
	}
	joinBytes, err := proto.Marshal(join)
	require.NoError(t, err)

	a, err = a.ApplyTransition(0, &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{protobufs.CeremonyLobbyJoinType},
		TransitionInputs: [][]byte{joinBytes},
	})
	require.NoError(t, err)
	require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)

	// Idle frames keep the lobby open until the join window elapses.
	for i := uint64(0); i < 10; i++ {
		a, err = a.ApplyTransition(i+1, &protobufs.CeremonyLobbyStateTransition{
			TypeUrls:         []string{},
			TransitionInputs: [][]byte{},
		})
		require.NoError(t, err)
		require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)
	}

	a, err = a.ApplyTransition(12, &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{},
		TransitionInputs: [][]byte{},
	})
	require.NoError(t, err)
	require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_IN_PROGRESS)
	require.True(t, bytes.Equal(a.ActiveParticipants[0].KeyValue, proverPubKey))

	// Contribution secret tau and its powers; the commit cross-binds the
	// Ed448 prover key and the BLS contribution key.
	tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
	tau2 := tau.Mul(tau)
	tau3 := tau2.Mul(tau)
	tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
	proverSig, err := proverKey.Sign(
		rand.Reader,
		tauPubG2.ToAffineCompressed(),
		crypto.Hash(0),
	)
	require.NoError(t, err)

	// NOTE(review): unlike TestApplyTranscript, the scalar bytes are not
	// byte-reversed before Core_Sign, and the signed message here is
	// proverPubKey rather than proverKey — confirm which form is intended.
	blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
	key := tau.Bytes()
	if bls48581.Core_Sign(blsSignature, proverPubKey, key) != bls48581.BLS_OK {
		require.Fail(t, "could not sign")
	}

	blsSig := blsSignature[:]
	advanceRound := &protobufs.CeremonyTranscriptCommit{
		ProverSignature: &protobufs.Ed448Signature{
			Signature: proverSig,
			PublicKey: &protobufs.Ed448PublicKey{
				KeyValue: proverPubKey,
			},
		},
		ContributionSignature: &protobufs.BLS48581Signature{
			Signature: blsSig,
			PublicKey: &protobufs.BLS48581G2PublicKey{
				KeyValue: tauPubG2.ToAffineCompressed(),
			},
		},
	}
	advanceRoundBytes, err := proto.Marshal(advanceRound)
	require.NoError(t, err)
	a, err = a.ApplyTransition(13, &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{protobufs.CeremonyTranscriptCommitType},
		TransitionInputs: [][]byte{advanceRoundBytes},
	})
	require.NoError(t, err)
	require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_FINALIZING)

	// Additive share: each prior power multiplied by the matching tau power.
	g1 := curves.BLS48581G1().Point.Generator()
	g2 := curves.BLS48581G2().Point.Generator()
	transcriptShare := &protobufs.CeremonyTranscriptShare{
		AdditiveG1Powers: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: g1.Mul(old.Mul(tau)).ToAffineCompressed(),
			},
			{
				KeyValue: g1.Mul(old2.Mul(tau2)).ToAffineCompressed(),
			},
			{
				KeyValue: g1.Mul(old3.Mul(tau3)).ToAffineCompressed(),
			},
		},
		AdditiveG2Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: g2.Mul(old.Mul(tau)).ToAffineCompressed(),
			},
		},
		AdditiveG1_256Witness: &protobufs.BLS48581G1PublicKey{
			KeyValue: g1.Mul(tau).ToAffineCompressed(),
		},
		AdditiveG2_256Witness: &protobufs.BLS48581G2PublicKey{
			KeyValue: g2.Mul(old.Mul(tau)).ToAffineCompressed(),
		},
	}
	sig, err = transcriptShare.SignWithProverKey(proverKey)
	require.NoError(t, err)
	transcriptShare.ProverSignature = &protobufs.Ed448Signature{
		Signature: sig,
		PublicKey: &protobufs.Ed448PublicKey{
			KeyValue: proverPubKey,
		},
	}
	shareBytes, err := proto.Marshal(transcriptShare)
	require.NoError(t, err)
	a, err = a.ApplyTransition(14, &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{protobufs.CeremonyTranscriptShareType},
		TransitionInputs: [][]byte{shareBytes},
	})
	require.NoError(t, err)
	require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_VALIDATING)

	// The expected updated transcript: old powers scaled by tau powers,
	// witness and running lists extended by one entry each.
	updatedTranscript := &protobufs.CeremonyTranscript{
		G1Powers: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old2,
				).Mul(tau2).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old3,
				).Mul(tau3).ToAffineCompressed(),
			},
		},
		G2Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
		},
		RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau,
				).ToAffineCompressed(),
			},
		},
		RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
		},
	}
	transcriptBytes, err := proto.Marshal(updatedTranscript)
	require.NoError(t, err)
	a, err = a.ApplyTransition(15, &protobufs.CeremonyLobbyStateTransition{
		TypeUrls:         []string{protobufs.CeremonyTranscriptType},
		TransitionInputs: [][]byte{transcriptBytes},
	})
	require.NoError(t, err)
	require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)

	// The prover's reward is keyed by the poseidon hash of its public key.
	bi, err := poseidon.HashBytes(proverPubKey)
	require.NoError(t, err)
	addr := bi.FillBytes(make([]byte, 32))
	_, f, reward := a.RewardTrie.Get(addr)
	require.Equal(t, f, uint64(15))
	require.Equal(t, reward, uint64(161))
}

View File

@ -0,0 +1,210 @@
package application
import (
"bytes"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// applyTranscript checks a submitted transcript byte-for-byte against the
// locally computed a.UpdatedTranscript, decompresses every point, and runs
// three multipairing checks before promoting UpdatedTranscript to
// LatestTranscript: (1) consecutive G1 powers form a geometric progression
// under the G2 ratio, (2) consecutive G2 powers do likewise under the G1
// ratio, (3) the running witnesses are consistent with the running G2^256
// powers. On success, LatestTranscript is replaced and UpdatedTranscript
// cleared.
func (a *CeremonyApplication) applyTranscript(
	transcript *protobufs.CeremonyTranscript,
) error {
	// Length checks: submitted transcript must mirror the locally computed
	// one, and the running lists must each have grown by exactly one entry.
	if len(a.UpdatedTranscript.G1Powers) != len(transcript.G1Powers) {
		return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
	}
	if len(a.UpdatedTranscript.G2Powers) != len(transcript.G2Powers) {
		return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
	}
	if len(a.UpdatedTranscript.RunningG1_256Witnesses) !=
		len(transcript.RunningG1_256Witnesses) ||
		len(transcript.RunningG1_256Witnesses) !=
			len(a.LatestTranscript.RunningG1_256Witnesses)+1 {
		return errors.Wrap(
			errors.New("invalid witnesses"),
			"apply transcript",
		)
	}
	if len(a.UpdatedTranscript.RunningG2_256Powers) !=
		len(transcript.RunningG2_256Powers) ||
		len(transcript.RunningG2_256Powers) !=
			len(a.LatestTranscript.RunningG2_256Powers)+1 {
		return errors.Wrap(
			errors.New("invalid g2^256 powers"),
			"apply transcript",
		)
	}

	// Byte-compare and decompress every G1 power.
	g1s := []*curves.PointBls48581G1{}
	for i := range a.UpdatedTranscript.G1Powers {
		if !bytes.Equal(
			a.UpdatedTranscript.G1Powers[i].KeyValue,
			transcript.G1Powers[i].KeyValue,
		) {
			return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
		}

		g1 := &curves.PointBls48581G1{}
		x, err := g1.FromAffineCompressed(a.UpdatedTranscript.G1Powers[i].KeyValue)
		if err != nil {
			return errors.Wrap(err, "apply transcript")
		}
		g1, _ = x.(*curves.PointBls48581G1)

		g1s = append(g1s, g1)
	}

	// Byte-compare and decompress every G2 power.
	g2s := []*curves.PointBls48581G2{}
	for i := range a.UpdatedTranscript.G2Powers {
		if !bytes.Equal(
			a.UpdatedTranscript.G2Powers[i].KeyValue,
			transcript.G2Powers[i].KeyValue,
		) {
			return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
		}

		g2 := &curves.PointBls48581G2{}
		x, err := g2.FromAffineCompressed(a.UpdatedTranscript.G2Powers[i].KeyValue)
		if err != nil {
			return errors.Wrap(err, "apply transcript")
		}
		g2, _ = x.(*curves.PointBls48581G2)

		g2s = append(g2s, g2)
	}

	// Byte-compare and decompress the running G1 witnesses.
	g1Witnesses := []*curves.PointBls48581G1{}
	for i := range a.UpdatedTranscript.RunningG1_256Witnesses {
		if !bytes.Equal(
			a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
			transcript.RunningG1_256Witnesses[i].KeyValue,
		) {
			return errors.Wrap(errors.New("invalid g1 witnesses"), "apply transcript")
		}

		g1w := &curves.PointBls48581G1{}
		w, err := g1w.FromAffineCompressed(
			a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
		)
		if err != nil {
			return errors.Wrap(err, "apply transcript")
		}
		g1w, _ = w.(*curves.PointBls48581G1)

		g1Witnesses = append(g1Witnesses, g1w)
	}

	// Byte-compare and decompress the running G2^256 powers.
	g2Powers := []*curves.PointBls48581G2{}
	for i := range a.UpdatedTranscript.RunningG2_256Powers {
		if !bytes.Equal(
			a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
			transcript.RunningG2_256Powers[i].KeyValue,
		) {
			return errors.Wrap(
				errors.New("invalid g2^256 powers"),
				"apply transcript",
			)
		}

		g2w := &curves.PointBls48581G2{}
		w, err := g2w.FromAffineCompressed(
			a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
		)
		if err != nil {
			return errors.Wrap(err, "apply transcript")
		}
		g2w, _ = w.(*curves.PointBls48581G2)

		g2Powers = append(g2Powers, g2w)
	}

	// The newest running G2^256 power must equal the transcript's highest
	// G2 power.
	if !g2Powers[len(g2Powers)-1].Equal(g2s[len(g2s)-1]) {
		return errors.Wrap(
			errors.New("invalid running g2^256 power"),
			"apply transcript",
		)
	}

	// All previously recorded running entries must be carried over intact.
	for i := 0; i < len(a.LatestTranscript.RunningG1_256Witnesses); i++ {
		if !bytes.Equal(
			a.LatestTranscript.RunningG1_256Witnesses[i].KeyValue,
			a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
		) {
			return errors.Wrap(
				errors.New("running witness mismatch"),
				"apply transcript",
			)
		}
	}

	for i := 0; i < len(a.LatestTranscript.RunningG2_256Powers); i++ {
		if !bytes.Equal(
			a.LatestTranscript.RunningG2_256Powers[i].KeyValue,
			a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
		) {
			return errors.Wrap(
				errors.New("running g2^256 power mismatch"),
				"apply transcript",
			)
		}
	}

	// Check (1): pairs (g1_i, -g2^x) and (g1_{i+1}, g2) so the product of
	// pairings is one iff each g1_{i+1} = x * g1_i, where g2s[1] = g2^x.
	mp := []curves.PairingPoint{}
	mpg2 := curves.BLS48581G2().Point.Generator().(curves.PairingPoint)
	mpg2n := g2s[1].Neg().(curves.PairingPoint)
	for i := 0; i < len(g1s)-1; i++ {
		mp = append(mp, g1s[i])
		mp = append(mp, mpg2n)
		mp = append(mp, g1s[i+1])
		mp = append(mp, mpg2)
	}

	// Check (2): the symmetric progression check on the G2 side, using
	// g1s[1] = g1^x as the ratio.
	mp2 := []curves.PairingPoint{}
	mpg1 := curves.BLS48581G1().Point.Generator().(curves.PairingPoint)
	mpg1n := g1s[1].Neg().(curves.PairingPoint)
	for i := 0; i < len(g2s)-1; i++ {
		mp2 = append(mp2, mpg1n)
		mp2 = append(mp2, g2s[i])
		mp2 = append(mp2, mpg1)
		mp2 = append(mp2, g2s[i+1])
	}

	l := g1s[0].MultiPairing(mp...)
	if !l.IsOne() {
		return errors.Wrap(
			errors.New("pairing check failed for g1s"),
			"apply transcript",
		)
	}

	l = g1s[0].MultiPairing(mp2...)
	if !l.IsOne() {
		return errors.Wrap(
			errors.New("pairing check failed for g2s"),
			"apply transcript",
		)
	}

	// Check (3): each running witness must link consecutive running G2^256
	// powers.
	mp3 := []curves.PairingPoint{}
	for i := 0; i < len(g2Powers)-1; i++ {
		mp3 = append(mp3, g1Witnesses[i+1].Neg().(curves.PairingPoint))
		mp3 = append(mp3, g2Powers[i])
		mp3 = append(mp3, mpg1)
		mp3 = append(mp3, g2Powers[i+1])
	}

	l = g1s[0].MultiPairing(mp3...)
	if !l.IsOne() {
		return errors.Wrap(
			errors.New("pairing check failed for witnesses"),
			"apply transcript",
		)
	}

	// All checks passed: promote the updated transcript.
	a.LatestTranscript = a.UpdatedTranscript
	a.UpdatedTranscript = nil

	return nil
}

View File

@ -0,0 +1,326 @@
package application
import (
"crypto"
"crypto/rand"
"testing"
"github.com/cloudflare/circl/sign/ed448"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// TestApplyTranscript exercises the happy path of applyTranscript: a new
// contributor multiplies every previous power (old^i) by a fresh secret
// (tau^i) and supplies consistent running witnesses/powers, so the update
// must be accepted.
func TestApplyTranscript(t *testing.T) {
	// Secret of the prior contribution and its successive powers.
	old := curves.BLS48581G1().Scalar.Random(rand.Reader)
	old2 := old.Mul(old)
	old3 := old2.Mul(old)
	// Fresh contribution secret and its successive powers.
	tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
	tau2 := tau.Mul(tau)
	tau3 := tau2.Mul(tau)
	tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
	proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
	require.NoError(t, err)
	// The prover ed448-signs its public contribution point.
	proverSig, err := proverKey.Sign(
		rand.Reader,
		tauPubG2.ToAffineCompressed(),
		crypto.Hash(0),
	)
	require.NoError(t, err)
	blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
	key := tau.Bytes()
	// Core_Sign expects the scalar bytes in the opposite order; reverse.
	for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
		key[i], key[j] = key[j], key[i]
	}
	if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
		require.Fail(t, "could not sign")
	}
	blsSig := blsSignature[:]
	// Updated transcript: generator^(old*tau)^i, i.e. the previous powers
	// correctly extended by the new secret.
	updatedTranscript := &protobufs.CeremonyTranscript{
		G1Powers: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old2,
				).Mul(tau2).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					old3,
				).Mul(tau3).ToAffineCompressed(),
			},
		},
		G2Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
		},
		RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau,
				).ToAffineCompressed(),
			},
		},
		RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
		},
	}
	a := &CeremonyApplication{
		StateCount: 0,
		RoundCount: 0,
		LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
		FinalCommits: []*protobufs.CeremonyTranscriptCommit{
			{
				ProverSignature: &protobufs.Ed448Signature{
					Signature: proverSig,
					PublicKey: &protobufs.Ed448PublicKey{
						KeyValue: proverPubKey,
					},
				},
				ContributionSignature: &protobufs.BLS48581Signature{
					Signature: blsSig,
					PublicKey: &protobufs.BLS48581G2PublicKey{
						KeyValue: tauPubG2.ToAffineCompressed(),
					},
				},
			},
		},
		// The transcript the update must build upon: powers of `old` only.
		LatestTranscript: &protobufs.CeremonyTranscript{
			G1Powers: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old2,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old3,
					).ToAffineCompressed(),
				},
			},
			G2Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
			RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
			},
			RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
		},
		UpdatedTranscript: updatedTranscript,
	}
	err = a.applyTranscript(updatedTranscript)
	require.NoError(t, err)
}
// TestApplyRewritingTranscriptFails verifies that a contribution which
// discards the previously accumulated secret — submitting G1 powers of
// tau alone instead of extending the existing powers of `old` — is
// rejected by applyTranscript even though the submitted powers are
// internally consistent. The original test asserted require.NoError,
// contradicting its own name and setup; a rewriting transcript must
// produce an error.
func TestApplyRewritingTranscriptFails(t *testing.T) {
	old := curves.BLS48581G1().Scalar.Random(rand.Reader)
	old2 := old.Mul(old)
	old3 := old2.Mul(old)
	tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
	tau2 := tau.Mul(tau)
	tau3 := tau2.Mul(tau)
	tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
	proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
	require.NoError(t, err)
	proverSig, err := proverKey.Sign(
		rand.Reader,
		tauPubG2.ToAffineCompressed(),
		crypto.Hash(0),
	)
	require.NoError(t, err)
	blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
	key := tau.Bytes()
	// Core_Sign expects the scalar bytes in the opposite order; reverse.
	for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
		key[i], key[j] = key[j], key[i]
	}
	if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
		require.Fail(t, "could not sign")
	}
	blsSig := blsSignature[:]
	// Malicious transcript: generator^tau^i — the `old` secret has been
	// thrown away, rewriting ceremony history.
	updatedTranscript := &protobufs.CeremonyTranscript{
		G1Powers: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau,
				).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau2,
				).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau3,
				).ToAffineCompressed(),
			},
		},
		G2Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					tau,
				).ToAffineCompressed(),
			},
		},
		// Pretend we're accumulating still
		RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
			{
				KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G1().Point.Generator().Mul(
					tau,
				).ToAffineCompressed(),
			},
		},
		RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).ToAffineCompressed(),
			},
			{
				KeyValue: curves.BLS48581G2().Point.Generator().Mul(
					old,
				).Mul(tau).ToAffineCompressed(),
			},
		},
	}
	a := &CeremonyApplication{
		StateCount: 0,
		RoundCount: 0,
		LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
		FinalCommits: []*protobufs.CeremonyTranscriptCommit{
			{
				ProverSignature: &protobufs.Ed448Signature{
					Signature: proverSig,
					PublicKey: &protobufs.Ed448PublicKey{
						KeyValue: proverPubKey,
					},
				},
				ContributionSignature: &protobufs.BLS48581Signature{
					Signature: blsSig,
					PublicKey: &protobufs.BLS48581G2PublicKey{
						KeyValue: tauPubG2.ToAffineCompressed(),
					},
				},
			},
		},
		LatestTranscript: &protobufs.CeremonyTranscript{
			G1Powers: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old2,
					).ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G1().Point.Generator().Mul(
						old3,
					).ToAffineCompressed(),
				},
			},
			G2Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
				},
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
			RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
				{
					KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
				},
			},
			RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
				{
					KeyValue: curves.BLS48581G2().Point.Generator().Mul(
						old,
					).ToAffineCompressed(),
				},
			},
		},
		UpdatedTranscript: updatedTranscript,
	}
	err = a.applyTranscript(updatedTranscript)
	// The update does not build on LatestTranscript, so it must be rejected.
	require.Error(t, err)
}

View File

@ -0,0 +1,583 @@
package application
import (
"encoding/binary"
"encoding/json"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/base/simplest"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/extension/kos"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/tecdsa/dkls/v1/sign"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/zkp/schnorr"
)
// MultiplyReceiverRound enumerates the receiver's position in the pairwise
// multiplication protocol state machine.
type MultiplyReceiverRound int
// MultiplySenderRound enumerates the sender's position in the pairwise
// multiplication protocol state machine.
type MultiplySenderRound int
// Receiver rounds, in the order MultiplyReceiver.Next executes them.
const (
	MULTIPLY_RECEIVER_ROUND_UNINITIALIZED = MultiplyReceiverRound(iota)
	MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY
	MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER
	MULTIPLY_RECEIVER_ROUND_3_VERIFY
	MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT
	MULTIPLY_RECEIVER_ROUND_5_MULTIPLY
	MULTIPLY_RECEIVER_ROUND_6_DONE
)
// Sender rounds, in the order MultiplySender.Next executes them.
const (
	MULTIPLY_SENDER_ROUND_UNINITIALIZED = MultiplySenderRound(iota)
	MULTIPLY_SENDER_ROUND_1_INITIALIZED
	MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER
	MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE
	MULTIPLY_SENDER_ROUND_4_VERIFY
	MULTIPLY_SENDER_ROUND_5_MULTIPLY
	MULTIPLY_SENDER_ROUND_6_DONE
)
// Iterator drives one side of a message-passing protocol: Init prepares the
// participant, Next consumes the peer's previous message and returns the
// next outgoing message (nil when there is nothing to send), and IsDone
// reports completion. GetPoints/GetScalars expose the resulting additive
// shares once the protocol has finished.
type Iterator interface {
	Init() error
	Next(message []byte) ([]byte, error)
	IsDone() bool
	GetPoints() []curves.Point
	GetScalars() []curves.Scalar
}
// MultiplySender is the sender side of the pairwise multiplication
// protocol. It contributes the alpha inputs and — note — acts as the
// *receiver* in the underlying simplest-OT exchange.
type MultiplySender struct {
	// seed is the shared entropy from which OT and per-index MUL seeds
	// are derived (see Init and Next).
	seed [32]byte
	// alphas are the sender's scalar inputs, one multiplier instance each.
	alphas []curves.Scalar
	curve  *curves.Curve
	// simplestReceiver holds the base-OT state; populated by Init.
	simplestReceiver *simplest.Receiver
	// sender holds one sign.MultiplySender per alpha, created in round 4.
	sender []*sign.MultiplySender
	// step tracks progress through the round state machine.
	step MultiplySenderRound
}
// MultiplyReceiver is the receiver side of the pairwise multiplication
// protocol. It contributes the beta inputs and — note — acts as the
// *sender* in the underlying simplest-OT exchange.
type MultiplyReceiver struct {
	// seed is the shared entropy from which OT and per-index MUL seeds
	// are derived (see Init and Next).
	seed [32]byte
	// betas are the receiver's scalar inputs, one multiplier instance each.
	betas []curves.Scalar
	curve *curves.Curve
	// simplestSender holds the base-OT state; populated by Init.
	simplestSender *simplest.Sender
	// receiver holds one sign.MultiplyReceiver per beta, created in round 4.
	receiver []*sign.MultiplyReceiver
	// step tracks progress through the round state machine.
	step MultiplyReceiverRound
}
// Compile-time checks that both participants satisfy Iterator.
var _ Iterator = (*MultiplySender)(nil)
var _ Iterator = (*MultiplyReceiver)(nil)
// SchnorrProof is the JSON wire form of a schnorr.Proof, with the scalar
// and point components serialized to bytes.
type SchnorrProof struct {
	C         []byte
	S         []byte
	Statement []byte
}
// KOSRound2Output is the JSON wire form of kos.Round2Output, with each
// tau scalar serialized to bytes.
type KOSRound2Output struct {
	Tau [][][]byte
}
// MultiplyRound2Output is the JSON wire form of sign.MultiplyRound2Output.
type MultiplyRound2Output struct {
	COTRound2Output *KOSRound2Output
	R               [][]byte
	U               []byte
}
// NewMultiplySender constructs an uninitialized sender-side participant
// over the given alpha inputs. Init must be called before Next.
func NewMultiplySender(
	alphas []curves.Scalar,
	curve *curves.Curve,
	seed [32]byte,
) *MultiplySender {
	s := &MultiplySender{
		seed:             seed,
		alphas:           alphas,
		curve:            curve,
		simplestReceiver: nil,
		sender:           []*sign.MultiplySender{},
		step:             MULTIPLY_SENDER_ROUND_UNINITIALIZED,
	}
	return s
}
// NewMultiplyReceiver constructs an uninitialized receiver-side participant
// over the given beta inputs. Init must be called before Next.
func NewMultiplyReceiver(
	betas []curves.Scalar,
	curve *curves.Curve,
	seed [32]byte,
) *MultiplyReceiver {
	r := &MultiplyReceiver{
		seed:           seed,
		betas:          betas,
		curve:          curve,
		simplestSender: nil,
		receiver:       []*sign.MultiplyReceiver{},
		step:           MULTIPLY_RECEIVER_ROUND_UNINITIALIZED,
	}
	return r
}
// Init derives the OT-domain-separated seed and prepares the simplest-OT
// receiver state, then advances to round 1. Returns any constructor error.
func (s *MultiplySender) Init() error {
	material := append([]byte{}, s.seed[:]...)
	material = append(material, []byte("OT")...)
	otSeed := sha3.Sum256(material)
	receiver, err := simplest.NewReceiver(s.curve, 584, otSeed)
	s.simplestReceiver = receiver
	s.step = MULTIPLY_SENDER_ROUND_1_INITIALIZED
	return err
}
// Init derives the OT-domain-separated seed and prepares the simplest-OT
// sender state, then advances to round 1. Returns any constructor error.
func (r *MultiplyReceiver) Init() error {
	material := append([]byte{}, r.seed[:]...)
	material = append(material, []byte("OT")...)
	otSeed := sha3.Sum256(material)
	sender, err := simplest.NewSender(r.curve, 584, otSeed)
	r.simplestSender = sender
	r.step = MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY
	return err
}
// Next advances the sender's state machine by one round: it consumes the
// receiver's previous message (JSON) and returns the sender's next outgoing
// message (JSON), or nil when the round emits nothing. Unknown or terminal
// steps return (nil, nil).
func (s *MultiplySender) Next(message []byte) ([]byte, error) {
	switch s.step {
	case MULTIPLY_SENDER_ROUND_1_INITIALIZED:
		// Nothing to send yet; wait for the receiver's Schnorr proof.
		s.step = MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER
		return nil, nil
	case MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER:
		// Deserialize the receiver's Schnorr proof and run the base-OT
		// verify-and-pad-transfer step.
		proof := &SchnorrProof{}
		err := json.Unmarshal([]byte(message), proof)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		schnorrC, err := s.curve.Scalar.SetBytes(proof.C)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		schnorrS, err := s.curve.Scalar.SetBytes(proof.S)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		schnorrStatement, err := s.curve.Point.FromAffineCompressed(proof.Statement)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		schnorrProof := &schnorr.Proof{
			C:         schnorrC,
			S:         schnorrS,
			Statement: schnorrStatement,
		}
		receiversMaskedChoice, err :=
			s.simplestReceiver.Round2VerifySchnorrAndPadTransfer(schnorrProof)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		marshaledReceiversMaskedChoice, err := json.Marshal(receiversMaskedChoice)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		s.step = MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE
		return marshaledReceiversMaskedChoice, nil
	case MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE:
		challenge := [][32]byte{}
		err := json.Unmarshal([]byte(message), &challenge)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		challengeResponse, err := s.simplestReceiver.Round4RespondToChallenge(
			challenge,
		)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		marshaledChallengeResponse, err := json.Marshal(challengeResponse)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		s.step = MULTIPLY_SENDER_ROUND_4_VERIFY
		// err is necessarily nil here; the previous code returned
		// errors.Wrap(err, "next"), which is nil anyway but misleading.
		return marshaledChallengeResponse, nil
	case MULTIPLY_SENDER_ROUND_4_VERIFY:
		challengeOpenings := [][2][32]byte{}
		err := json.Unmarshal([]byte(message), &challengeOpenings)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		err = s.simplestReceiver.Round6Verify(challengeOpenings)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		// Base OT complete: derive one multiplier per alpha, each with a
		// domain-separated ("MUL" || index) seed.
		baseOtReceiverOutput := s.simplestReceiver.Output
		for i := 0; i < len(s.alphas); i++ {
			seed := sha3.Sum256(
				append(
					append(
						append([]byte{}, s.seed[:]...),
						[]byte("MUL")...,
					),
					binary.BigEndian.AppendUint64([]byte{}, uint64(i))...,
				),
			)
			sender, err := sign.NewMultiplySender(
				584,
				160,
				baseOtReceiverOutput,
				s.curve,
				seed,
			)
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
			s.sender = append(s.sender, sender)
		}
		s.step = MULTIPLY_SENDER_ROUND_5_MULTIPLY
		return nil, nil
	case MULTIPLY_SENDER_ROUND_5_MULTIPLY:
		round1Outputs := []*kos.Round1Output{}
		err := json.Unmarshal([]byte(message), &round1Outputs)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		if len(round1Outputs) != len(s.alphas) {
			return nil, errors.Wrap(errors.New("incorrect number of outputs"), "next")
		}
		outputs := []*MultiplyRound2Output{}
		for i := 0; i < len(s.alphas); i++ {
			round2Output, err := s.sender[i].Round2Multiply(
				s.alphas[i],
				round1Outputs[i],
			)
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
			// Serialize all scalar outputs to bytes for the JSON wire form.
			tau := [][][]byte{}
			for _, t := range round2Output.COTRound2Output.Tau {
				tBytes := [][]byte{}
				for _, ts := range t {
					tBytes = append(tBytes, ts.Bytes())
				}
				tau = append(tau, tBytes)
			}
			r := [][]byte{}
			for _, rs := range round2Output.R {
				r = append(r, rs.Bytes())
			}
			outputs = append(outputs, &MultiplyRound2Output{
				COTRound2Output: &KOSRound2Output{
					Tau: tau,
				},
				R: r,
				U: round2Output.U.Bytes(),
			})
		}
		marshaledOutputs, err := json.Marshal(outputs)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		s.step = MULTIPLY_SENDER_ROUND_6_DONE
		return marshaledOutputs, nil
	}
	return nil, nil
}
// Next advances the receiver's state machine by one round: it consumes the
// sender's previous message (JSON) and returns the receiver's next outgoing
// message (JSON), or nil when the round emits nothing. Unknown or terminal
// steps return (nil, nil).
func (r *MultiplyReceiver) Next(message []byte) ([]byte, error) {
	switch r.step {
	case MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY:
		// Produce the base-OT Schnorr proof and send its wire form.
		proof, err := r.simplestSender.Round1ComputeAndZkpToPublicKey()
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		schnorrProof := &SchnorrProof{
			C:         proof.C.Bytes(),
			S:         proof.S.Bytes(),
			Statement: proof.Statement.ToAffineCompressed(),
		}
		marshaledProof, err := json.Marshal(schnorrProof)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		r.step = MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER
		return marshaledProof, nil
	case MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER:
		receiversMaskedChoice := [][]byte{}
		err := json.Unmarshal([]byte(message), &receiversMaskedChoice)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		challenge, err := r.simplestSender.Round3PadTransfer(receiversMaskedChoice)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		marshaledChallenge, err := json.Marshal(challenge)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		r.step = MULTIPLY_RECEIVER_ROUND_3_VERIFY
		return marshaledChallenge, nil
	case MULTIPLY_RECEIVER_ROUND_3_VERIFY:
		challengeResponse := [][32]byte{}
		err := json.Unmarshal([]byte(message), &challengeResponse)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		challengeOpenings, err := r.simplestSender.Round5Verify(challengeResponse)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		marshaledChallengeOpenings, err := json.Marshal(challengeOpenings)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		r.step = MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT
		return marshaledChallengeOpenings, nil
	case MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT:
		// Base OT complete: derive one multiplier per beta, each with a
		// domain-separated ("MUL" || index) seed, and emit its round-1
		// output.
		baseOtSenderOutput := r.simplestSender.Output
		outputs := []*kos.Round1Output{}
		for i := 0; i < len(r.betas); i++ {
			seed := sha3.Sum256(
				append(
					append(
						append([]byte{}, r.seed[:]...),
						[]byte("MUL")...,
					),
					binary.BigEndian.AppendUint64([]byte{}, uint64(i))...,
				),
			)
			receiver, err := sign.NewMultiplyReceiver(
				584,
				160,
				baseOtSenderOutput,
				r.curve,
				seed,
			)
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
			r.receiver = append(r.receiver, receiver)
			round1Output, err := r.receiver[i].Round1Initialize(r.betas[i])
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
			outputs = append(outputs, round1Output)
		}
		marshaledOutputs, err := json.Marshal(outputs)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		r.step = MULTIPLY_RECEIVER_ROUND_5_MULTIPLY
		return marshaledOutputs, nil
	case MULTIPLY_RECEIVER_ROUND_5_MULTIPLY:
		round2Output := []*MultiplyRound2Output{}
		err := json.Unmarshal([]byte(message), &round2Output)
		if err != nil {
			return nil, errors.Wrap(err, "next")
		}
		if len(round2Output) != len(r.betas) {
			return nil, errors.Wrap(errors.New("incorrect number of outputs"), "next")
		}
		for i := 0; i < len(r.betas); i++ {
			// Rehydrate the wire form back into scalars.
			rawRound2Output := &sign.MultiplyRound2Output{
				COTRound2Output: &kos.Round2Output{
					Tau: [][]curves.Scalar{},
				},
				R: []curves.Scalar{},
				U: nil,
			}
			for _, t := range round2Output[i].COTRound2Output.Tau {
				tScalars := []curves.Scalar{}
				for _, ts := range t {
					sc, err := r.curve.Scalar.SetBytes(ts)
					if err != nil {
						return nil, errors.Wrap(err, "next")
					}
					tScalars = append(tScalars, sc)
				}
				rawRound2Output.COTRound2Output.Tau = append(
					rawRound2Output.COTRound2Output.Tau,
					tScalars,
				)
			}
			for _, rs := range round2Output[i].R {
				sc, err := r.curve.Scalar.SetBytes(rs)
				if err != nil {
					return nil, errors.Wrap(err, "next")
				}
				rawRound2Output.R = append(rawRound2Output.R, sc)
			}
			rawRound2Output.U, err = r.curve.Scalar.SetBytes(round2Output[i].U)
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
			// NOTE(review): := shadows the outer err inside this loop body;
			// it is checked immediately, so behavior is unaffected.
			err := r.receiver[i].Round3Multiply(rawRound2Output)
			if err != nil {
				return nil, errors.Wrap(err, "next")
			}
		}
		r.step = MULTIPLY_RECEIVER_ROUND_6_DONE
		return nil, nil
	}
	return nil, nil
}
// IsDone reports whether the sender has emitted its final round-6 output.
func (s *MultiplySender) IsDone() bool {
	done := s.step == MULTIPLY_SENDER_ROUND_6_DONE
	return done
}
// IsDone reports whether the receiver has completed its final round.
func (r *MultiplyReceiver) IsDone() bool {
	done := r.step == MULTIPLY_RECEIVER_ROUND_6_DONE
	return done
}
// GetPoints returns, for each alpha input, the curve generator scaled by
// the corresponding multiplier's additive share.
func (s *MultiplySender) GetPoints() []curves.Point {
	points := make([]curves.Point, 0, len(s.alphas))
	for idx := range s.alphas {
		share := s.sender[idx].OutputAdditiveShare
		points = append(points, s.curve.NewGeneratorPoint().Mul(share))
	}
	return points
}
// GetPoints returns, for each beta input, the curve generator scaled by
// the corresponding multiplier's additive share.
func (r *MultiplyReceiver) GetPoints() []curves.Point {
	points := make([]curves.Point, 0, len(r.betas))
	for idx := range r.betas {
		share := r.receiver[idx].OutputAdditiveShare
		points = append(points, r.curve.NewGeneratorPoint().Mul(share))
	}
	return points
}
// GetScalars returns each multiplier's additive share, one per alpha input.
func (s *MultiplySender) GetScalars() []curves.Scalar {
	out := make([]curves.Scalar, 0, len(s.alphas))
	for idx := range s.alphas {
		out = append(out, s.sender[idx].OutputAdditiveShare)
	}
	return out
}
// GetScalars returns each multiplier's additive share, one per beta input.
func (r *MultiplyReceiver) GetScalars() []curves.Scalar {
	out := make([]curves.Scalar, 0, len(r.betas))
	for idx := range r.betas {
		out = append(out, r.receiver[idx].OutputAdditiveShare)
	}
	return out
}
// GetSignatureOfProverKey BLS-signs proverKey under the sender's first
// additive share, returning the signature bytes or an error.
func (s *MultiplySender) GetSignatureOfProverKey(
	proverKey []byte,
) ([]byte, error) {
	sig := make([]byte, int(bls48581.MODBYTES)+1)
	share := s.sender[0].OutputAdditiveShare.Bytes()
	if bls48581.Core_Sign(sig, proverKey, share) == bls48581.BLS_OK {
		return sig, nil
	}
	return nil, errors.Wrap(
		errors.New("could not sign"),
		"get signature of prover key",
	)
}
// GetSignatureOfProverKey BLS-signs proverKey under the receiver's first
// additive share, returning the signature bytes or an error.
func (r *MultiplyReceiver) GetSignatureOfProverKey(
	proverKey []byte,
) ([]byte, error) {
	sig := make([]byte, int(bls48581.MODBYTES)+1)
	share := r.receiver[0].OutputAdditiveShare.Bytes()
	if bls48581.Core_Sign(sig, proverKey, share) == bls48581.BLS_OK {
		return sig, nil
	}
	return nil, errors.Wrap(
		errors.New("could not sign"),
		"get signature of prover key",
	)
}
// SignProverKeyForCommit BLS-signs proverKey under the given commit key,
// returning the signature bytes or an error.
func SignProverKeyForCommit(
	proverKey []byte,
	commitKey curves.Scalar,
) ([]byte, error) {
	sig := make([]byte, int(bls48581.MODBYTES)+1)
	if bls48581.Core_Sign(sig, proverKey, commitKey.Bytes()) == bls48581.BLS_OK {
		return sig, nil
	}
	return nil, errors.Wrap(
		errors.New("could not sign"),
		"sign prover key for commit",
	)
}
// VerifySignatureOfProverKey checks the BLS signature over proverKey
// against the given G2 public point; nil means the signature verified.
func VerifySignatureOfProverKey(
	proverKey []byte,
	signature []byte,
	publicPointG2 curves.Point,
) error {
	pub := publicPointG2.ToAffineCompressed()
	if bls48581.Core_Verify(signature, proverKey, pub) == bls48581.BLS_OK {
		return nil
	}
	return errors.Wrap(
		errors.New("could not verify"),
		"verify signature of prover key",
	)
}

View File

@ -0,0 +1,169 @@
package application
import (
"bytes"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
// ProcessRound executes one bracket round of the ceremony for participant i:
// it determines i's paired peers via GetPairings, runs one multiplication
// protocol per peer (as receiver or sender depending on bracket position),
// and returns the element-wise sum of the resulting additive shares.
// Returns (nil, nil) when i has no pairings in this round.
//
// send(seq, addr, msg) and recv(seq, addr) move protocol messages keyed by
// a sequence number and a from||to address; recv is assumed to block until
// the counterpart message is available (see the test harness).
func ProcessRound(
	i []byte,
	idkKey curves.Scalar,
	round int,
	peers [][]byte,
	peerIdks []curves.Point,
	secrets []curves.Scalar,
	curve *curves.Curve,
	send func(int, []byte, []byte) error,
	recv func(int, []byte) ([]byte, error),
	seed []byte,
) ([]curves.Scalar, error) {
	roundPeers, roundIdks, isReceiver := GetPairings(i, round, peers, peerIdks)
	if roundPeers == nil {
		return nil, nil
	}
	var participants []Iterator
	if isReceiver {
		for _, roundIdk := range roundIdks {
			// Per-peer seed: hash of the shared DH point (peer idk * our
			// key) concatenated with the round seed.
			hashKeySeed := sha3.Sum256(
				append(
					roundIdk.Mul(idkKey).ToAffineCompressed(),
					seed...,
				),
			)
			participant := NewMultiplyReceiver(secrets, curve, hashKeySeed)
			participants = append(participants, participant)
			if err := participant.Init(); err != nil {
				return nil, errors.Wrap(err, "process round")
			}
		}
	} else {
		for _, roundIdk := range roundIdks {
			// Same derivation as above; both sides compute the same DH
			// point, so sender and receiver share each pairwise seed.
			hashKeySeed := sha3.Sum256(
				append(
					roundIdk.Mul(idkKey).ToAffineCompressed(),
					seed...,
				),
			)
			participant := NewMultiplySender(secrets, curve, hashKeySeed)
			participants = append(participants, participant)
			if err := participant.Init(); err != nil {
				return nil, errors.Wrap(err, "process round")
			}
		}
	}
	// Run every pairwise protocol concurrently; each goroutine ping-pongs
	// messages with its peer until its iterator reports done.
	eg := errgroup.Group{}
	eg.SetLimit(len(participants))
	for j := range participants {
		j := j
		eg.Go(func() error {
			var msg []byte
			seq := 0
			for !participants[j].IsDone() {
				var err error
				// The receiver waits for the peer's message before
				// stepping; the sender steps first and then waits.
				if isReceiver {
					msg, err = recv(seq, append(append([]byte{}, roundPeers[j]...), i...))
					if err != nil {
						return err
					}
				}
				next, err := participants[j].Next(msg)
				if err != nil {
					return err
				}
				err = send(seq, append(append([]byte{}, i...), roundPeers[j]...), next)
				if err != nil {
					return err
				}
				if !isReceiver {
					msg, err = recv(seq, append(append([]byte{}, roundPeers[j]...), i...))
					if err != nil {
						return err
					}
				}
				seq++
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, errors.Wrap(err, "process round")
	}
	// Sum the per-peer additive shares element-wise across participants.
	sums := make([]curves.Scalar, len(secrets))
	for j := range sums {
		sums[j] = curve.Scalar.Zero()
	}
	for _, participant := range participants {
		scalars := participant.GetScalars()
		for j := range sums {
			sums[j] = sums[j].Add(scalars[j])
		}
	}
	return sums, nil
}
// GetPairings determines, for the given 1-indexed bracket round, which
// peers participant i is paired with, the identity keys of those peers,
// and whether i acts as the receiver ("Bob") in the pairing (true when i's
// subset precedes its complementary subset).
//
// Rounds follow a tournament-bracket doubling: round r pairs adjacent
// subsets of size 2^(r-1). Returns (nil, nil, false) when i is not in
// peers, the round is out of range, or the peer list cannot support the
// requested round. Assumes len(peerIdks) >= len(peers).
func GetPairings(i []byte, round int, peers [][]byte, peerIdks []curves.Point) (
	[][]byte,
	[]curves.Point,
	bool,
) {
	n := len(peers)
	// Rounds are 1-indexed; a non-positive round would otherwise panic on
	// the negative shifts below (n >> round, 1 << (round-1)).
	if round < 1 {
		return nil, nil, false // invalid round
	}
	// Locate i in the peer list (1-indexed position).
	index := -1
	for j := 0; j < n; j++ {
		if bytes.Equal([]byte(peers[j]), []byte(i)) {
			index = j + 1
			break
		}
	}
	if index < 1 || index > n {
		return nil, nil, false // invalid input
	}
	power := uint64(n) >> round
	if power == 0 {
		return nil, nil, false // rounds exceeded
	}
	// Find the size of the subset for this round
	subsetSize := 1 << (round - 1)
	// Determine the subset that i belongs to
	subsetIndex := (index - 1) / subsetSize
	// If subsetIndex is odd, i's pairings are in the subset before it
	// If subsetIndex is even, i's pairings are in the subset after it
	complementarySubsetStart := 0
	if subsetIndex%2 == 0 {
		complementarySubsetStart = (subsetIndex+1)*subsetSize + 1
	} else {
		complementarySubsetStart = subsetIndex*subsetSize - subsetSize + 1
	}
	// Guard peer counts that are not a multiple of the round's subset
	// size: the complementary subset would otherwise index past the end
	// of peers/peerIdks and panic.
	if complementarySubsetStart+subsetSize-1 > n {
		return nil, nil, false // peer count does not support this round
	}
	// Generate the pairings
	pairings := make([][]byte, subsetSize)
	idks := make([]curves.Point, subsetSize)
	for j := 0; j < subsetSize; j++ {
		pairings[j] = peers[complementarySubsetStart+j-1]
		idks[j] = peerIdks[complementarySubsetStart+j-1]
	}
	return pairings, idks, (index - 1) < complementarySubsetStart
}

View File

@ -0,0 +1,439 @@
package application_test
import (
"crypto/rand"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/syncmap"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/base/simplest"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
)
// TestPairings checks the bracket schedule produced by GetPairings for 8
// peers: round r pairs complementary subsets of size 2^(r-1); the subset
// positioned later in the list acts as receiver ("Bob"); round 4 exceeds
// log2(8) rounds and yields no pairings.
func TestPairings(t *testing.T) {
	a := []byte{0x01}
	b := []byte{0x02}
	c := []byte{0x03}
	d := []byte{0x04}
	e := []byte{0x05}
	f := []byte{0x06}
	g := []byte{0x07}
	h := []byte{0x08}
	peers := [][]byte{a, b, c, d, e, f, g, h}
	idks := []curves.Point{
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
		curves.ED448().Point.Generator(),
	}
	// Round 1: adjacent singletons pair up.
	a1pairing, _, isABob := application.GetPairings(a, 1, peers, idks)
	b1pairing, _, isBBob := application.GetPairings(b, 1, peers, idks)
	c1pairing, _, isCBob := application.GetPairings(c, 1, peers, idks)
	d1pairing, _, isDBob := application.GetPairings(d, 1, peers, idks)
	e1pairing, _, isEBob := application.GetPairings(e, 1, peers, idks)
	f1pairing, _, isFBob := application.GetPairings(f, 1, peers, idks)
	g1pairing, _, isGBob := application.GetPairings(g, 1, peers, idks)
	h1pairing, _, isHBob := application.GetPairings(h, 1, peers, idks)
	require.ElementsMatch(t, a1pairing, [][]byte{b})
	require.ElementsMatch(t, b1pairing, [][]byte{a})
	require.ElementsMatch(t, c1pairing, [][]byte{d})
	require.ElementsMatch(t, d1pairing, [][]byte{c})
	require.ElementsMatch(t, e1pairing, [][]byte{f})
	require.ElementsMatch(t, f1pairing, [][]byte{e})
	require.ElementsMatch(t, g1pairing, [][]byte{h})
	require.ElementsMatch(t, h1pairing, [][]byte{g})
	require.ElementsMatch(t,
		[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
		[]bool{false, true, false, true, false, true, false, true},
	)
	// Round 2: adjacent pairs face each other.
	a2pairing, _, isABob := application.GetPairings(a, 2, peers, idks)
	b2pairing, _, isBBob := application.GetPairings(b, 2, peers, idks)
	c2pairing, _, isCBob := application.GetPairings(c, 2, peers, idks)
	d2pairing, _, isDBob := application.GetPairings(d, 2, peers, idks)
	e2pairing, _, isEBob := application.GetPairings(e, 2, peers, idks)
	f2pairing, _, isFBob := application.GetPairings(f, 2, peers, idks)
	g2pairing, _, isGBob := application.GetPairings(g, 2, peers, idks)
	h2pairing, _, isHBob := application.GetPairings(h, 2, peers, idks)
	require.ElementsMatch(t, a2pairing, [][]byte{c, d})
	require.ElementsMatch(t, b2pairing, [][]byte{c, d})
	require.ElementsMatch(t, c2pairing, [][]byte{a, b})
	require.ElementsMatch(t, d2pairing, [][]byte{a, b})
	require.ElementsMatch(t, e2pairing, [][]byte{g, h})
	require.ElementsMatch(t, f2pairing, [][]byte{g, h})
	require.ElementsMatch(t, g2pairing, [][]byte{e, f})
	require.ElementsMatch(t, h2pairing, [][]byte{e, f})
	require.ElementsMatch(t,
		[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
		[]bool{false, false, true, true, false, false, true, true},
	)
	// Round 3: the two halves of the peer list face each other.
	a3pairing, _, isABob := application.GetPairings(a, 3, peers, idks)
	b3pairing, _, isBBob := application.GetPairings(b, 3, peers, idks)
	c3pairing, _, isCBob := application.GetPairings(c, 3, peers, idks)
	d3pairing, _, isDBob := application.GetPairings(d, 3, peers, idks)
	e3pairing, _, isEBob := application.GetPairings(e, 3, peers, idks)
	f3pairing, _, isFBob := application.GetPairings(f, 3, peers, idks)
	g3pairing, _, isGBob := application.GetPairings(g, 3, peers, idks)
	h3pairing, _, isHBob := application.GetPairings(h, 3, peers, idks)
	require.ElementsMatch(t, a3pairing, [][]byte{e, f, g, h})
	require.ElementsMatch(t, b3pairing, [][]byte{e, f, g, h})
	require.ElementsMatch(t, c3pairing, [][]byte{e, f, g, h})
	require.ElementsMatch(t, d3pairing, [][]byte{e, f, g, h})
	require.ElementsMatch(t, e3pairing, [][]byte{a, b, c, d})
	require.ElementsMatch(t, f3pairing, [][]byte{a, b, c, d})
	require.ElementsMatch(t, g3pairing, [][]byte{a, b, c, d})
	require.ElementsMatch(t, h3pairing, [][]byte{a, b, c, d})
	require.ElementsMatch(t,
		[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
		[]bool{false, false, false, false, true, true, true, true},
	)
	// Round 4: exceeds the schedule for 8 peers — no pairings at all.
	a4pairing, _, isABob := application.GetPairings(a, 4, peers, idks)
	b4pairing, _, isBBob := application.GetPairings(b, 4, peers, idks)
	c4pairing, _, isCBob := application.GetPairings(c, 4, peers, idks)
	d4pairing, _, isDBob := application.GetPairings(d, 4, peers, idks)
	e4pairing, _, isEBob := application.GetPairings(e, 4, peers, idks)
	f4pairing, _, isFBob := application.GetPairings(f, 4, peers, idks)
	g4pairing, _, isGBob := application.GetPairings(g, 4, peers, idks)
	h4pairing, _, isHBob := application.GetPairings(h, 4, peers, idks)
	require.ElementsMatch(t, a4pairing, [][]byte{})
	require.ElementsMatch(t, b4pairing, [][]byte{})
	require.ElementsMatch(t, c4pairing, [][]byte{})
	require.ElementsMatch(t, d4pairing, [][]byte{})
	require.ElementsMatch(t, e4pairing, [][]byte{})
	require.ElementsMatch(t, f4pairing, [][]byte{})
	require.ElementsMatch(t, g4pairing, [][]byte{})
	require.ElementsMatch(t, h4pairing, [][]byte{})
	require.ElementsMatch(t,
		[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
		[]bool{false, false, false, false, false, false, false, false},
	)
}
// TestProcessRound runs the full 3-round multiplication bracket for 8 peers
// over an in-memory message bus, then checks that the sum of the final
// additive shares equals the product of all original secrets (in the
// exponent): sum_j(share_j) * G == prod_j(secret_j) * G for each slot.
func TestProcessRound(t *testing.T) {
	a := []byte{0x01}
	aKey := curves.ED448().Scalar.Random(rand.Reader)
	aPoint := curves.ED448().Point.Generator().Mul(aKey)
	b := []byte{0x02}
	bKey := curves.ED448().Scalar.Random(rand.Reader)
	bPoint := curves.ED448().Point.Generator().Mul(bKey)
	c := []byte{0x03}
	cKey := curves.ED448().Scalar.Random(rand.Reader)
	cPoint := curves.ED448().Point.Generator().Mul(cKey)
	d := []byte{0x04}
	dKey := curves.ED448().Scalar.Random(rand.Reader)
	dPoint := curves.ED448().Point.Generator().Mul(dKey)
	e := []byte{0x05}
	eKey := curves.ED448().Scalar.Random(rand.Reader)
	ePoint := curves.ED448().Point.Generator().Mul(eKey)
	f := []byte{0x06}
	fKey := curves.ED448().Scalar.Random(rand.Reader)
	fPoint := curves.ED448().Point.Generator().Mul(fKey)
	g := []byte{0x07}
	gKey := curves.ED448().Scalar.Random(rand.Reader)
	gPoint := curves.ED448().Point.Generator().Mul(gKey)
	h := []byte{0x08}
	hKey := curves.ED448().Scalar.Random(rand.Reader)
	hPoint := curves.ED448().Point.Generator().Mul(hKey)
	peerKeys := []curves.Scalar{aKey, bKey, cKey, dKey, eKey, fKey, gKey, hKey}
	peerPoints := [][]byte{
		aPoint.ToAffineCompressed(),
		bPoint.ToAffineCompressed(),
		cPoint.ToAffineCompressed(),
		dPoint.ToAffineCompressed(),
		ePoint.ToAffineCompressed(),
		fPoint.ToAffineCompressed(),
		gPoint.ToAffineCompressed(),
		hPoint.ToAffineCompressed(),
	}
	idkPoints := []curves.Point{
		aPoint,
		bPoint,
		cPoint,
		dPoint,
		ePoint,
		fPoint,
		gPoint,
		hPoint,
	}
	peers := [][]byte{a, b, c, d, e, f, g, h}
	// Each peer contributes a secret x and its square [x, x^2].
	peerSecrets := [][]curves.Scalar{}
	originalPeerSecrets := [][]curves.Scalar{}
	for i := range peers {
		fmt.Printf("generating secrets for peer %d\n", i)
		x := curves.BLS48581G1().Scalar.Random(rand.Reader)
		xs := x.Clone()
		secrets := []curves.Scalar{x}
		originalSecrets := []curves.Scalar{x}
		fmt.Printf("secret %d(%d): %+x\n", i, 0, xs.Bytes())
		for j := 0; j < 1; j++ {
			xs = xs.Mul(x)
			secrets = append(secrets, xs)
			fmt.Printf("secret %d(%d): %+x\n", i, 1, xs.Bytes())
			originalSecrets = append(originalSecrets, xs)
		}
		peerSecrets = append(peerSecrets, secrets)
		originalPeerSecrets = append(originalPeerSecrets, originalSecrets)
	}
	// In-memory message bus: messages keyed by seq byte || address;
	// recv busy-waits until the counterpart send arrives.
	messages := syncmap.Map{}
	send := func(seq int, dst, msg []byte) error {
		fmt.Printf("send %d bytes for seq %d to %+x\n", len(msg), seq, dst)
		b := byte(seq)
		dst = append(append([]byte{}, b), dst...)
		if msg == nil {
			msg = []byte{0x01}
		}
		messages.Store(string(dst), string(msg))
		return nil
	}
	recv := func(seq int, src []byte) ([]byte, error) {
		fmt.Printf("recv %d from %+x\n", seq, src)
		b := byte(seq)
		bsrc := append(append([]byte{}, b), src...)
		msg, ok := messages.LoadAndDelete(string(bsrc))
		for !ok {
			fmt.Printf("no message yet, waiting for recv %d from %+x\n", seq, src)
			time.Sleep(100 * time.Millisecond)
			msg, ok = messages.LoadAndDelete(string(bsrc))
		}
		return []byte(msg.(string)), nil
	}
	// Run rounds 1-3; all 8 peers execute each round concurrently and the
	// round's output shares become the next round's inputs.
	for j := 1; j < 4; j++ {
		eg := errgroup.Group{}
		eg.SetLimit(8)
		for i := range peers {
			i := i
			eg.Go(func() error {
				fmt.Printf("running round %d for %d\n", j, i)
				newSecrets, err := application.ProcessRound(
					peerPoints[i],
					peerKeys[i],
					j,
					peerPoints,
					idkPoints,
					peerSecrets[i],
					curves.BLS48581G1(),
					send,
					recv,
					[]byte{0x01},
				)
				require.NoError(t, err)
				for s := range newSecrets {
					fmt.Printf("secret %d(%d): %+x\n", i, s, newSecrets[s].Bytes())
				}
				peerSecrets[i] = newSecrets
				return err
			})
		}
		err := eg.Wait()
		require.NoError(t, err)
	}
	// Expected: generator times the product of all original secrets.
	checks := []curves.Point{}
	for i := 0; i < len(originalPeerSecrets[0]); i++ {
		mul := curves.BLS48581G1().Scalar.One()
		for j := 0; j < len(originalPeerSecrets); j++ {
			mul = mul.Mul(originalPeerSecrets[j][i])
		}
		checks = append(checks, curves.BLS48581G1().Point.Generator().Mul(mul))
	}
	// Actual: sum of every peer's final additive share, in the exponent.
	result := []curves.Point{}
	for i := 0; i < len(peerSecrets[0]); i++ {
		var add curves.Point = nil
		for j := 0; j < len(peerSecrets); j++ {
			if add == nil {
				add = curves.BLS48581G1().Point.Generator().Mul(peerSecrets[j][i])
			} else {
				add = add.Add(
					curves.BLS48581G1().Point.Generator().Mul(peerSecrets[j][i]),
				)
			}
		}
		result = append(result, add)
	}
	for i := range checks {
		require.Equal(t, true, checks[i].Equal(result[i]))
	}
}
// TestCompositeConstructionOfBLS runs the full two-party multiply-to-add
// share conversion over BLS48-581, tunneling every protocol round through a
// double ratchet channel bootstrapped via X3DH over ED448, then checks that:
//   - the additive share points recombine to g^(alpha*beta) and
//     g^(alpha^2*beta^2);
//   - both parties produce 74-byte BLS signatures over their prover keys
//     that verify against their own message and fail against the other's.
func TestCompositeConstructionOfBLS(t *testing.T) {
	// needed to verify signatures
	bls48581.Init()

	curve := curves.BLS48581G1()
	hashKeySeed := [simplest.DigestSize]byte{}
	_, err := rand.Read(hashKeySeed[:])
	require.NoError(t, err)

	// Two multiplicative inputs per side; the second is the square of the
	// first so multi-share conversion is exercised.
	alpha := curve.Scalar.Random(rand.Reader)
	beta := curve.Scalar.Random(rand.Reader)
	alpha2 := alpha.Mul(alpha)
	beta2 := beta.Mul(beta)

	sender := application.NewMultiplySender([]curves.Scalar{alpha, alpha2}, curve, hashKeySeed)
	receiver := application.NewMultiplyReceiver([]curves.Scalar{beta, beta2}, curve, hashKeySeed)

	var senderMsg []byte = nil
	var receiverMsg []byte = nil

	sErr := sender.Init()
	require.NoError(t, sErr)
	rErr := receiver.Init()
	require.NoError(t, rErr)

	// Bootstrap a double ratchet in each direction from an X3DH agreement so
	// the OT rounds below travel encrypted, as they would in production.
	x448SendingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
	x448SendingEphemeralPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
	x448ReceivingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
	x448ReceivingSignedPrePrivateKey := curves.ED448().Scalar.Random(rand.Reader)
	x448SendingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingIdentityPrivateKey)
	x448SendingEphemeralKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingEphemeralPrivateKey)
	x448ReceivingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingIdentityPrivateKey)
	x448ReceivingSignedPreKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingSignedPrePrivateKey)

	senderResult := crypto.SenderX3DH(
		x448SendingIdentityPrivateKey,
		x448SendingEphemeralPrivateKey,
		x448ReceivingIdentityKey,
		x448ReceivingSignedPreKey,
		96,
	)
	receiverResult := crypto.ReceiverX3DH(
		x448ReceivingIdentityPrivateKey,
		x448ReceivingSignedPrePrivateKey,
		x448SendingIdentityKey,
		x448SendingEphemeralKey,
		96,
	)

	// The 96-byte shared secret is split into root key and the two chain
	// keys for the ratchet.
	drSender, err := crypto.NewDoubleRatchetParticipant(
		senderResult[:32],
		senderResult[32:64],
		senderResult[64:],
		true,
		x448SendingEphemeralPrivateKey,
		x448ReceivingSignedPreKey,
		curves.ED448(),
		nil,
	)
	require.NoError(t, err)
	drReceiver, err := crypto.NewDoubleRatchetParticipant(
		receiverResult[:32],
		receiverResult[32:64],
		receiverResult[64:],
		false,
		x448ReceivingSignedPrePrivateKey,
		x448SendingEphemeralKey,
		curves.ED448(),
		nil,
	)
	require.NoError(t, err)

	// Drive the protocol to completion, ratchet-encrypting each message in
	// both directions.
	for !sender.IsDone() && !receiver.IsDone() {
		senderMsg, err = sender.Next(receiverMsg)
		require.NoError(t, err)
		senderEnvelope, err := drSender.RatchetEncrypt(senderMsg)
		require.NoError(t, err)
		senderMsg, err = drReceiver.RatchetDecrypt(senderEnvelope)
		require.NoError(t, err)

		receiverMsg, err = receiver.Next(senderMsg)
		require.NoError(t, err)
		receiverEnvelope, err := drReceiver.RatchetEncrypt(receiverMsg)
		require.NoError(t, err)
		receiverMsg, err = drSender.RatchetDecrypt(receiverEnvelope)
		require.NoError(t, err)
	}

	// The additive share points must sum to the multiplicative product in
	// the exponent.
	senderPoints := sender.GetPoints()
	receiverPoints := receiver.GetPoints()
	generator := alpha.Point().Generator()
	product := generator.Mul(alpha).Mul(beta)
	sum := senderPoints[0].Add(receiverPoints[0])
	product2 := generator.Mul(alpha2).Mul(beta2)
	sum2 := senderPoints[1].Add(receiverPoints[1])

	fmt.Println(alpha.Bytes())
	fmt.Println(beta.Bytes())
	fmt.Println(curves.BLS48581G1().Point.Generator().ToAffineCompressed())
	fmt.Println(sum.ToAffineCompressed())
	fmt.Println(product.ToAffineCompressed())

	require.True(t, product.Equal(sum))
	require.True(t, product2.Equal(sum2))

	// Minimal-signature-size BLS signatures are 74 bytes. require.Len
	// replaces the original require.Equal calls, which had the testify
	// expected/actual arguments swapped.
	sendSig, err := sender.GetSignatureOfProverKey([]byte{0x01})
	require.NoError(t, err)
	require.Len(t, sendSig, 74)
	recvSig, err := receiver.GetSignatureOfProverKey([]byte{0x02})
	require.NoError(t, err)
	require.Len(t, recvSig, 74)

	require.NoError(t, application.VerifySignatureOfProverKey(
		[]byte{0x01},
		sendSig,
		curves.BLS48581G2().Point.Generator().Mul(
			sender.GetScalars()[0],
		),
	))
	require.NoError(t, application.VerifySignatureOfProverKey(
		[]byte{0x02},
		recvSig,
		curves.BLS48581G2().Point.Generator().Mul(
			receiver.GetScalars()[0],
		),
	))

	// Cross-checks: each signature must fail against the other's message.
	require.Error(t, application.VerifySignatureOfProverKey(
		[]byte{0x02},
		sendSig,
		curves.BLS48581G2().Point.Generator().Mul(
			sender.GetScalars()[0],
		),
	))
	require.Error(t, application.VerifySignatureOfProverKey(
		[]byte{0x01},
		recvSig,
		curves.BLS48581G2().Point.Generator().Mul(
			receiver.GetScalars()[0],
		),
	))
}

File diff suppressed because it is too large Load Diff

View File

@ -1,95 +0,0 @@
package nop
import (
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// NopExecutionEngine is a no-op implementation of execution.ExecutionEngine:
// it claims the catch-all intrinsic address, logs clock frames it sees, and
// never produces output messages.
type NopExecutionEngine struct {
	logger *zap.Logger
}
// NewNopExecutionEngine constructs the no-op engine. A nil logger is a
// programmer error and panics rather than deferring the failure.
func NewNopExecutionEngine(
	logger *zap.Logger,
) *NopExecutionEngine {
	if logger == nil {
		panic(errors.New("logger is nil"))
	}

	engine := &NopExecutionEngine{}
	engine.logger = logger
	return engine
}
var _ execution.ExecutionEngine = (*NopExecutionEngine)(nil)
// GetName implements ExecutionEngine.
func (*NopExecutionEngine) GetName() string {
	const engineName = "nop"
	return engineName
}
// GetSupportedApplications implements ExecutionEngine. The nop engine claims
// the all-ones (catch-all) 32-byte intrinsic application address.
func (
	*NopExecutionEngine,
) GetSupportedApplications() []*protobufs.Application {
	intrinsicAddress := make([]byte, 32)
	for i := range intrinsicAddress {
		intrinsicAddress[i] = 0xff
	}

	return []*protobufs.Application{
		{
			Address:          intrinsicAddress,
			ExecutionContext: protobufs.ExecutionContext_EXECUTION_CONTEXT_INTRINSIC,
		},
	}
}
// Start implements ExecutionEngine. The nop engine has nothing to start, so
// it immediately reports success on the returned channel.
func (e *NopExecutionEngine) Start() <-chan error {
	// Buffered so the nil result can be deposited synchronously — the
	// original spawned a goroutine solely to send on an unbuffered channel.
	errChan := make(chan error, 1)
	errChan <- nil
	return errChan
}
// Stop implements ExecutionEngine. There is nothing to tear down (force is
// ignored), so success is reported immediately on the returned channel.
func (*NopExecutionEngine) Stop(force bool) <-chan error {
	// Buffered so the nil result can be deposited without a goroutine and
	// without blocking if the caller never reads it.
	errChan := make(chan error, 1)
	errChan <- nil
	return errChan
}
// ProcessMessage implements ExecutionEngine. It unwraps the message payload
// from its Any envelope and, if it is a clock frame, fully decodes it and
// emits a log line. No output messages are ever produced.
func (e *NopExecutionEngine) ProcessMessage(
	address []byte,
	message *protobufs.Message,
) ([]*protobufs.Message, error) {
	// Renamed from `any`, which shadowed Go's builtin `any` type alias.
	payload := &anypb.Any{}
	if err := proto.Unmarshal(message.Payload, payload); err != nil {
		return nil, errors.Wrap(err, "could not unmarshal message")
	}

	if payload.TypeUrl == protobufs.ClockFrameType {
		frame := &protobufs.ClockFrame{}
		if err := payload.UnmarshalTo(frame); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal clock frame")
		}

		e.logger.Info("nop")
	}

	return nil, nil
}

View File

@ -32,8 +32,10 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gtank/merlin v0.1.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 // indirect
github.com/multiformats/go-multiaddr v0.10.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@ -214,6 +214,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -357,6 +359,8 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUM
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0=
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=

View File

@ -24,7 +24,7 @@ type FileKeyManager struct {
}
var UnsupportedKeyTypeErr = errors.New("unsupported key type")
var KeyNotFoundErr = errors.New("unsupported key type")
var KeyNotFoundErr = errors.New("key not found")
func NewFileKeyManager(
keyStoreConfig *config.KeyConfig,

View File

@ -4,8 +4,10 @@ import (
"encoding/hex"
"flag"
"fmt"
"io/fs"
"os"
"os/signal"
"path/filepath"
"syscall"
"github.com/libp2p/go-libp2p/core/crypto"
@ -13,6 +15,7 @@ import (
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/app"
"source.quilibrium.com/quilibrium/monorepo/node/config"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
)
var (
@ -58,6 +61,8 @@ func main() {
panic(err)
}
clearIfTestData(*configDirectory, nodeConfig)
if *dbConsole {
console, err := app.NewDBConsole(nodeConfig)
if err != nil {
@ -68,6 +73,9 @@ func main() {
return
}
fmt.Println("Loading ceremony state and starting node...")
qcrypto.Init()
node, err := app.NewNode(nodeConfig)
if err != nil {
panic(err)
@ -78,6 +86,36 @@ func main() {
node.Stop()
}
// clearIfTestData wipes the node database the first time this release runs
// against a config directory created before the RELEASE_VERSION marker
// existed; the marker file records that the wipe already happened. Any
// failure during the wipe or marker write is fatal.
func clearIfTestData(configDir string, nodeConfig *config.Config) {
	_, err := os.Stat(filepath.Join(configDir, "RELEASE_VERSION"))
	if os.IsNotExist(err) {
		fmt.Println("Clearing test data...")
		if err := os.RemoveAll(nodeConfig.DB.Path); err != nil {
			panic(err)
		}

		// os.WriteFile replaces the original open/write/close sequence,
		// which leaked the handle when Write failed, and guarantees the
		// marker is closed on every path. Contents encode version 1.0.0.
		err := os.WriteFile(
			filepath.Join(configDir, "RELEASE_VERSION"),
			[]byte{0x01, 0x00, 0x00},
			fs.FileMode(0700),
		)
		if err != nil {
			panic(err)
		}
	}
}
func printPeerID(p2pConfig *config.P2PConfig) {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
@ -135,5 +173,5 @@ func printLogo() {
func printVersion() {
fmt.Println(" ")
fmt.Println(" Quilibrium Node - v1.0.0 DHT Verification")
fmt.Println(" Quilibrium Node - v1.0.0 Dawn")
}

View File

@ -4,8 +4,10 @@ import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"math/big"
"sync"
"time"
"github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
@ -76,12 +78,14 @@ func NewBlossomSub(
go discoverPeers(p2pConfig, ctx, logger, h)
// TODO: turn into an option flag for console logging, this is too noisy for
// default logging behavior
var tracer *blossomsub.JSONTracer
if p2pConfig.TraceLogFile == "" {
tracer, err = blossomsub.NewStdoutJSONTracer()
if err != nil {
panic(errors.Wrap(err, "error building stdout tracer"))
}
// tracer, err = blossomsub.NewStdoutJSONTracer()
// if err != nil {
// panic(errors.Wrap(err, "error building stdout tracer"))
// }
} else {
tracer, err = blossomsub.NewJSONTracer(p2pConfig.TraceLogFile)
if err != nil {
@ -89,8 +93,9 @@ func NewBlossomSub(
}
}
blossomOpts := []blossomsub.Option{
blossomsub.WithEventTracer(tracer),
blossomOpts := []blossomsub.Option{}
if tracer != nil {
blossomOpts = append(blossomOpts, blossomsub.WithEventTracer(tracer))
}
params := mergeDefaults(p2pConfig)
@ -236,30 +241,62 @@ func initDHT(
if err = kademliaDHT.Bootstrap(ctx); err != nil {
panic(err)
}
var wg sync.WaitGroup
logger.Info("connecting to bootstrap", zap.String("peer_id", h.ID().String()))
reconnect := func() {
var wg sync.WaitGroup
defaultBootstrapPeers := p2pConfig.BootstrapPeers
logger.Info("connecting to bootstrap", zap.String("peer_id", h.ID().String()))
for _, peerAddr := range defaultBootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
}
wg.Add(1)
go func() {
defer wg.Done()
if err := h.Connect(ctx, *peerinfo); err != nil {
logger.Warn("error while connecting to dht peer", zap.Error(err))
defaultBootstrapPeers := p2pConfig.BootstrapPeers
for _, peerAddr := range defaultBootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
}
}()
wg.Add(1)
go func() {
defer wg.Done()
if err := h.Connect(ctx, *peerinfo); err != nil {
logger.Warn("error while connecting to dht peer", zap.Error(err))
}
}()
}
wg.Wait()
}
wg.Wait()
reconnect()
go func() {
for {
time.Sleep(30 * time.Second)
if len(h.Network().Peers()) == 0 {
logger.Info("reconnecting to peers")
reconnect()
}
}
}()
return kademliaDHT
}
// GetBitmaskPeers returns, for every subscribed bitmask, the peer IDs
// currently listed on that bitmask, keyed by the bitmask's hex
// representation. Bitmasks with no peers map to an empty (non-nil) slice.
func (b *BlossomSub) GetBitmaskPeers() map[string][]string {
	peers := map[string][]string{}

	for _, k := range b.bitmaskMap {
		// Compute the hex key once per bitmask — the original re-formatted
		// it for every peer in the inner loop.
		key := fmt.Sprintf("%+x", k.Bitmask())
		peers[key] = []string{}

		for _, p := range k.ListPeers() {
			peers[key] = append(peers[key], p.String())
		}
	}

	return peers
}
// GetPeerstoreCount reports how many peers are recorded in the libp2p host's
// peerstore — peers the node knows about, not necessarily active connections.
func (b *BlossomSub) GetPeerstoreCount() int {
	return len(b.h.Peerstore().Peers())
}

194
node/p2p/public_channel.go Normal file
View File

@ -0,0 +1,194 @@
package p2p
import (
"crypto/rand"
"encoding/binary"
"sync"
"time"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// A simplified P2P channel the pair of actors communicating is public
// knowledge, even though the data itself is encrypted.
type PublicP2PChannel struct {
	// participant performs the double-ratchet encrypt/decrypt for this side.
	participant *crypto.DoubleRatchetParticipant
	// sendMap retains raw sent envelope bytes keyed by sequence number
	// (presumably for retransmission — entries are never pruned; TODO confirm
	// intended lifetime).
	sendMap map[uint64][]byte
	// receiveMap parks decrypted payloads that arrived out of order, keyed
	// by their sequence number.
	receiveMap map[uint64][]byte
	pubSub     PubSub
	// sendFilter is senderIdentifier||receiverIdentifier; receiveFilter is
	// the mirrored ordering.
	sendFilter    []byte
	receiveFilter []byte
	initiator     bool
	// senderSeqNo/receiverSeqNo are the last sent / last delivered sequence
	// numbers; sequence numbers on the wire start at 1.
	senderSeqNo   uint64
	receiverSeqNo uint64
	// receiveChan hands in-order payloads from the pubsub callback to
	// Receive; it is unbuffered.
	receiveChan chan []byte
	// receiveMx guards participant decryption and receiveMap.
	receiveMx sync.Mutex
}
// NewPublicP2PChannel establishes a double-ratchet-encrypted channel between
// two publicly identified parties over pubsub. The initiator derives the
// shared secret with a fresh ephemeral key; the non-initiator derives it
// from its signed pre key. Both sides subscribe to the outbound
// (sender||receiver) and inbound (receiver||sender) bitmasks; inbound
// traffic is routed to handleReceive.
func NewPublicP2PChannel(
	senderIdentifier, receiverIdentifier []byte,
	initiator bool,
	sendingIdentityPrivateKey curves.Scalar,
	sendingSignedPrePrivateKey curves.Scalar,
	receivingIdentityKey curves.Point,
	receivingSignedPreKey curves.Point,
	curve *curves.Curve,
	keyManager keys.KeyManager,
	pubSub PubSub,
) (*PublicP2PChannel, error) {
	// Outbound bitmask: this side's identifier first.
	sendFilter := append(
		append([]byte{}, senderIdentifier...),
		receiverIdentifier...,
	)
	// Inbound bitmask: mirrored ordering.
	receiveFilter := append(
		append([]byte{}, receiverIdentifier...),
		senderIdentifier...,
	)

	channel := &PublicP2PChannel{
		sendMap:       map[uint64][]byte{},
		receiveMap:    map[uint64][]byte{},
		initiator:     initiator,
		sendFilter:    sendFilter,
		receiveFilter: receiveFilter,
		pubSub:        pubSub,
		senderSeqNo:   0,
		receiverSeqNo: 0,
		receiveChan:   make(chan []byte),
	}

	var err error
	var participant *crypto.DoubleRatchetParticipant
	if initiator {
		// Fresh ephemeral key for the initiator's side of the ratchet.
		sendingEphemeralPrivateKey := curve.Scalar.Random(
			rand.Reader,
		)
		x3dh := crypto.SenderX3DH(
			sendingIdentityPrivateKey,
			sendingSignedPrePrivateKey,
			receivingIdentityKey,
			receivingSignedPreKey,
			96,
		)

		// 96-byte secret split: root key, sending chain key, receiving
		// chain key.
		participant, err = crypto.NewDoubleRatchetParticipant(
			x3dh[:32],
			x3dh[32:64],
			x3dh[64:],
			true,
			sendingEphemeralPrivateKey,
			receivingSignedPreKey,
			curve,
			keyManager,
		)
		if err != nil {
			return nil, errors.Wrap(err, "new public p2p channel")
		}
	} else {
		// NOTE(review): the non-initiator also calls SenderX3DH with the
		// same argument order as the initiator — confirm this derivation
		// matches the counterparty's secret (the test suite's receiver path
		// uses ReceiverX3DH with mirrored arguments).
		x3dh := crypto.SenderX3DH(
			sendingIdentityPrivateKey,
			sendingSignedPrePrivateKey,
			receivingIdentityKey,
			receivingSignedPreKey,
			96,
		)

		participant, err = crypto.NewDoubleRatchetParticipant(
			x3dh[:32],
			x3dh[32:64],
			x3dh[64:],
			false,
			sendingSignedPrePrivateKey,
			nil,
			curve,
			keyManager,
		)
		if err != nil {
			return nil, errors.Wrap(err, "new public p2p channel")
		}
	}

	channel.participant = participant

	// Subscribing to the send bitmask with a no-op handler keeps this node
	// a member of that bitmask; only the receive bitmask is handled.
	pubSub.Subscribe(
		sendFilter,
		func(message *pb.Message) error { return nil },
		true,
	)
	pubSub.Subscribe(
		receiveFilter,
		channel.handleReceive,
		true,
	)

	return channel, nil
}
// handleReceive is the pubsub callback for the receive bitmask. It decrypts
// the envelope, reads the 8-byte big-endian sequence prefix, and either hands
// the payload to a waiting Receive call (when it matches the expected
// sequence number) or parks it in receiveMap for later.
func (c *PublicP2PChannel) handleReceive(message *pb.Message) error {
	envelope := &protobufs.P2PChannelEnvelope{}
	if err := proto.Unmarshal(message.Data, envelope); err != nil {
		return errors.Wrap(err, "handle receive")
	}

	// Decryption mutates ratchet state, so it is serialized under receiveMx.
	c.receiveMx.Lock()
	rawData, err := c.participant.RatchetDecrypt(envelope)
	c.receiveMx.Unlock()
	if err != nil {
		return errors.Wrap(err, "handle receive")
	}

	// First 8 bytes are the big-endian sequence number; the rest is payload.
	seqNo := binary.BigEndian.Uint64(rawData[:8])
	if seqNo == c.receiverSeqNo {
		// NOTE(review): receiverSeqNo is read without holding receiveMx, and
		// this send blocks on the unbuffered channel until a Receive call is
		// waiting — confirm the pubsub callback is allowed to block here.
		c.receiveChan <- rawData[8:]
	} else {
		c.receiveMx.Lock()
		c.receiveMap[seqNo] = rawData[8:]
		c.receiveMx.Unlock()
	}

	return nil
}
// Send encrypts message under the channel's ratchet, prefixed with the next
// big-endian sequence number, publishes the envelope on the send bitmask,
// and retains the raw envelope bytes keyed by that sequence number.
func (c *PublicP2PChannel) Send(message []byte) error {
	c.senderSeqNo++

	framed := binary.BigEndian.AppendUint64(nil, c.senderSeqNo)
	framed = append(framed, message...)

	envelope, err := c.participant.RatchetEncrypt(framed)
	if err != nil {
		return errors.Wrap(err, "send")
	}

	rawBytes, err := proto.Marshal(envelope)
	if err != nil {
		return errors.Wrap(err, "send")
	}

	c.sendMap[c.senderSeqNo] = rawBytes

	return errors.Wrap(c.pubSub.PublishToBitmask(c.sendFilter, rawBytes), "send")
}
// Receive returns the payload for the next expected sequence number. It first
// checks receiveMap for a message that arrived out of order (the original
// never did, so such messages were parked by handleReceive and lost), then
// waits up to 20 seconds for in-order delivery on the channel.
func (c *PublicP2PChannel) Receive() ([]byte, error) {
	c.receiverSeqNo++

	// Drain an already-arrived out-of-order message, if any.
	c.receiveMx.Lock()
	if msg, ok := c.receiveMap[c.receiverSeqNo]; ok {
		delete(c.receiveMap, c.receiverSeqNo)
		c.receiveMx.Unlock()
		return msg, nil
	}
	c.receiveMx.Unlock()

	after := time.After(20 * time.Second)
	select {
	case msg := <-c.receiveChan:
		return msg, nil
	case <-after:
		return nil, errors.Wrap(errors.New("timed out"), "receive")
	}
}
// Close unsubscribes from both directions of the channel. It does not tear
// down the underlying pubsub or clear the ratchet state.
func (c *PublicP2PChannel) Close() {
	c.pubSub.Unsubscribe(c.sendFilter, true)
	c.pubSub.Unsubscribe(c.receiveFilter, true)
}

View File

@ -10,6 +10,7 @@ type PubSub interface {
Subscribe(bitmask []byte, handler func(message *pb.Message) error, raw bool)
Unsubscribe(bitmask []byte, raw bool)
GetPeerID() []byte
GetBitmaskPeers() map[string][]string
GetPeerstoreCount() int
GetNetworkPeersCount() int
GetRandomPeer(bitmask []byte) ([]byte, error)

View File

@ -0,0 +1,91 @@
package protobufs
import (
"crypto"
"crypto/rand"
"encoding/binary"
"github.com/cloudflare/circl/sign/ed448"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
// VerifySignature checks the Ed448 signature over the lobby join payload:
// "join" || frame number (big-endian u64) || identity key || signed pre key
// — the same byte layout produced by SignWithProverKey.
func (j *CeremonyLobbyJoin) VerifySignature() error {
	b := binary.BigEndian.AppendUint64([]byte("join"), j.FrameNumber)
	b = append(b, j.IdentityKey.KeyValue...)
	b = append(b, j.SignedPreKey.KeyValue...)

	if !ed448.Verify(
		j.PublicKeySignatureEd448.PublicKey.KeyValue,
		b,
		j.PublicKeySignatureEd448.Signature,
		"",
	) {
		// The wrap context previously read "sign with prover key" — copied
		// from the signing path; this is the verification path.
		return errors.Wrap(errors.New("invalid signature"), "verify signature")
	}

	return nil
}
// SignWithProverKey signs the lobby join payload — "join" || frame number
// (big-endian u64) || identity key || signed pre key — with the supplied
// prover key and returns the raw signature.
func (j *CeremonyLobbyJoin) SignWithProverKey(
	signer crypto.Signer,
) ([]byte, error) {
	payload := binary.BigEndian.AppendUint64([]byte("join"), j.FrameNumber)
	payload = append(payload, j.IdentityKey.KeyValue...)
	payload = append(payload, j.SignedPreKey.KeyValue...)

	// Non edwards signing variants need support to specify hash, edwards
	// variants demand Hash(0) because it does SHA512 under the hood.
	signature, err := signer.Sign(rand.Reader, payload, crypto.Hash(0))
	return signature, errors.Wrap(err, "sign with prover key")
}
// VerifySignature validates the prover's signature over the SHA3-256 digest
// of this share's additive G1 powers, additive G2 powers, and G1 256
// witness, absorbed in that order.
func (t *CeremonyTranscriptShare) VerifySignature() error {
	digest := sha3.New256()

	absorb := func(b []byte) error {
		_, err := digest.Write(b)
		return err
	}

	for _, g1 := range t.AdditiveG1Powers {
		if err := absorb(g1.KeyValue); err != nil {
			return errors.Wrap(err, "verify signature")
		}
	}
	for _, g2 := range t.AdditiveG2Powers {
		if err := absorb(g2.KeyValue); err != nil {
			return errors.Wrap(err, "verify signature")
		}
	}
	if err := absorb(t.AdditiveG1_256Witness.KeyValue); err != nil {
		return errors.Wrap(err, "verify signature")
	}

	return errors.Wrap(
		t.ProverSignature.Verify(digest.Sum(nil)),
		"verify signature",
	)
}
// SignWithProverKey signs the SHA3-256 digest of this share's additive G1
// powers, additive G2 powers, and G1 256 witness — absorbed in the same
// order VerifySignature rebuilds them — with the supplied prover key.
func (t *CeremonyTranscriptShare) SignWithProverKey(
	signer crypto.Signer,
) ([]byte, error) {
	digest := sha3.New256()

	absorb := func(b []byte) error {
		_, err := digest.Write(b)
		return err
	}

	for _, g1 := range t.AdditiveG1Powers {
		if err := absorb(g1.KeyValue); err != nil {
			return nil, errors.Wrap(err, "sign with prover key")
		}
	}
	for _, g2 := range t.AdditiveG2Powers {
		if err := absorb(g2.KeyValue); err != nil {
			return nil, errors.Wrap(err, "sign with prover key")
		}
	}
	if err := absorb(t.AdditiveG1_256Witness.KeyValue); err != nil {
		return nil, errors.Wrap(err, "sign with prover key")
	}

	signature, err := signer.Sign(rand.Reader, digest.Sum(nil), crypto.Hash(0))
	return signature, errors.Wrap(err, "sign with prover key")
}

File diff suppressed because it is too large Load Diff

View File

@ -36,4 +36,91 @@ message CeremonyTranscript {
// The running s^256 G2 powers see notes on running_g1_256_witnesses for why
// we do this.
repeated quilibrium.node.keys.pb.BLS48581G2PublicKey running_g2_256_powers = 4;
}
// Wraps the lobby's phase state machine; lobby_state selects which of the
// oneof phase states below is populated.
message CeremonyLobbyState {
  int32 lobby_state = 1;
  oneof ceremony_state {
    CeremonyOpenState ceremony_open_state = 2;
    CeremonyInProgressState ceremony_in_progress_state = 3;
    CeremonyFinalizingState ceremony_finalizing_state = 4;
    CeremonyValidatingState ceremony_validating_state = 5;
  }
  CeremonyTranscript latest_transcript = 6;
  bytes reward_trie = 7;
}

// Attestation by the signing prover that it observed the named prover as
// live at the given frame.
message CeremonySeenProverAttestation {
  quilibrium.node.keys.pb.Ed448PublicKey seen_prover_key = 1;
  uint64 last_seen_frame = 2;
  quilibrium.node.keys.pb.Ed448Signature prover_signature = 3;
}

// Attestation by the signing prover that it considers the named prover
// dropped, with the last frame it was seen at.
message CeremonyDroppedProverAttestation {
  quilibrium.node.keys.pb.Ed448PublicKey dropped_prover_key = 1;
  uint64 last_seen_frame = 2;
  quilibrium.node.keys.pb.Ed448Signature prover_signature = 3;
}

// A participant's additive contribution to the transcript, signed by its
// prover key.
message CeremonyTranscriptShare {
  repeated quilibrium.node.keys.pb.BLS48581G1PublicKey additive_g1_powers = 1;
  repeated quilibrium.node.keys.pb.BLS48581G2PublicKey additive_g2_powers = 2;
  quilibrium.node.keys.pb.BLS48581G1PublicKey additive_g1_256_witness = 3;
  quilibrium.node.keys.pb.BLS48581G2PublicKey additive_g2_256_witness = 4;
  quilibrium.node.keys.pb.Ed448Signature prover_signature = 5;
}

// Describes the required proof to commit to a transcript to advance a round,
// and as a proof to move to the verification state
message CeremonyTranscriptCommit {
  // Prover key signature over the G1 point of the additive share of the first
  // power.
  quilibrium.node.keys.pb.Ed448Signature prover_signature = 1;
  // BLS short signature over the Ed448 prover public key, using the additive
  // share of the first power.
  quilibrium.node.keys.pb.BLS48581Signature contribution_signature = 2;
}

// Batch of transcript commits submitted to advance the round.
message CeremonyAdvanceRound {
  repeated CeremonyTranscriptCommit commits = 1;
}

// Request to join the open lobby at a given frame, carrying the joiner's
// channel keys and an Ed448 signature binding them to the frame number.
message CeremonyLobbyJoin {
  uint64 frame_number = 1;
  quilibrium.node.keys.pb.X448PublicKey identity_key = 2;
  quilibrium.node.keys.pb.X448PublicKey signed_pre_key = 3;
  quilibrium.node.keys.pb.Ed448Signature public_key_signature_ed448 = 4;
}

// Generic lobby state transition: parallel lists of type URLs and the
// serialized transition payloads they describe.
message CeremonyLobbyStateTransition {
  repeated string type_urls = 1;
  repeated bytes transition_inputs = 2;
}

// Open phase: who has joined, and which participants are preferred for the
// next round.
message CeremonyOpenState {
  repeated CeremonyLobbyJoin joined_participants = 1;
  repeated quilibrium.node.keys.pb.Ed448PublicKey preferred_participants = 2;
}

// In-progress phase: active participants plus liveness/drop attestations and
// round-advance commits.
message CeremonyInProgressState {
  repeated quilibrium.node.keys.pb.Ed448PublicKey active_participants = 1;
  repeated CeremonySeenProverAttestation latest_seen_prover_attestations = 2;
  repeated CeremonyDroppedProverAttestation dropped_participant_attestations = 3;
  repeated CeremonyAdvanceRound transcript_round_advance_commits = 4;
  repeated quilibrium.node.keys.pb.Ed448PublicKey next_round_participants = 5;
}

// Finalizing phase: collected commits and shares awaiting transcript
// assembly.
message CeremonyFinalizingState {
  repeated quilibrium.node.keys.pb.Ed448PublicKey active_participants = 1;
  repeated CeremonySeenProverAttestation latest_seen_prover_attestations = 2;
  repeated CeremonyDroppedProverAttestation dropped_participant_attestations = 3;
  repeated CeremonyTranscriptCommit commits = 4;
  repeated CeremonyTranscriptShare shares = 5;
  repeated quilibrium.node.keys.pb.Ed448PublicKey next_round_participants = 6;
}

// Validating phase: the assembled transcript pending verification.
message CeremonyValidatingState {
  repeated CeremonyTranscriptCommit commits = 1;
  CeremonyTranscript updated_transcript = 2;
  repeated quilibrium.node.keys.pb.Ed448PublicKey next_round_participants = 3;
}

View File

@ -1,15 +1,19 @@
package protobufs
import (
"crypto"
"crypto/rand"
"encoding/binary"
"math/big"
"time"
"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/ff"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/vdf"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
)
func ProveMasterClockFrame(
@ -137,7 +141,7 @@ func (frame *ClockFrame) GetParentSelectorAndDistance() (
parentSelector := new(big.Int).SetBytes(frame.ParentSelector)
pubkey := []byte{}
var pubkey []byte
ed448PublicKey := frame.GetPublicKeySignatureEd448()
if ed448PublicKey != nil {
pubkey = ed448PublicKey.PublicKey.KeyValue
@ -179,7 +183,7 @@ func (frame *ClockFrame) GetPublicKey() ([]byte, error) {
if frame.FrameNumber == 0 {
return make([]byte, 32), nil
}
pubkey := []byte{}
var pubkey []byte
ed448PublicKey := frame.GetPublicKeySignatureEd448()
if ed448PublicKey != nil {
pubkey = ed448PublicKey.PublicKey.KeyValue
@ -197,7 +201,7 @@ func (frame *ClockFrame) GetAddress() ([]byte, error) {
if frame.FrameNumber == 0 {
return make([]byte, 32), nil
}
pubkey := []byte{}
var pubkey []byte
ed448PublicKey := frame.GetPublicKeySignatureEd448()
if ed448PublicKey != nil {
pubkey = ed448PublicKey.PublicKey.KeyValue
@ -217,3 +221,196 @@ func (frame *ClockFrame) GetAddress() ([]byte, error) {
return addressBytes, nil
}
// ProveDataClockFrame extends previousFrame by one frame: it derives the
// proving address from the Ed448 public key, hashes the frame header
// material, runs the VDF at the given difficulty over that hash, signs
// hash||output with the proving key, and assembles the new ClockFrame.
// Only Ed448 proving keys are supported.
func ProveDataClockFrame(
	previousFrame *ClockFrame,
	commitments [][]byte,
	aggregateProofs []*InclusionAggregateProof,
	provingKey crypto.Signer,
	difficulty uint32,
) (*ClockFrame, error) {
	var pubkey []byte
	pubkeyType := keys.KeyTypeEd448
	ed448PublicKey, ok := provingKey.Public().(ed448.PublicKey)
	if ok {
		pubkey = []byte(ed448PublicKey)
	} else {
		return nil, errors.Wrap(
			errors.New("no valid signature provided"),
			"prove clock frame",
		)
	}

	// The frame address is the Poseidon hash of the prover public key.
	h, err := poseidon.HashBytes(pubkey)
	if err != nil {
		return nil, errors.Wrap(
			errors.New("could not hash proving key"),
			"prove clock frame",
		)
	}

	address := h.Bytes()
	timestamp := time.Now().UnixMilli()

	// Header material in the exact order VerifyDataClockFrame rebuilds it:
	// filter || frame number || timestamp || difficulty || address ||
	// previous output || commitments. Changing this ordering breaks
	// verification.
	input := []byte{}
	input = append(input, previousFrame.Filter...)
	input = binary.BigEndian.AppendUint64(input, previousFrame.FrameNumber+1)
	input = binary.BigEndian.AppendUint64(input, uint64(timestamp))
	input = binary.BigEndian.AppendUint32(input, difficulty)
	input = append(input, address...)
	input = append(input, previousFrame.Output[:]...)

	commitmentInput := []byte{}
	for _, commitment := range commitments {
		commitmentInput = append(commitmentInput, commitment...)
	}

	input = append(input, commitmentInput...)

	b := sha3.Sum256(input)
	v := vdf.New(difficulty, b)

	v.Execute()
	o := v.GetOutput()

	// The signature covers the header hash concatenated with the VDF output.
	// TODO: make this configurable for signing algorithms that allow
	// user-supplied hash functions
	signature, err := provingKey.Sign(
		rand.Reader,
		append(append([]byte{}, b[:]...), o[:]...),
		crypto.Hash(0),
	)
	if err != nil {
		return nil, errors.Wrap(
			err,
			"prove",
		)
	}

	// Parent selector is the Poseidon hash of the first 516 bytes of the
	// previous frame's output (assumes Output has at least 516 bytes — the
	// verifier enforces the equivalent bound on its side; TODO confirm the
	// prover-side invariant).
	previousSelectorBytes := [516]byte{}
	copy(previousSelectorBytes[:], previousFrame.Output[:516])

	parent, err := poseidon.HashBytes(previousSelectorBytes[:])
	if err != nil {
		return nil, errors.Wrap(err, "prove clock frame")
	}

	frame := &ClockFrame{
		Filter:         previousFrame.Filter,
		FrameNumber:    previousFrame.FrameNumber + 1,
		Timestamp:      timestamp,
		Difficulty:     difficulty,
		ParentSelector: parent.Bytes(),
		// Input carries previous output plus commitments so the verifier can
		// reconstruct both the header hash and the parent selector.
		Input: append(
			append([]byte{}, previousFrame.Output...),
			commitmentInput...,
		),
		AggregateProofs: aggregateProofs,
		Output:          o[:],
	}

	// pubkeyType is fixed to Ed448 above; the switch leaves room for future
	// key types.
	switch pubkeyType {
	case keys.KeyTypeEd448:
		frame.PublicKeySignature = &ClockFrame_PublicKeySignatureEd448{
			PublicKeySignatureEd448: &Ed448Signature{
				Signature: signature,
				PublicKey: &Ed448PublicKey{
					KeyValue: pubkey,
				},
			},
		}
	default:
		return nil, errors.Wrap(
			errors.New("unsupported proving key"),
			"prove clock frame",
		)
	}

	return frame, nil
}
// VerifyDataClockFrame checks a data clock frame end to end: it rebuilds the
// header hash from the frame's fields, verifies the Ed448 signature over
// hash||output, verifies the VDF proof at the frame's difficulty, and checks
// that ParentSelector equals the Poseidon hash of the input's first 516
// bytes (mirroring ProveDataClockFrame).
func (frame *ClockFrame) VerifyDataClockFrame() error {
	var pubkey []byte
	var signature []byte
	pubkeyType := keys.KeyTypeEd448
	ed448PublicKey := frame.GetPublicKeySignatureEd448()
	if ed448PublicKey != nil {
		pubkey = ed448PublicKey.PublicKey.KeyValue
		signature = ed448PublicKey.Signature
	} else {
		return errors.Wrap(
			errors.New("no valid signature provided"),
			"verify clock frame",
		)
	}

	// Frame address is the Poseidon hash of the prover key, as on the
	// proving side.
	h, err := poseidon.HashBytes(pubkey)
	if err != nil {
		return errors.Wrap(
			errors.New("could not hash proving key"),
			"verify clock frame",
		)
	}

	address := h.Bytes()

	// Rebuild the header material in the same order the prover used:
	// filter || frame number || timestamp || difficulty || address || input.
	input := []byte{}
	input = append(input, frame.Filter...)
	input = binary.BigEndian.AppendUint64(input, frame.FrameNumber)
	input = binary.BigEndian.AppendUint64(input, uint64(frame.Timestamp))
	input = binary.BigEndian.AppendUint32(input, frame.Difficulty)
	input = append(input, address...)
	input = append(input, frame.Input...)

	// Input must carry at least the 516-byte previous output used for the
	// parent selector below.
	if len(frame.Input) < 516 {
		return errors.Wrap(
			errors.New("invalid input"),
			"verify clock frame",
		)
	}

	b := sha3.Sum256(input)
	v := vdf.New(frame.Difficulty, b)
	proof := [516]byte{}
	copy(proof[:], frame.Output)

	// TODO: make this configurable for signing algorithms that allow
	// user-supplied hash functions
	// pubkeyType is fixed to Ed448 above; a future non-Ed448 type would fall
	// through this switch without a signature check.
	switch pubkeyType {
	case keys.KeyTypeEd448:
		if len(pubkey) != 57 || len(signature) != 114 || !ed448.VerifyAny(
			pubkey,
			append(append([]byte{}, b[:]...), frame.Output...),
			signature,
			crypto.Hash(0),
		) {
			return errors.Wrap(
				errors.New("invalid signature for issuer"),
				"verify clock frame",
			)
		}
	}

	if !v.Verify(proof) {
		return errors.Wrap(
			errors.New("invalid proof"),
			"verify clock frame",
		)
	}

	// Recompute the parent selector from the first 516 bytes of the input
	// (the previous frame's output) and compare against the stated one.
	previousSelectorBytes := [516]byte{}
	copy(previousSelectorBytes[:], frame.Input[:516])

	parent, err := poseidon.HashBytes(previousSelectorBytes[:])
	if err != nil {
		return errors.Wrap(err, "verify clock frame")
	}

	selector := new(big.Int).SetBytes(frame.ParentSelector)
	if parent.Cmp(selector) != 0 {
		return errors.Wrap(
			errors.New("selector did not match input"),
			"verify clock frame",
		)
	}

	return nil
}

View File

@ -598,6 +598,62 @@ func (x *BLS48581G2PrivateKey) GetPublicKey() *BLS48581G2PublicKey {
return nil
}
// Describes a raw BLS48-581 signature, minimal signature size variant
//
// NOTE(review): this type and its methods are protoc-generated boilerplate
// (message index 11 in keys.proto); do not hand-edit — regenerate from the
// .proto definition instead.
type BLS48581Signature struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Signature []byte               `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` // 74 byte value
	PublicKey *BLS48581G2PublicKey `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
}

func (x *BLS48581Signature) Reset() {
	*x = BLS48581Signature{}
	if protoimpl.UnsafeEnabled {
		mi := &file_keys_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BLS48581Signature) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BLS48581Signature) ProtoMessage() {}

func (x *BLS48581Signature) ProtoReflect() protoreflect.Message {
	mi := &file_keys_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BLS48581Signature.ProtoReflect.Descriptor instead.
func (*BLS48581Signature) Descriptor() ([]byte, []int) {
	return file_keys_proto_rawDescGZIP(), []int{11}
}

func (x *BLS48581Signature) GetSignature() []byte {
	if x != nil {
		return x.Signature
	}
	return nil
}

func (x *BLS48581Signature) GetPublicKey() *BLS48581G2PublicKey {
	if x != nil {
		return x.PublicKey
	}
	return nil
}
var File_keys_proto protoreflect.FileDescriptor
var file_keys_proto_rawDesc = []byte{
@ -664,6 +720,14 @@ var file_keys_proto_rawDesc = []byte{
0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34,
0x38, 0x35, 0x38, 0x31, 0x47, 0x32, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52,
0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x7e, 0x0a, 0x11, 0x42, 0x4c,
0x53, 0x34, 0x38, 0x35, 0x38, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4b, 0x0a,
0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x2c, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x4c, 0x53, 0x34,
0x38, 0x35, 0x38, 0x31, 0x47, 0x32, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52,
0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d,
@ -683,7 +747,7 @@ func file_keys_proto_rawDescGZIP() []byte {
return file_keys_proto_rawDescData
}
var file_keys_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_keys_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_keys_proto_goTypes = []interface{}{
(*Ed448PublicKey)(nil), // 0: quilibrium.node.keys.pb.Ed448PublicKey
(*Ed448PrivateKey)(nil), // 1: quilibrium.node.keys.pb.Ed448PrivateKey
@ -696,6 +760,7 @@ var file_keys_proto_goTypes = []interface{}{
(*BLS48581G1PrivateKey)(nil), // 8: quilibrium.node.keys.pb.BLS48581G1PrivateKey
(*BLS48581G2PublicKey)(nil), // 9: quilibrium.node.keys.pb.BLS48581G2PublicKey
(*BLS48581G2PrivateKey)(nil), // 10: quilibrium.node.keys.pb.BLS48581G2PrivateKey
(*BLS48581Signature)(nil), // 11: quilibrium.node.keys.pb.BLS48581Signature
}
var file_keys_proto_depIdxs = []int32{
0, // 0: quilibrium.node.keys.pb.Ed448PrivateKey.public_key:type_name -> quilibrium.node.keys.pb.Ed448PublicKey
@ -704,11 +769,12 @@ var file_keys_proto_depIdxs = []int32{
5, // 3: quilibrium.node.keys.pb.PCASPrivateKey.public_key:type_name -> quilibrium.node.keys.pb.PCASPublicKey
7, // 4: quilibrium.node.keys.pb.BLS48581G1PrivateKey.public_key:type_name -> quilibrium.node.keys.pb.BLS48581G1PublicKey
9, // 5: quilibrium.node.keys.pb.BLS48581G2PrivateKey.public_key:type_name -> quilibrium.node.keys.pb.BLS48581G2PublicKey
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
9, // 6: quilibrium.node.keys.pb.BLS48581Signature.public_key:type_name -> quilibrium.node.keys.pb.BLS48581G2PublicKey
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_keys_proto_init() }
@ -849,6 +915,18 @@ func file_keys_proto_init() {
return nil
}
}
file_keys_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BLS48581Signature); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
@ -856,7 +934,7 @@ func file_keys_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_keys_proto_rawDesc,
NumEnums: 0,
NumMessages: 11,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},

View File

@ -66,3 +66,9 @@ message BLS48581G2PrivateKey {
bytes key_value = 1; // 73 byte value
BLS48581G2PublicKey public_key = 2;
}
// Describes a raw BLS48-581 signature, minimal signature size variant
message BLS48581Signature {
  bytes signature = 1; // 74 byte value
  // Public key on G2 corresponding to the signer (minimal-signature-size
  // variant places signatures on G1, public keys on G2).
  BLS48581G2PublicKey public_key = 2;
}

View File

@ -1,38 +1,51 @@
package protobufs
const (
TypeUrlPrefix = "types.quilibrium.com"
NamespacePrefix = TypeUrlPrefix + "/quilibrium.node."
AppPrefix = NamespacePrefix + "application.pb."
ChannelPrefix = NamespacePrefix + "channel.pb."
ClockPrefix = NamespacePrefix + "clock.pb."
KeysPrefix = NamespacePrefix + "keys.pb."
CeremonyPrefix = NamespacePrefix + "ceremony.pb."
CeremonyTranscriptType = CeremonyPrefix + "CeremonyTranscript"
ApplicationType = AppPrefix + "Application"
ExecutionContextType = AppPrefix + "ExecutionContext"
MessageType = AppPrefix + "Message"
P2PChannelEnvelopeType = ChannelPrefix + "P2PChannelEnvelope"
MessageCiphertextType = ChannelPrefix + "MessageCiphertext"
ProvingKeyAnnouncementType = ChannelPrefix + "ProvingKeyAnnouncement"
ProvingKeyRequestType = ChannelPrefix + "ProvingKeyRequest"
InclusionAggregateProofType = ChannelPrefix + "InclusionAggregateProof"
InclusionCommitmentType = ChannelPrefix + "InclusionCommitment"
KeyBundleAnnouncementType = ChannelPrefix + "KeyBundleAnnouncement"
IdentityKeyType = ChannelPrefix + "IdentityKey"
SignedPreKeyType = ChannelPrefix + "SignedPreKey"
ClockFrameType = ClockPrefix + "ClockFrame"
ClockFramesRequestType = ClockPrefix + "ClockFramesRequest"
ClockFramesResponseType = ClockPrefix + "ClockFramesResponse"
Ed448PublicKeyType = KeysPrefix + "Ed448PublicKey"
Ed448PrivateKeyType = KeysPrefix + "Ed448PrivateKey"
Ed448SignatureType = KeysPrefix + "Ed448Signature"
X448PublicKeyType = KeysPrefix + "X448PublicKey"
X448PrivateKeyType = KeysPrefix + "X448PrivateKey"
PCASPublicKeyType = KeysPrefix + "PCASPublicKey"
PCASPrivateKeyType = KeysPrefix + "PCASPrivateKey"
BLS48581G1PublicKeyType = KeysPrefix + "BLS48581G1PublicKey"
BLS48581G1PrivateKeyType = KeysPrefix + "BLS48581G1PrivateKey"
BLS48581G2PublicKeyType = KeysPrefix + "BLS48581G2PublicKey"
BLS48581G2PrivateKeyType = KeysPrefix + "BLS48581G2PrivateKey"
TypeUrlPrefix = "types.quilibrium.com"
NamespacePrefix = TypeUrlPrefix + "/quilibrium.node."
AppPrefix = NamespacePrefix + "application.pb."
ChannelPrefix = NamespacePrefix + "channel.pb."
ClockPrefix = NamespacePrefix + "clock.pb."
KeysPrefix = NamespacePrefix + "keys.pb."
CeremonyPrefix = NamespacePrefix + "ceremony.pb."
CeremonyTranscriptType = CeremonyPrefix + "CeremonyTranscript"
CeremonyLobbyStateType = CeremonyPrefix + "CeremonyLobbyState"
CeremonySeenProverAttestationType = CeremonyPrefix + "CeremonySeenProverAttestation"
CeremonyDroppedProverAttestationType = CeremonyPrefix + "CeremonyDroppedProverAttestation"
CeremonyTranscriptShareType = CeremonyPrefix + "CeremonyTranscriptShare"
CeremonyTranscriptCommitType = CeremonyPrefix + "CeremonyTranscriptCommit"
CeremonyAdvanceRoundType = CeremonyPrefix + "CeremonyAdvanceRound"
CeremonyLobbyJoinType = CeremonyPrefix + "CeremonyLobbyJoin"
CeremonyLobbyStateTransitionType = CeremonyPrefix + "CeremonyLobbyStateTransition"
CeremonyOpenStateType = CeremonyPrefix + "CeremonyOpenState"
CeremonyInProgressStateType = CeremonyPrefix + "CeremonyInProgressState"
CeremonyFinalizingStateType = CeremonyPrefix + "CeremonyFinalizingState"
CeremonyValidatingStateType = CeremonyPrefix + "CeremonyValidatingState"
ApplicationType = AppPrefix + "Application"
ExecutionContextType = AppPrefix + "ExecutionContext"
MessageType = AppPrefix + "Message"
IntrinsicExecutionOutputType = AppPrefix + "IntrinsicExecutionOutput"
P2PChannelEnvelopeType = ChannelPrefix + "P2PChannelEnvelope"
MessageCiphertextType = ChannelPrefix + "MessageCiphertext"
ProvingKeyAnnouncementType = ChannelPrefix + "ProvingKeyAnnouncement"
ProvingKeyRequestType = ChannelPrefix + "ProvingKeyRequest"
InclusionAggregateProofType = ChannelPrefix + "InclusionAggregateProof"
InclusionCommitmentType = ChannelPrefix + "InclusionCommitment"
KeyBundleAnnouncementType = ChannelPrefix + "KeyBundleAnnouncement"
IdentityKeyType = ChannelPrefix + "IdentityKey"
SignedPreKeyType = ChannelPrefix + "SignedPreKey"
ClockFrameType = ClockPrefix + "ClockFrame"
ClockFramesRequestType = ClockPrefix + "ClockFramesRequest"
ClockFramesResponseType = ClockPrefix + "ClockFramesResponse"
Ed448PublicKeyType = KeysPrefix + "Ed448PublicKey"
Ed448PrivateKeyType = KeysPrefix + "Ed448PrivateKey"
Ed448SignatureType = KeysPrefix + "Ed448Signature"
X448PublicKeyType = KeysPrefix + "X448PublicKey"
X448PrivateKeyType = KeysPrefix + "X448PrivateKey"
PCASPublicKeyType = KeysPrefix + "PCASPublicKey"
PCASPrivateKeyType = KeysPrefix + "PCASPrivateKey"
BLS48581G1PublicKeyType = KeysPrefix + "BLS48581G1PublicKey"
BLS48581G1PrivateKeyType = KeysPrefix + "BLS48581G1PrivateKey"
BLS48581G2PublicKeyType = KeysPrefix + "BLS48581G2PublicKey"
BLS48581G2PrivateKeyType = KeysPrefix + "BLS48581G2PrivateKey"
)

View File

@ -34,7 +34,7 @@ type ClockStore interface {
GetDataClockFrame(
filter []byte,
frameNumber uint64,
) (*protobufs.ClockFrame, error)
) (*protobufs.ClockFrame, *tries.RollingFrecencyCritbitTrie, error)
RangeDataClockFrames(
filter []byte,
startFrameNumber uint64,
@ -52,6 +52,10 @@ type ClockStore interface {
frame *protobufs.ClockFrame,
txn Transaction,
) error
GetCandidateDataClockFrames(
filter []byte,
frameNumber uint64,
) ([]*protobufs.ClockFrame, error)
GetParentDataClockFrame(
filter []byte,
frameNumber uint64,
@ -510,26 +514,39 @@ func (p *PebbleClockStore) PutMasterClockFrame(
func (p *PebbleClockStore) GetDataClockFrame(
filter []byte,
frameNumber uint64,
) (*protobufs.ClockFrame, error) {
) (*protobufs.ClockFrame, *tries.RollingFrecencyCritbitTrie, error) {
value, closer, err := p.db.Get(clockDataFrameKey(filter, frameNumber))
if err != nil {
if errors.Is(err, pebble.ErrNotFound) {
return nil, ErrNotFound
return nil, nil, ErrNotFound
}
return nil, errors.Wrap(err, "get data clock frame")
return nil, nil, errors.Wrap(err, "get data clock frame")
}
defer closer.Close()
frame := &protobufs.ClockFrame{}
if err := proto.Unmarshal(value, frame); err != nil {
return nil, errors.Wrap(
return nil, nil, errors.Wrap(
errors.Wrap(err, ErrInvalidData.Error()),
"get data clock frame",
)
}
return frame, nil
proverTrie := &tries.RollingFrecencyCritbitTrie{}
trieData, closer, err := p.db.Get(clockProverTrieKey(filter, frameNumber))
if err != nil {
return nil, nil, errors.Wrap(err, "get latest data clock frame")
}
defer closer.Close()
if err := proverTrie.Deserialize(trieData); err != nil {
return nil, nil, errors.Wrap(err, "get latest data clock frame")
}
return frame, proverTrie, nil
}
// GetEarliestDataClockFrame implements ClockStore.
@ -547,7 +564,7 @@ func (p *PebbleClockStore) GetEarliestDataClockFrame(
defer closer.Close()
frameNumber := binary.BigEndian.Uint64(idxValue)
frame, err := p.GetDataClockFrame(filter, frameNumber)
frame, _, err := p.GetDataClockFrame(filter, frameNumber)
if err != nil {
return nil, errors.Wrap(err, "get earliest data clock frame")
}
@ -570,7 +587,7 @@ func (p *PebbleClockStore) GetLatestDataClockFrame(
}
frameNumber := binary.BigEndian.Uint64(idxValue)
frame, err := p.GetDataClockFrame(filter, frameNumber)
frame, _, err := p.GetDataClockFrame(filter, frameNumber)
if err != nil {
return nil, errors.Wrap(err, "get latest data clock frame")
}
@ -743,6 +760,66 @@ func (p *PebbleClockStore) PutDataClockFrame(
return nil
}
// GetCandidateDataClockFrames implements ClockStore.
// Candidate frames for a given frame number are keyed by two 32-byte-aligned
// distance components, so scanning the full candidate set is a range scan
// from all-0x00 to all-0xff on both components.
func (p *PebbleClockStore) GetCandidateDataClockFrames(
	filter []byte,
	frameNumber uint64,
) ([]*protobufs.ClockFrame, error) {
	var lowest, highest [32]byte
	for i := range highest {
		highest[i] = 0xff
	}

	iter := p.db.NewIter(&pebble.IterOptions{
		LowerBound: clockDataCandidateFrameKey(
			filter,
			frameNumber,
			lowest[:],
			lowest[:],
		),
		UpperBound: clockDataCandidateFrameKey(
			filter,
			frameNumber,
			highest[:],
			highest[:],
		),
	})

	frames := []*protobufs.ClockFrame{}
	it := &PebbleCandidateClockIterator{i: iter}

	for it.First(); it.Valid(); it.Next() {
		frame, err := it.Value()
		if err != nil {
			return nil, errors.Wrap(err, "get candidate data clock frames")
		}

		frames = append(frames, frame)
	}

	if err := it.Close(); err != nil {
		return nil, errors.Wrap(err, "get candidate data clock frames")
	}

	return frames, nil
}
// RangeCandidateDataClockFrames implements ClockStore.
// Distance is 32-byte aligned, so we just use a 0x00 * 32 -> 0xff * 32 range
func (p *PebbleClockStore) RangeCandidateDataClockFrames(

View File

@ -0,0 +1,301 @@
package tries
import (
"bytes"
"encoding/gob"
"encoding/hex"
"strings"
"sync"
"github.com/pkg/errors"
)
// RewardNode is a node in a RewardCritbitTrie. Exactly one of Internal or
// External is expected to be populated: Internal for branch nodes, External
// for leaves carrying a key's reward data.
type RewardNode struct {
	Internal *RewardInternalNode
	External *RewardExternalNode
}

// RewardInternalNode is a branch node: keys whose critical bit is clear
// descend into Child[0], keys where it is set descend into Child[1].
type RewardInternalNode struct {
	Child [2]RewardNode
	// ByteNumber is the index of the byte containing the critical bit.
	ByteNumber uint32
	// Bits is a mask with only the critical bit set.
	Bits byte
}

// RewardExternalNode is a leaf: the full key plus accumulated reward state.
type RewardExternalNode struct {
	Key []byte
	// EarliestFrame and LatestFrame bound the frame-number range over which
	// rewards have been recorded for this key.
	EarliestFrame uint64
	LatestFrame   uint64
	// Total is the running sum of rewards recorded for this key.
	Total uint64
}

// RewardCritbitTrie is a crit-bit trie keyed by address bytes, tracking
// per-address reward totals. All exported methods are guarded by mu and are
// safe for concurrent use.
type RewardCritbitTrie struct {
	Root *RewardNode
	mu   sync.RWMutex
}
// Serialize encodes the trie (its root and all reachable nodes) as a gob
// byte stream suitable for storage; Deserialize reverses it.
func (t *RewardCritbitTrie) Serialize() ([]byte, error) {
	t.mu.RLock()
	defer t.mu.RUnlock()

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(t.Root); err != nil {
		return nil, errors.Wrap(err, "serialize")
	}
	return buf.Bytes(), nil
}
// Deserialize replaces the trie's contents with the gob-encoded
// representation in buf, as produced by Serialize. Returns a wrapped error
// if buf is not a valid encoding.
func (t *RewardCritbitTrie) Deserialize(buf []byte) error {
	t.mu.Lock()
	defer t.mu.Unlock()

	// Decode directly from the input slice; the previous implementation
	// copied buf into an intermediate bytes.Buffer for no benefit.
	if err := gob.NewDecoder(bytes.NewReader(buf)).Decode(&t.Root); err != nil {
		return errors.Wrap(err, "deserialize")
	}
	return nil
}
// Contains reports whether an entry with exactly the given address exists.
func (t *RewardCritbitTrie) Contains(address []byte) bool {
	t.mu.RLock()
	defer t.mu.RUnlock()

	n := t.findNearest(address)
	if n == nil || n.External == nil {
		return false
	}
	return bytes.Equal(n.External.Key, address)
}
// Get returns the earliest frame, latest frame, and reward total recorded
// for address, or all zeroes when the address is not present.
func (t *RewardCritbitTrie) Get(
	address []byte,
) (earliestFrame uint64, latestFrame uint64, total uint64) {
	t.mu.RLock()
	defer t.mu.RUnlock()

	n := t.findNearest(address)
	if n == nil || n.External == nil || !bytes.Equal(n.External.Key, address) {
		return 0, 0, 0
	}

	ext := n.External
	return ext.EarliestFrame, ext.LatestFrame, ext.Total
}
// findNearest descends from the root following address's bits and returns
// the external node reached, or nil for an empty trie. The result is only a
// candidate: callers must compare its Key against address for exact matches.
func (t *RewardCritbitTrie) findNearest(
	address []byte,
) *RewardNode {
	node := t.Root
	if node == nil {
		return nil
	}

	size := uint32(len(address))
	for node.Internal != nil {
		in := node.Internal
		dir := 0
		// Branch right only when the critical byte exists in address and
		// its critical bit is set.
		if in.ByteNumber < size && address[in.ByteNumber]&in.Bits != 0 {
			dir = 1
		}
		node = &in.Child[dir]
	}

	return node
}
// Add records a reward for address at latestFrame. If the address already
// exists, its frame range is widened and the reward added to its total;
// otherwise a new leaf is inserted at the position determined by the first
// bit at which address differs from its nearest existing key.
func (t *RewardCritbitTrie) Add(
	address []byte,
	latestFrame uint64,
	reward uint64,
) {
	t.mu.Lock()
	defer t.mu.Unlock()

	// Empty trie: the new entry becomes the sole (external) root.
	if t.Root == nil {
		t.Root = &RewardNode{
			External: &RewardExternalNode{
				Key:           address,
				EarliestFrame: latestFrame,
				LatestFrame:   latestFrame,
				Total:         reward,
			},
		}
		return
	}

	// Find the closest existing key and the first differing (critical) bit.
	// NOTE(review): if the root's External was cleared by a prior Remove of
	// the last entry, critBit dereferences a nil External here — confirm
	// Remove resets Root to nil instead of leaving an empty shell.
	p := t.findNearest(address)
	byteNumber, bits := p.critBit(address)
	// byteNumber < 0 means the keys are identical: merge into the existing
	// entry rather than inserting.
	if byteNumber < 0 {
		if p.External.LatestFrame < latestFrame {
			p.External.LatestFrame = latestFrame
		}
		if p.External.EarliestFrame > latestFrame {
			p.External.EarliestFrame = latestFrame
		}
		p.External.Total += reward
		return
	}

	// New internal node splitting on the critical bit.
	node := &RewardInternalNode{
		ByteNumber: uint32(byteNumber),
		Bits:       bits,
	}

	// Which side of the split the new address falls on.
	blen := uint32(len(address))
	right := node.ByteNumber < blen &&
		address[node.ByteNumber]&node.Bits != 0
	e := &RewardExternalNode{
		Key:           address,
		EarliestFrame: latestFrame,
		LatestFrame:   latestFrame,
		Total:         reward,
	}
	if right {
		node.Child[1].External = e
	} else {
		node.Child[0].External = e
	}

	// Walk down from the root to find the correct insertion point: stop at
	// the first node whose split point is at or past the new critical bit.
	p = t.Root
	for m := p.Internal; m != nil; m = p.Internal {
		if m.ByteNumber > uint32(byteNumber) ||
			m.ByteNumber == uint32(byteNumber) && m.Bits < bits {
			break
		}

		if m.ByteNumber < blen && address[m.ByteNumber]&m.Bits != 0 {
			p = &m.Child[1]
		} else {
			p = &m.Child[0]
		}
	}

	// Attach the displaced subtree on the opposite side of the new node.
	if p.Internal != nil {
		if right {
			node.Child[0].Internal = p.Internal
		} else {
			node.Child[1].Internal = p.Internal
		}
	} else {
		// NOTE(review): only the else branch clears p.External, so in the
		// right case p keeps both Internal and External set after this
		// insert; String() would then report only the external key and hide
		// the subtree — looks asymmetric, verify intent.
		if right {
			node.Child[0].External = p.External
		} else {
			node.Child[1].External = p.External
			p.External = nil
		}
	}
	p.Internal = node
}
// Remove deletes the entry whose key is exactly address, if present. When
// the removed leaf's parent is an internal node, the sibling subtree is
// promoted into the grandparent's slot; removing the final entry resets the
// trie to its empty state.
func (t *RewardCritbitTrie) Remove(address []byte) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.Root == nil {
		return
	}

	blen := uint32(len(address))
	var gp *RewardNode
	p := t.Root
	var right bool
	// Descend to the leaf that would hold address, remembering the parent
	// (gp) and which side (right) we took on the final step.
	for m := p.Internal; m != nil; m = p.Internal {
		right = m.ByteNumber < blen &&
			address[m.ByteNumber]&m.Bits != 0
		if right {
			gp, p = p, &m.Child[1]
		} else {
			gp, p = p, &m.Child[0]
		}
	}

	// Guard against a leaf with no external entry: the original code
	// dereferenced p.External unconditionally and panicked when Remove was
	// called again after the last entry had been removed.
	if p.External == nil || !bytes.Equal(p.External.Key, address) {
		return
	}

	if gp == nil {
		// Removing the only entry: reset the trie to its canonical empty
		// state. Leaving a shell node with a nil External caused a nil
		// dereference in critBit on a subsequent Add.
		t.Root = nil
	} else {
		// Promote the removed leaf's sibling into the grandparent.
		if right {
			gp.External, gp.Internal = gp.Internal.Child[0].External,
				gp.Internal.Child[0].Internal
		} else {
			gp.External, gp.Internal = gp.Internal.Child[1].External,
				gp.Internal.Child[1].Internal
		}
	}
}
// String renders the subtree's keys as hex strings, comma-joined in
// traversal order; an external node renders as its single key.
func (n *RewardNode) String() string {
	if n.External != nil {
		return hex.EncodeToString(n.External.Key)
	}

	parts := make([]string, 0, len(n.Internal.Child))
	for i := range n.Internal.Child {
		parts = append(parts, n.Internal.Child[i].String())
	}
	return strings.Join(parts, ",")
}
// Bits returns the key bytes held at this node, or nil when the node is
// internal (i.e. holds no external entry).
func (n *RewardNode) Bits() []byte {
	if n.External == nil {
		return nil
	}
	return n.External.Key
}
// Info returns the latest frame and reward total of this node's external
// entry, or zeroes when the node is internal.
func (n *RewardNode) Info() (latestFrame uint64, total uint64) {
	if n.External == nil {
		return 0, 0
	}
	return n.External.LatestFrame, n.External.Total
}
// critBit compares address against this node's external key and returns the
// position of the first differing bit: byteNumber is the index of the first
// differing byte and bits is a mask with only the highest differing bit set.
// When the keys are identical it returns byteNumber == -1. When one key is a
// prefix of the other, the critical bit is taken from the first byte of the
// longer key beyond the shared prefix.
// NOTE(review): assumes n.External is non-nil; callers must not pass an
// internal-only node. Also, if the first extra byte of the longer key is
// 0x00, bits comes out 0 — verify callers never rely on that case.
func (n *RewardNode) critBit(
	address []byte,
) (byteNumber int, bits byte) {
	smallestLen := len(n.External.Key)
	if len(address) < smallestLen {
		smallestLen = len(address)
	}

	// Scan the shared prefix for the first differing byte.
	for byteNumber = 0; byteNumber < smallestLen; byteNumber++ {
		if l, r := address[byteNumber], n.External.Key[byteNumber]; l != r {
			// Smear the XOR's set bits downward, then clear all but the
			// top bit: isolates the most significant differing bit.
			b := l ^ r
			b |= b >> 1
			b |= b >> 2
			b |= b >> 4
			bits = b &^ (b >> 1)
			return
		}
	}

	// Shared prefix exhausted: keys differ only in length (or are equal).
	if len(n.External.Key) < len(address) {
		// address is longer; critical bit is the top set bit of its first
		// extra byte.
		b := address[byteNumber]
		b |= b >> 1
		b |= b >> 2
		b |= b >> 4
		bits = b &^ (b >> 1)
	} else if len(n.External.Key) > len(address) {
		// stored key is longer; same rule applied to its first extra byte.
		b := n.External.Key[byteNumber]
		b |= b >> 1
		b |= b >> 2
		b |= b >> 4
		bits = b &^ (b >> 1)
	} else {
		// Identical keys: signal "no critical bit" to the caller.
		byteNumber = -1
	}
	return
}