Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git, synced 2024-11-20 15:15:18 +00:00

v1.2.0 (#31)
Commit 2e2a1e4789 (parent a0c1feb0bc)
@@ -7,6 +7,7 @@
 package curves
 
 import (
+	"arena"
 	"errors"
 	"fmt"
 	"io"
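The hunk above pulls Go's experimental `arena` package into the curve wrappers. As background (not part of the commit itself), code that imports `arena` only builds with `GOEXPERIMENT=arenas` set (Go 1.20 or newer), and the API is a small surface of explicit allocate-and-free calls. A minimal, self-contained sketch of that lifecycle, with `limbs` as a hypothetical stand-in type for something like `bls48581.BIG`:

```go
package main

import "arena"

// limbs is a hypothetical stand-in for an arena-allocated value such as bls48581.BIG.
type limbs struct{ w [10]int64 }

func main() {
	mem := arena.NewArena()    // one bump-allocated region
	defer mem.Free()           // releases everything allocated from mem at once
	t := arena.New[limbs](mem) // allocate inside the arena instead of the GC heap
	t.w[0] = 1
	_ = t // t must not be used after mem.Free()
}
```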
@@ -47,9 +48,9 @@ func (s *ScalarBls48581) Random(reader io.Reader) Scalar {
 func (s *ScalarBls48581) Hash(bytes []byte) Scalar {
 	DST := []byte("BLS_SIG_BLS48581G1_XMD:SHA-512_SVDW_RO_NUL_")
 	u := bls48581.Hash_to_field(ext.MC_SHA2, bls48581.HASH_TYPE, DST, bytes, 2)
-	u[0].Add(u[1])
-	b := u[0].Redc()
-	b.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
+	u[0].Add(u[1], nil)
+	b := u[0].Redc(nil)
+	b.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
 	return &ScalarBls48581{
 		Value: b,
 		point: s.point,
@@ -58,14 +59,14 @@ func (s *ScalarBls48581) Hash(bytes []byte) Scalar {
 
 func (s *ScalarBls48581) Zero() Scalar {
 	return &ScalarBls48581{
-		Value: bls48581.NewBIGint(0),
+		Value: bls48581.NewBIGint(0, nil),
 		point: s.point,
 	}
 }
 
 func (s *ScalarBls48581) One() Scalar {
 	return &ScalarBls48581{
-		Value: bls48581.NewBIGint(1),
+		Value: bls48581.NewBIGint(1, nil),
 		point: s.point,
 	}
 }
@@ -75,7 +76,7 @@ func (s *ScalarBls48581) IsZero() bool {
 }
 
 func (s *ScalarBls48581) IsOne() bool {
-	t := bls48581.NewBIGint(1)
+	t := bls48581.NewBIGint(1, nil)
 	t.Sub(s.Value)
 	return t.IsZero()
 }
@@ -94,15 +95,15 @@ func (s *ScalarBls48581) IsEven() bool {
 
 func (s *ScalarBls48581) New(value int) Scalar {
 	if value > 0 {
-		t := bls48581.NewBIGint(value)
-		t.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
+		t := bls48581.NewBIGint(value, nil)
+		t.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
 		return &ScalarBls48581{
 			Value: t,
 			point: s.point,
 		}
 	} else {
-		t := bls48581.NewBIGint(-value)
-		v := bls48581.NewBIGints(bls48581.CURVE_Order)
+		t := bls48581.NewBIGint(-value, nil)
+		v := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
 		v.Sub(t)
 		return &ScalarBls48581{
 			Value: v,
@@ -121,8 +122,8 @@ func (s *ScalarBls48581) Cmp(rhs Scalar) int {
 }
 
 func (s *ScalarBls48581) Square() Scalar {
-	sqr := bls48581.NewBIGcopy(s.Value)
-	sqr = bls48581.Modsqr(sqr, bls48581.NewBIGints(bls48581.CURVE_Order))
+	sqr := bls48581.NewBIGcopy(s.Value, nil)
+	sqr = bls48581.Modsqr(sqr, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
 	return &ScalarBls48581{
 		Value: sqr,
 		point: s.point,
@@ -130,8 +131,13 @@ func (s *ScalarBls48581) Square() Scalar {
 }
 
 func (s *ScalarBls48581) Double() Scalar {
-	dbl := bls48581.NewBIGcopy(s.Value)
-	dbl = bls48581.Modmul(dbl, bls48581.NewBIGint(2), bls48581.NewBIGints(bls48581.CURVE_Order))
+	dbl := bls48581.NewBIGcopy(s.Value, nil)
+	dbl = bls48581.Modmul(
+		dbl,
+		bls48581.NewBIGint(2, nil),
+		bls48581.NewBIGints(bls48581.CURVE_Order, nil),
+		nil,
+	)
 	return &ScalarBls48581{
 		Value: dbl,
 		point: s.point,
@@ -139,8 +145,8 @@ func (s *ScalarBls48581) Double() Scalar {
 }
 
 func (s *ScalarBls48581) Invert() (Scalar, error) {
-	v := bls48581.NewBIGcopy(s.Value)
-	v.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order))
+	v := bls48581.NewBIGcopy(s.Value, nil)
+	v.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order, nil))
 	if v == nil {
 		return nil, fmt.Errorf("inverse doesn't exist")
 	}
@@ -155,9 +161,9 @@ func (s *ScalarBls48581) Sqrt() (Scalar, error) {
 }
 
 func (s *ScalarBls48581) Cube() Scalar {
-	value := bls48581.NewBIGcopy(s.Value)
-	value = bls48581.Modsqr(value, bls48581.NewBIGints(bls48581.CURVE_Order))
-	value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
+	value := bls48581.NewBIGcopy(s.Value, nil)
+	value = bls48581.Modsqr(value, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
+	value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: s.point,
@@ -167,8 +173,11 @@ func (s *ScalarBls48581) Cube() Scalar {
 func (s *ScalarBls48581) Add(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		value := bls48581.NewBIGcopy(s.Value)
-		value = bls48581.ModAdd(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
+		mem := arena.NewArena()
+		defer mem.Free()
+		value := bls48581.NewBIGcopy(s.Value, mem)
+		value = bls48581.ModAdd(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+		value = bls48581.NewBIGcopy(value, nil)
 		return &ScalarBls48581{
 			Value: value,
 			point: s.point,
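The Add hunk above is the template the rest of the commit repeats for Sub, Mul, Div and Neg: every intermediate BIG is allocated in a throwaway arena, and only the value that escapes the function is copied back to the garbage-collected heap (the trailing `NewBIGcopy(value, nil)`) before the deferred `Free` runs. A compact sketch of that pattern, using only the signatures visible in this diff (the wrapper function `addPattern` itself is hypothetical, not code from the repository):

```go
func addPattern(s, r *bls48581.BIG) *bls48581.BIG {
	mem := arena.NewArena()
	defer mem.Free() // drops every temporary below in one shot

	// intermediates live in the arena, so the GC never tracks them
	v := bls48581.NewBIGcopy(s, mem)
	v = bls48581.ModAdd(v, r, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)

	// the escaping result is copied out with a nil arena (ordinary heap),
	// so freeing mem cannot invalidate the returned pointer
	return bls48581.NewBIGcopy(v, nil)
}
```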
@@ -181,9 +190,12 @@ func (s *ScalarBls48581) Add(rhs Scalar) Scalar {
 func (s *ScalarBls48581) Sub(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		value := bls48581.NewBIGcopy(r.Value)
-		value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order))
-		value = bls48581.ModAdd(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
+		mem := arena.NewArena()
+		defer mem.Free()
+		value := bls48581.NewBIGcopy(r.Value, mem)
+		value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+		value = bls48581.ModAdd(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+		value = bls48581.NewBIGcopy(value, nil)
 		return &ScalarBls48581{
 			Value: value,
 			point: s.point,
@@ -196,8 +208,11 @@ func (s *ScalarBls48581) Sub(rhs Scalar) Scalar {
 func (s *ScalarBls48581) Mul(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		value := bls48581.NewBIGcopy(s.Value)
-		value = bls48581.Modmul(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
+		mem := arena.NewArena()
+		defer mem.Free()
+		value := bls48581.NewBIGcopy(s.Value, mem)
+		value = bls48581.Modmul(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+		value = bls48581.NewBIGcopy(value, nil)
 		return &ScalarBls48581{
 			Value: value,
 			point: s.point,
@@ -214,9 +229,12 @@ func (s *ScalarBls48581) MulAdd(y, z Scalar) Scalar {
 func (s *ScalarBls48581) Div(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		value := bls48581.NewBIGcopy(r.Value)
-		value.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order))
-		value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
+		mem := arena.NewArena()
+		defer mem.Free()
+		value := bls48581.NewBIGcopy(r.Value, mem)
+		value.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order, mem))
+		value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+		value = bls48581.NewBIGcopy(value, nil)
 		return &ScalarBls48581{
 			Value: value,
 			point: s.point,
@@ -227,8 +245,11 @@ func (s *ScalarBls48581) Div(rhs Scalar) Scalar {
 }
 
 func (s *ScalarBls48581) Neg() Scalar {
-	value := bls48581.NewBIGcopy(s.Value)
-	value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order))
+	mem := arena.NewArena()
+	defer mem.Free()
+	value := bls48581.NewBIGcopy(s.Value, mem)
+	value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
+	value = bls48581.NewBIGcopy(value, nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: s.point,
@@ -244,7 +265,7 @@ func (s *ScalarBls48581) SetBigInt(v *big.Int) (Scalar, error) {
 	copy(t[bls48581.MODBYTES-uint(len(b)):], b)
 
 	i := bls48581.FromBytes(t)
-	i.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
+	i.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
 	return &ScalarBls48581{
 		Value: i,
 		point: s.point,
@@ -298,7 +319,7 @@ func (s *ScalarBls48581) Point() Point {
 }
 
 func (s *ScalarBls48581) Clone() Scalar {
-	value := bls48581.NewBIGcopy(s.Value)
+	value := bls48581.NewBIGcopy(s.Value, nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: s.point,
@@ -306,7 +327,7 @@ func (s *ScalarBls48581) Clone() Scalar {
 }
 
 func (s *ScalarBls48581) SetPoint(p Point) PairingScalar {
-	value := bls48581.NewBIGcopy(s.Value)
+	value := bls48581.NewBIGcopy(s.Value, nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: p,
@@ -314,7 +335,7 @@ func (s *ScalarBls48581) SetPoint(p Point) PairingScalar {
 }
 
 func (s *ScalarBls48581) Order() *big.Int {
-	b := bls48581.NewBIGints(bls48581.CURVE_Order)
+	b := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
 	bytes := make([]byte, bls48581.MODBYTES)
 	b.ToBytes(bytes)
 	return new(big.Int).SetBytes(bytes)
@@ -369,7 +390,7 @@ func (p *PointBls48581G1) Hash(bytes []byte) Point {
 
 func (p *PointBls48581G1) Identity() Point {
 	g1 := bls48581.ECP_generator()
-	g1 = g1.Mul(bls48581.NewBIGint(0))
+	g1 = g1.Mul(bls48581.NewBIGint(0, nil), nil, nil)
 	return &PointBls48581G1{
 		Value: g1,
 	}
@@ -384,7 +405,7 @@ func (p *PointBls48581G1) Generator() Point {
 }
 
 func (p *PointBls48581G1) IsIdentity() bool {
-	return p.Value.Is_infinity()
+	return p.Value.Is_infinity(nil)
 }
 
 func (p *PointBls48581G1) IsNegative() bool {
@@ -395,18 +416,18 @@ func (p *PointBls48581G1) IsNegative() bool {
 }
 
 func (p *PointBls48581G1) IsOnCurve() bool {
-	return bls48581.G1member(p.Value)
+	return bls48581.G1member(p.Value, nil)
 }
 
 func (p *PointBls48581G1) Double() Point {
-	v := bls48581.NewECP()
+	v := bls48581.NewECP(nil)
 	v.Copy(p.Value)
-	v.Dbl()
+	v.Dbl(nil)
 	return &PointBls48581G1{v}
 }
 
 func (p *PointBls48581G1) Scalar() Scalar {
-	value := bls48581.NewBIG()
+	value := bls48581.NewBIG(nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: new(PointBls48581G1),
@@ -414,9 +435,9 @@ func (p *PointBls48581G1) Scalar() Scalar {
 }
 
 func (p *PointBls48581G1) Neg() Point {
-	v := bls48581.NewECP()
+	v := bls48581.NewECP(nil)
 	v.Copy(p.Value)
-	v.Neg()
+	v.Neg(nil)
 	return &PointBls48581G1{v}
 }
 
@@ -426,9 +447,9 @@ func (p *PointBls48581G1) Add(rhs Point) Point {
 	}
 	r, ok := rhs.(*PointBls48581G1)
 	if ok {
-		v := bls48581.NewECP()
+		v := bls48581.NewECP(nil)
 		v.Copy(p.Value)
-		v.Add(r.Value)
+		v.Add(r.Value, nil)
 		return &PointBls48581G1{v}
 	} else {
 		return nil
@@ -441,9 +462,9 @@ func (p *PointBls48581G1) Sub(rhs Point) Point {
 	}
 	r, ok := rhs.(*PointBls48581G1)
 	if ok {
-		v := bls48581.NewECP()
+		v := bls48581.NewECP(nil)
 		v.Copy(p.Value)
-		v.Sub(r.Value)
+		v.Sub(r.Value, nil)
 		return &PointBls48581G1{v}
 	} else {
 		return nil
@@ -456,9 +477,11 @@ func (p *PointBls48581G1) Mul(rhs Scalar) Point {
 	}
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		v := bls48581.NewECP()
+		mem := arena.NewArena()
+		defer mem.Free()
+		v := bls48581.NewECP(mem)
 		v.Copy(p.Value)
-		v = v.Mul(r.Value)
+		v = v.Mul(r.Value, nil, mem)
 		return &PointBls48581G1{v}
 	} else {
 		return nil
@@ -481,7 +504,7 @@ func (p *PointBls48581G1) Set(x, y *big.Int) (Point, error) {
 	y.FillBytes(yBytes)
 	xBig := bls48581.FromBytes(xBytes)
 	yBig := bls48581.FromBytes(yBytes)
-	v := bls48581.NewECPbigs(xBig, yBig)
+	v := bls48581.NewECPbigs(xBig, yBig, nil)
 	if v == nil {
 		return nil, fmt.Errorf("invalid coordinates")
 	}
@@ -504,7 +527,7 @@ func (p *PointBls48581G1) FromAffineCompressed(bytes []byte) (Point, error) {
 	var b [bls48581.MODBYTES + 1]byte
 	copy(b[:], bytes)
 	value := bls48581.ECP_fromBytes(b[:])
-	if value == nil || value.Is_infinity() {
+	if value == nil || value.Is_infinity(nil) {
 		return nil, errors.New("could not decode")
 	}
 	return &PointBls48581G1{value}, nil
@@ -514,7 +537,7 @@ func (p *PointBls48581G1) FromAffineUncompressed(bytes []byte) (Point, error) {
 	var b [bls48581.MODBYTES*2 + 1]byte
 	copy(b[:], bytes)
 	value := bls48581.ECP_fromBytes(b[:])
-	if value == nil || value.Is_infinity() {
+	if value == nil || value.Is_infinity(nil) {
 		return nil, errors.New("could not decode")
 	}
 	return &PointBls48581G1{value}, nil
@@ -541,8 +564,10 @@ func (p *PointBls48581G1) SumOfProducts(points []Point, scalars []Scalar) Point
 		}
 		nScalars[i] = s.Value
 	}
-	value := bls48581.ECP_muln(len(points), nPoints, nScalars)
-	if value == nil || value.Is_infinity() {
+	mem := arena.NewArena()
+	defer mem.Free()
+	value := bls48581.ECP_muln(len(points), nPoints, nScalars, mem)
+	if value == nil || value.Is_infinity(mem) {
 		return nil
 	}
 	return &PointBls48581G1{value}
@@ -563,77 +588,60 @@ func (p *PointBls48581G1) Pairing(rhs PairingPoint) Scalar {
 	return &ScalarBls48581Gt{pair}
 }
 
+func (p *PointBls48581G1) Ate2Pairing(
+	rhs *PointBls48581G2,
+	lhs2 *PointBls48581G1,
+	rhs2 *PointBls48581G2,
+) Scalar {
+	ate2 := bls48581.Ate2(rhs2.Value, p.Value, rhs2.Value, lhs2.Value)
+
+	return &ScalarBls48581Gt{ate2}
+}
+
 func (p *PointBls48581G1) MultiPairing(points ...PairingPoint) Scalar {
 	return bls48multiPairing(points...)
 }
 
 func (p *PointBls48581G1) X() *big.Int {
 	bytes := make([]byte, bls48581.MODBYTES)
-	p.Value.GetX().ToBytes(bytes[:])
+	p.Value.GetX(nil).ToBytes(bytes[:])
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G1) Y() *big.Int {
 	bytes := make([]byte, bls48581.MODBYTES)
-	p.Value.GetY().ToBytes(bytes[:])
+	p.Value.GetY(nil).ToBytes(bytes[:])
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G1) Modulus() *big.Int {
-	b := bls48581.NewBIGints(bls48581.Modulus)
+	b := bls48581.NewBIGints(bls48581.Modulus, nil)
 	bytes := make([]byte, bls48581.MODBYTES)
 	b.ToBytes(bytes)
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G1) MarshalBinary() ([]byte, error) {
-	return pointMarshalBinary(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G1) UnmarshalBinary(input []byte) error {
-	pt, err := pointUnmarshalBinary(input)
-	if err != nil {
-		return err
-	}
-	ppt, ok := pt.(*PointBls48581G1)
-	if !ok {
-		return fmt.Errorf("invalid point")
-	}
-	p.Value = ppt.Value
 	return nil
 }
 
 func (p *PointBls48581G1) MarshalText() ([]byte, error) {
-	return pointMarshalText(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G1) UnmarshalText(input []byte) error {
-	pt, err := pointUnmarshalText(input)
-	if err != nil {
-		return err
-	}
-	ppt, ok := pt.(*PointBls48581G1)
-	if !ok {
-		return fmt.Errorf("invalid point")
-	}
-	p.Value = ppt.Value
 	return nil
 }
 
 func (p *PointBls48581G1) MarshalJSON() ([]byte, error) {
-	return pointMarshalJson(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G1) UnmarshalJSON(input []byte) error {
-	pt, err := pointUnmarshalJson(input)
-	if err != nil {
-		return err
-	}
-	P, ok := pt.(*PointBls48581G1)
-	if !ok {
-		return fmt.Errorf("invalid type")
-	}
-	p.Value = P.Value
 	return nil
 }
 
@@ -646,15 +654,15 @@ func (p *PointBls48581G2) Random(reader io.Reader) Point {
 func (p *PointBls48581G2) Hash(bytes []byte) Point {
 	DST := []byte("BLS_SIG_BLS48581G2_XMD:SHA-512_SVDW_RO_NUL_")
 	u := bls48581.Hash_to_field(ext.MC_SHA2, bls48581.HASH_TYPE, DST, bytes, 2)
-	u[0].Add(u[1])
-	fp8 := bls48581.NewFP8fp(u[0])
+	u[0].Add(u[1], nil)
+	fp8 := bls48581.NewFP8fp(u[0], nil)
 	v := bls48581.ECP8_map2point(fp8)
 	return &PointBls48581G2{v}
 }
 
 func (p *PointBls48581G2) Identity() Point {
 	g2 := bls48581.ECP8_generator()
-	g2 = g2.Mul(bls48581.NewBIGint(0))
+	g2 = g2.Mul(bls48581.NewBIGint(0, nil), nil)
 	return &PointBls48581G2{
 		Value: g2,
 	}
@@ -669,7 +677,7 @@ func (p *PointBls48581G2) Generator() Point {
 }
 
 func (p *PointBls48581G2) IsIdentity() bool {
-	return p.Value.Is_infinity()
+	return p.Value.Is_infinity(nil)
 }
 
 func (p *PointBls48581G2) IsNegative() bool {
@@ -680,18 +688,18 @@ func (p *PointBls48581G2) IsNegative() bool {
 }
 
 func (p *PointBls48581G2) IsOnCurve() bool {
-	return bls48581.G2member(p.Value)
+	return bls48581.G2member(p.Value, nil)
 }
 
 func (p *PointBls48581G2) Double() Point {
-	v := bls48581.NewECP8()
+	v := bls48581.NewECP8(nil)
 	v.Copy(p.Value)
-	v.Dbl()
+	v.Dbl(nil)
 	return &PointBls48581G2{v}
 }
 
 func (p *PointBls48581G2) Scalar() Scalar {
-	value := bls48581.NewBIG()
+	value := bls48581.NewBIG(nil)
 	return &ScalarBls48581{
 		Value: value,
 		point: new(PointBls48581G2),
@@ -699,9 +707,9 @@ func (p *PointBls48581G2) Scalar() Scalar {
 }
 
 func (p *PointBls48581G2) Neg() Point {
-	v := bls48581.NewECP8()
+	v := bls48581.NewECP8(nil)
 	v.Copy(p.Value)
-	v.Neg()
+	v.Neg(nil)
 	return &PointBls48581G2{v}
 }
 
@@ -711,9 +719,9 @@ func (p *PointBls48581G2) Add(rhs Point) Point {
 	}
 	r, ok := rhs.(*PointBls48581G2)
 	if ok {
-		v := bls48581.NewECP8()
+		v := bls48581.NewECP8(nil)
 		v.Copy(p.Value)
-		v.Add(r.Value)
+		v.Add(r.Value, nil)
 		return &PointBls48581G2{v}
 	} else {
 		return nil
@@ -726,9 +734,9 @@ func (p *PointBls48581G2) Sub(rhs Point) Point {
 	}
 	r, ok := rhs.(*PointBls48581G2)
 	if ok {
-		v := bls48581.NewECP8()
+		v := bls48581.NewECP8(nil)
 		v.Copy(p.Value)
-		v.Sub(r.Value)
+		v.Sub(r.Value, nil)
 		return &PointBls48581G2{v}
 	} else {
 		return nil
@@ -741,11 +749,11 @@ func (p *PointBls48581G2) Mul(rhs Scalar) Point {
 	}
 	r, ok := rhs.(*ScalarBls48581)
 	if ok {
-		v := bls48581.NewECP8()
+		mem := arena.NewArena()
+		defer mem.Free()
+		v := bls48581.NewECP8(nil)
 		v.Copy(p.Value)
-		bytes := make([]byte, bls48581.MODBYTES)
-		r.Value.ToBytes(bytes)
-		v = v.Mul(bls48581.FromBytes(bytes))
+		v = v.Mul(r.Value, mem)
 		return &PointBls48581G2{v}
 	} else {
 		return nil
@@ -768,8 +776,8 @@ func (p *PointBls48581G2) Set(x, y *big.Int) (Point, error) {
 	y.FillBytes(yBytes)
 	xBig := bls48581.FP8_fromBytes(xBytes)
 	yBig := bls48581.FP8_fromBytes(yBytes)
-	v := bls48581.NewECP8fp8s(xBig, yBig)
-	if v == nil || v.Is_infinity() {
+	v := bls48581.NewECP8fp8s(xBig, yBig, nil)
+	if v == nil || v.Is_infinity(nil) {
 		return nil, fmt.Errorf("invalid coordinates")
 	}
 	return &PointBls48581G2{v}, nil
@@ -791,7 +799,7 @@ func (p *PointBls48581G2) FromAffineCompressed(bytes []byte) (Point, error) {
 	var b [bls48581.MODBYTES*8 + 1]byte
 	copy(b[:], bytes)
 	value := bls48581.ECP8_fromBytes(b[:])
-	if value == nil || value.Is_infinity() {
+	if value == nil || value.Is_infinity(nil) {
 		return nil, errors.New("could not decode")
 	}
 	return &PointBls48581G2{value}, nil
@@ -801,7 +809,7 @@ func (p *PointBls48581G2) FromAffineUncompressed(bytes []byte) (Point, error) {
 	var b [bls48581.MODBYTES*16 + 1]byte
 	copy(b[:], bytes)
 	value := bls48581.ECP8_fromBytes(b[:])
-	if value == nil || value.Is_infinity() {
+	if value == nil || value.Is_infinity(nil) {
 		return nil, errors.New("could not decode")
 	}
 	return &PointBls48581G2{value}, nil
@@ -828,8 +836,8 @@ func (p *PointBls48581G2) SumOfProducts(points []Point, scalars []Scalar) Point
 		}
 		nScalars[i] = s.Value
 	}
-	value := bls48581.Mul16(nPoints, nScalars)
-	if value == nil || value.Is_infinity() {
+	value := bls48581.Mul16(nPoints, nScalars, nil)
+	if value == nil || value.Is_infinity(nil) {
 		return nil
 	}
 	return &PointBls48581G2{value}
@@ -855,74 +863,47 @@ func (p *PointBls48581G2) MultiPairing(points ...PairingPoint) Scalar {
 }
 
 func (p *PointBls48581G2) X() *big.Int {
-	x := p.Value.GetX()
+	x := p.Value.GetX(nil)
 	bytes := make([]byte, 8*bls48581.MODBYTES)
 	x.ToBytes(bytes)
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G2) Y() *big.Int {
-	y := p.Value.GetY()
+	y := p.Value.GetY(nil)
 	bytes := make([]byte, 8*bls48581.MODBYTES)
 	y.ToBytes(bytes)
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G2) Modulus() *big.Int {
-	b := bls48581.NewBIGints(bls48581.Modulus)
+	b := bls48581.NewBIGints(bls48581.Modulus, nil)
 	bytes := make([]byte, bls48581.MODBYTES)
 	b.ToBytes(bytes)
 	return new(big.Int).SetBytes(bytes)
 }
 
 func (p *PointBls48581G2) MarshalBinary() ([]byte, error) {
-	return pointMarshalBinary(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G2) UnmarshalBinary(input []byte) error {
-	pt, err := pointUnmarshalBinary(input)
-	if err != nil {
-		return err
-	}
-	ppt, ok := pt.(*PointBls48581G2)
-	if !ok {
-		return fmt.Errorf("invalid point")
-	}
-	p.Value = ppt.Value
 	return nil
 }
 
 func (p *PointBls48581G2) MarshalText() ([]byte, error) {
-	return pointMarshalText(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G2) UnmarshalText(input []byte) error {
-	pt, err := pointUnmarshalText(input)
-	if err != nil {
-		return err
-	}
-	ppt, ok := pt.(*PointBls48581G2)
-	if !ok {
-		return fmt.Errorf("invalid point")
-	}
-	p.Value = ppt.Value
 	return nil
 }
 
 func (p *PointBls48581G2) MarshalJSON() ([]byte, error) {
-	return pointMarshalJson(p)
+	return nil, nil
 }
 
 func (p *PointBls48581G2) UnmarshalJSON(input []byte) error {
-	pt, err := pointUnmarshalJson(input)
-	if err != nil {
-		return err
-	}
-	P, ok := pt.(*PointBls48581G2)
-	if !ok {
-		return fmt.Errorf("invalid type")
-	}
-	p.Value = P.Value
 	return nil
 }
 
@@ -931,21 +912,25 @@ func bls48multiPairing(points ...PairingPoint) Scalar {
 		return nil
 	}
 	valid := true
-	r := bls48581.Initmp()
+	mem := arena.NewArena()
+	defer mem.Free()
+	r := bls48581.Initmp(mem)
 	for i := 0; i < len(points); i += 2 {
 		pt1, ok := points[i].(*PointBls48581G1)
 		valid = valid && ok
 		pt2, ok := points[i+1].(*PointBls48581G2)
 		valid = valid && ok
 		if valid {
-			bls48581.Another(r, pt2.Value, pt1.Value)
+			inner := arena.NewArena()
+			bls48581.Another(r, pt2.Value, pt1.Value, inner)
+			inner.Free()
 		}
 	}
 	if !valid {
 		return nil
 	}
 
-	v := bls48581.Miller(r)
+	v := bls48581.Miller(r, mem)
 	v = bls48581.Fexp(v)
 	return &ScalarBls48581Gt{v}
 }
@@ -973,15 +958,15 @@ func (s *ScalarBls48581Gt) Hash(bytes []byte) Scalar {
 }
 
 func (s *ScalarBls48581Gt) Zero() Scalar {
-	return &ScalarBls48581Gt{bls48581.NewFP48int(0)}
+	return &ScalarBls48581Gt{bls48581.NewFP48int(0, nil)}
 }
 
 func (s *ScalarBls48581Gt) One() Scalar {
-	return &ScalarBls48581Gt{bls48581.NewFP48int(1)}
+	return &ScalarBls48581Gt{bls48581.NewFP48int(1, nil)}
 }
 
 func (s *ScalarBls48581Gt) IsZero() bool {
-	return s.Value.IsZero()
+	return s.Value.IsZero(nil)
 }
 
 func (s *ScalarBls48581Gt) IsOne() bool {
@@ -1034,7 +1019,7 @@ func (s *ScalarBls48581Gt) IsEven() bool {
 }
 
 func (s *ScalarBls48581Gt) New(input int) Scalar {
-	fp := bls48581.NewFP48int(input)
+	fp := bls48581.NewFP48int(input, nil)
 	return &ScalarBls48581Gt{fp}
 }
 
@@ -1048,20 +1033,20 @@ func (s *ScalarBls48581Gt) Cmp(rhs Scalar) int {
 }
 
 func (s *ScalarBls48581Gt) Square() Scalar {
-	v := bls48581.NewFP48copy(s.Value)
-	v.Sqr()
+	v := bls48581.NewFP48copy(s.Value, nil)
+	v.Sqr(nil)
 	return &ScalarBls48581Gt{v}
 }
 
 func (s *ScalarBls48581Gt) Double() Scalar {
-	v := bls48581.NewFP48copy(s.Value)
-	v.Mul(bls48581.NewFP48int(2))
+	v := bls48581.NewFP48copy(s.Value, nil)
+	v.Mul(bls48581.NewFP48int(2, nil), nil)
 	return &ScalarBls48581Gt{v}
 }
 
 func (s *ScalarBls48581Gt) Invert() (Scalar, error) {
-	v := bls48581.NewFP48copy(s.Value)
-	v.Invert()
+	v := bls48581.NewFP48copy(s.Value, nil)
+	v.Invert(nil)
 	if v == nil {
 		return nil, fmt.Errorf("not invertible")
 	}
@@ -1074,9 +1059,9 @@ func (s *ScalarBls48581Gt) Sqrt() (Scalar, error) {
 }
 
 func (s *ScalarBls48581Gt) Cube() Scalar {
-	v := bls48581.NewFP48copy(s.Value)
-	v.Sqr()
-	v.Mul(s.Value)
+	v := bls48581.NewFP48copy(s.Value, nil)
+	v.Sqr(nil)
+	v.Mul(s.Value, nil)
 	return &ScalarBls48581Gt{v}
 }
 
@@ -1093,8 +1078,8 @@ func (s *ScalarBls48581Gt) Sub(rhs Scalar) Scalar {
 func (s *ScalarBls48581Gt) Mul(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581Gt)
 	if ok {
-		v := bls48581.NewFP48copy(s.Value)
-		v.Mul(r.Value)
+		v := bls48581.NewFP48copy(s.Value, nil)
+		v.Mul(r.Value, nil)
 		return &ScalarBls48581Gt{v}
 	} else {
 		return nil
@@ -1108,9 +1093,9 @@ func (s *ScalarBls48581Gt) MulAdd(y, z Scalar) Scalar {
 func (s *ScalarBls48581Gt) Div(rhs Scalar) Scalar {
 	r, ok := rhs.(*ScalarBls48581Gt)
 	if ok {
-		v := bls48581.NewFP48copy(r.Value)
-		v.Invert()
-		v.Mul(s.Value)
+		v := bls48581.NewFP48copy(r.Value, nil)
+		v.Invert(nil)
+		v.Mul(s.Value, nil)
 		return &ScalarBls48581Gt{v}
 	} else {
 		return nil
@@ -1160,7 +1145,7 @@ func (s *ScalarBls48581Gt) SetBytesWide(bytes []byte) (Scalar, error) {
 }
 
 func (s *ScalarBls48581Gt) Clone() Scalar {
-	fp := bls48581.NewFP48copy(s.Value)
+	fp := bls48581.NewFP48copy(s.Value, nil)
 	return &ScalarBls48581Gt{
 		Value: fp,
 	}
@@ -78,9 +78,9 @@ func TestScalarBls48581G1Invert(t *testing.T) {
 	nine := bls48581G1.Scalar.New(9)
 	actual, _ := nine.Invert()
 	sa, _ := actual.(*ScalarBls48581)
-	expected, err := bls48581G1.Scalar.SetBigInt(bhex("ab22a52d6e7108e9eabb0e17e8139cf4b9392413a05486ec3dcef3b90bea3db988c1478b9ec2b4f1382ab890f18c0c9a0f85d504cc493f9b79f8c84e41d01ae5070000000000000000"))
+	expected, err := bls48581G1.Scalar.SetBigInt(bhex("000000000000000007e51ad0414ec8f8799b3f49cc04d5850f9a0c8cf190b82a38f1b4c29e8b47c188b93dea0bb9f3ce3dec8654a0132439b9f49c13e8170ebbeae908716e2da522ab"))
 	require.NoError(t, err)
-	require.Equal(t, sa.Cmp(expected), 0)
+	require.Equal(t, sa.Value.ToString(), expected.(*ScalarBls48581).Value.ToString())
 }
 
 func TestScalarBls48581G1Add(t *testing.T) {
@@ -91,11 +91,11 @@ func TestScalarBls48581G1Add(t *testing.T) {
 	require.NotNil(t, fifteen)
 	expected := bls48581G1.Scalar.New(15)
 	require.Equal(t, expected.Cmp(fifteen), 0)
-	qq := bls48581.NewBIGints(bls48581.CURVE_Order)
-	qq.Sub(bls48581.NewBIGint(3))
+	qq := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
+	qq.Sub(bls48581.NewBIGint(3, nil))
 
 	upper := &ScalarBls48581{
-		Value: bls48581.NewBIGcopy(qq),
+		Value: bls48581.NewBIGcopy(qq, nil),
 	}
 	actual := upper.Add(nine)
 	require.NotNil(t, actual)
@@ -106,8 +106,8 @@ func TestScalarBls48581G1Sub(t *testing.T) {
 	bls48581G1 := BLS48581G1()
 	nine := bls48581G1.Scalar.New(9)
 	six := bls48581G1.Scalar.New(6)
-	n := bls48581.NewFPbig(bls48581.NewBIGints(bls48581.CURVE_Order))
-	n.Sub(bls48581.NewFPint(3))
+	n := bls48581.NewFPbig(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
+	n.Sub(bls48581.NewFPint(3, nil), nil)
 
 	expected := bls48581G1.Scalar.New(0).Sub(bls48581G1.Scalar.New(3))
 	actual := six.Sub(nine)
@@ -138,7 +138,7 @@ func TestScalarBls48581G1Serialize(t *testing.T) {
 	sc := bls48581G1.Scalar.New(255)
 	sequence := sc.Bytes()
 	require.Equal(t, len(sequence), 73)
-	require.Equal(t, sequence, []byte{0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
+	require.Equal(t, sequence, []byte{0x00, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff})
 	ret, err := bls48581G1.Scalar.SetBytes(sequence)
 	require.NoError(t, err)
 	require.Equal(t, ret.Cmp(sc), 0)
@@ -575,11 +575,11 @@ func BLS48581G1() *Curve {
 func bls48581g1Init() {
 	bls48581g1 = Curve{
 		Scalar: &ScalarBls48581{
-			Value: bls48581.NewBIGint(1),
+			Value: bls48581.NewBIGint(1, nil),
 			point: new(PointBls48581G1),
 		},
 		Point: new(PointBls48581G1).Identity(),
-		Name:  BLS12381G1Name,
+		Name:  BLS48581G1Name,
 	}
 }
 
@@ -592,7 +592,7 @@ func BLS48581G2() *Curve {
 func bls48581g2Init() {
 	bls48581g2 = Curve{
 		Scalar: &ScalarBls48581{
-			Value: bls48581.NewBIGint(1),
+			Value: bls48581.NewBIGint(1, nil),
 			point: new(PointBls48581G2),
 		},
 		Point: new(PointBls48581G2).Identity(),
@@ -603,7 +603,7 @@ func bls48581g2Init() {
 func BLS48581(preferredPoint Point) *PairingCurve {
 	return &PairingCurve{
 		Scalar: &ScalarBls48581{
-			Value: bls48581.NewBIG(),
+			Value: bls48581.NewBIG(nil),
 			point: preferredPoint,
 		},
 		PointG1: &PointBls48581G1{
@@ -613,7 +613,7 @@ func BLS48581(preferredPoint Point) *PairingCurve {
 			Value: bls48581.ECP8_generator(),
 		},
 		GT: &ScalarBls48581Gt{
-			Value: bls48581.NewFP48int(1),
+			Value: bls48581.NewFP48int(1, nil),
 		},
 		Name: BLS48581Name,
 	}
@@ -863,38 +863,40 @@ type sswuParams struct {
 // Let `n` be a number of point-scalar pairs.
 // Let `w` be a window of bits (6..8, chosen based on `n`, see cost factor).
 //
 // 1. Prepare `2^(w-1) - 1` buckets with indices `[1..2^(w-1))` initialized with identity points.
 //    Bucket 0 is not needed as it would contain points multiplied by 0.
 // 2. Convert scalars to a radix-`2^w` representation with signed digits in `[-2^w/2, 2^w/2]`.
 //    Note: only the last digit may equal `2^w/2`.
 // 3. Starting with the last window, for each point `i=[0..n)` add it to a a bucket indexed by
 //    the point's scalar's value in the window.
 // 4. Once all points in a window are sorted into buckets, add buckets by multiplying each
 //    by their index. Efficient way of doing it is to start with the last bucket and compute two sums:
 //    intermediate sum from the last to the first, and the full sum made of all intermediate sums.
 // 5. Shift the resulting sum of buckets by `w` bits by using `w` doublings.
 // 6. Add to the return value.
 // 7. Repeat the loop.
 //
 // Approximate cost w/o wNAF optimizations (A = addition, D = doubling):
 //
 // ```ascii
 // cost = (n*A + 2*(2^w/2)*A + w*D + A)*256/w
+//
 //           |          |        |     |   |
 //           |          |        |     |   looping over 256/w windows
 //           |          |        |     adding to the result
 //  sorting points      |        shifting the sum by w bits (to the next window, starting from last window)
 //  one by one          |
 //  into buckets        adding/subtracting all buckets
 //                      multiplied by their indexes
 //                      using a sum of intermediate sums
+//
 // ```
 //
 // For large `n`, dominant factor is (n*256/w) additions.
 // However, if `w` is too big and `n` is not too big, then `(2^w/2)*A` could dominate.
 // Therefore, the optimal choice of `w` grows slowly as `n` grows.
 //
-// For constant time we use a fixed window of 6
+// # For constant time we use a fixed window of 6
 //
 // This algorithm is adapted from section 4 of <https://eprint.iacr.org/2012/549.pdf>.
 // and https://cacr.uwaterloo.ca/techreports/2010/cacr2010-26.pdf
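The bucket step the comment describes (item 4 above) can be made concrete with a short sketch against this package's Point interface. This is only an illustration of the "sum of intermediate sums" trick, not code from the repository; it assumes `buckets[i]` holds the sum of points whose window digit is `i+1` and that the slice is non-empty.

```go
func aggregateBuckets(buckets []Point) Point {
	acc := buckets[0].Identity()     // accumulates Σ (i+1)·buckets[i]
	partial := buckets[0].Identity() // buckets[i] + buckets[i+1] + ... + buckets[last]
	for i := len(buckets) - 1; i >= 0; i-- {
		partial = partial.Add(buckets[i]) // extend the intermediate sum downward
		acc = acc.Add(partial)            // each pass re-adds the whole tail, so buckets[i] ends up counted i+1 times
	}
	return acc
}
```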
@@ -1,3 +1,5 @@
+//go:build js && wasm
+
 /*
  * Copyright (c) 2012-2020 MIRACL UK Ltd.
  *
nekryptology/pkg/core/curves/native/bls48581/arch_64.go (new file, 28 lines)
@@ -0,0 +1,28 @@
+//go:build !js && !wasm
+
+/*
+ * Copyright (c) 2012-2020 MIRACL UK Ltd.
+ *
+ * This file is part of MIRACL Core
+ * (see https://github.com/miracl/core).
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* core BIG number class */
+
+package bls48581
+
+type Chunk int64
+
+const CHUNK int = 64 /* Set word size */
@@ -1,3 +1,5 @@
+//go:build js && wasm
+
 /*
  * Copyright (c) 2012-2020 MIRACL UK Ltd.
  *
nekryptology/pkg/core/curves/native/bls48581/big_64.go (new file, 999 lines)
@@ -0,0 +1,999 @@
+//go:build !js && !wasm
+
+/*
+ * Copyright (c) 2012-2020 MIRACL UK Ltd.
+ *
+ * This file is part of MIRACL Core
+ * (see https://github.com/miracl/core).
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* core BIG number class */
+
+package bls48581
+
+import (
+	"arena"
+	"math/bits"
+	"strconv"
+
+	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
+)
+
+//import "fmt"
+
+type BIG struct {
+	w [NLEN]Chunk
+}
+
+type DBIG struct {
+	w [2 * NLEN]Chunk
+}
+
+/***************** 64-bit specific code ****************/
+
+/* First the 32/64-bit dependent BIG code */
+/* Note that because of the lack of a 128-bit integer, 32 and 64-bit code needs to be done differently */
+
+/* return a*b as DBIG */
+func mul(a *BIG, b *BIG, mem *arena.Arena) *DBIG {
+	c := NewDBIG(mem)
+	carry := Chunk(0)
+
+	for i := 0; i < NLEN; i++ {
+		carry = 0
+		for j := 0; j < NLEN; j++ {
+			carry, c.w[i+j] = mulAdd(a.w[i], b.w[j], carry, c.w[i+j])
+		}
+		c.w[NLEN+i] = carry
+	}
+
+	return c
+}
+
+/* return a^2 as DBIG */
+func sqr(a *BIG, mem *arena.Arena) *DBIG {
+	c := NewDBIG(mem)
+	carry := Chunk(0)
+
+	for i := 0; i < NLEN; i++ {
+		carry = 0
+		for j := i + 1; j < NLEN; j++ {
+			//if a.w[i]<0 {fmt.Printf("Negative m i in sqr\n")}
+			//if a.w[j]<0 {fmt.Printf("Negative m j in sqr\n")}
+			carry, c.w[i+j] = mulAdd(2*a.w[i], a.w[j], carry, c.w[i+j])
+		}
+		c.w[NLEN+i] = carry
+	}
+
+	for i := 0; i < NLEN; i++ {
+		//if a.w[i]<0 {fmt.Printf("Negative m s in sqr\n")}
+		top, bot := mulAdd(a.w[i], a.w[i], 0, c.w[2*i])
+
+		c.w[2*i] = bot
+		c.w[2*i+1] += top
+	}
+	c.norm()
+	return c
+}
+
+func monty(md *BIG, mc Chunk, d *DBIG, mem *arena.Arena) *BIG {
+	carry := Chunk(0)
+	m := Chunk(0)
+	for i := 0; i < NLEN; i++ {
+		if mc == -1 {
+			m = (-d.w[i]) & BMASK
+		} else {
+			if mc == 1 {
+				m = d.w[i]
+			} else {
+				m = (mc * d.w[i]) & BMASK
+			}
+		}
+
+		carry = 0
+		for j := 0; j < NLEN; j++ {
+			carry, d.w[i+j] = mulAdd(m, md.w[j], carry, d.w[i+j])
+			//if m<0 {fmt.Printf("Negative m in monty\n")}
+			//if md.w[j]<0 {fmt.Printf("Negative m in monty\n")}
+		}
+		d.w[NLEN+i] += carry
+	}
+
+	b := NewBIG(mem)
+	for i := 0; i < NLEN; i++ {
+		b.w[i] = d.w[NLEN+i]
+	}
+	b.norm()
+	return b
+}
+
+/* set this[i]+=x*y+c, and return high part */
+func mulAdd(a Chunk, b Chunk, c Chunk, r Chunk) (Chunk, Chunk) {
+
+	tp, bt := bits.Mul64(uint64(a), uint64(b)) // use math/bits intrinsic
+	bot := Chunk(bt & uint64(BMASK))
+	top := Chunk((tp << (64 - BASEBITS)) | (bt >> BASEBITS))
+	bot += c
+	bot += r
+	carry := bot >> BASEBITS
+	bot &= BMASK
+	top += carry
+	return top, bot
+
+}
+
/************************************************************/
|
||||||
|
|
||||||
|
func (r *BIG) get(i int) Chunk {
|
||||||
|
return r.w[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) set(i int, x Chunk) {
|
||||||
|
r.w[i] = x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) xortop(x Chunk) {
|
||||||
|
r.w[NLEN-1] ^= x
|
||||||
|
}
|
||||||
|
|
||||||
|
/* normalise BIG - force all digits < 2^BASEBITS */
|
||||||
|
func (r *BIG) norm() Chunk {
|
||||||
|
carry := Chunk(0)
|
||||||
|
for i := 0; i < NLEN-1; i++ {
|
||||||
|
d := r.w[i] + carry
|
||||||
|
r.w[i] = d & BMASK
|
||||||
|
carry = d >> BASEBITS
|
||||||
|
}
|
||||||
|
r.w[NLEN-1] = (r.w[NLEN-1] + carry)
|
||||||
|
return (r.w[NLEN-1] >> ((8 * MODBYTES) % BASEBITS))
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Shift right by less than a word */
|
||||||
|
func (r *BIG) fshr(k uint) int {
|
||||||
|
w := r.w[0] & ((Chunk(1) << k) - 1) /* shifted out part */
|
||||||
|
for i := 0; i < NLEN-1; i++ {
|
||||||
|
r.w[i] = (r.w[i] >> k) | ((r.w[i+1] << (BASEBITS - k)) & BMASK)
|
||||||
|
}
|
||||||
|
r.w[NLEN-1] = r.w[NLEN-1] >> k
|
||||||
|
return int(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Shift right by less than a word */
|
||||||
|
func (r *BIG) fshl(k uint) int {
|
||||||
|
r.w[NLEN-1] = (r.w[NLEN-1] << k) | (r.w[NLEN-2] >> (BASEBITS - k))
|
||||||
|
for i := NLEN - 2; i > 0; i-- {
|
||||||
|
r.w[i] = ((r.w[i] << k) & BMASK) | (r.w[i-1] >> (BASEBITS - k))
|
||||||
|
}
|
||||||
|
r.w[0] = (r.w[0] << k) & BMASK
|
||||||
|
return int(r.w[NLEN-1] >> ((8 * MODBYTES) % BASEBITS)) /* return excess - only used in ff.c */
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBIG(mem *arena.Arena) *BIG {
|
||||||
|
var b *BIG
|
||||||
|
if mem != nil {
|
||||||
|
b = arena.New[BIG](mem)
|
||||||
|
} else {
|
||||||
|
b = new(BIG)
|
||||||
|
}
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
b.w[i] = 0
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBIGints(x [NLEN]Chunk, mem *arena.Arena) *BIG {
	var b *BIG
	if mem != nil {
		b = arena.New[BIG](mem)
	} else {
		b = new(BIG)
	}
	for i := 0; i < NLEN; i++ {
		b.w[i] = x[i]
	}
	return b
}

func NewBIGint(x int, mem *arena.Arena) *BIG {
	var b *BIG
	if mem != nil {
		b = arena.New[BIG](mem)
	} else {
		b = new(BIG)
	}
	b.w[0] = Chunk(x)
	for i := 1; i < NLEN; i++ {
		b.w[i] = 0
	}
	return b
}

func NewBIGcopy(x *BIG, mem *arena.Arena) *BIG {
	var b *BIG
	if mem != nil {
		b = arena.New[BIG](mem)
	} else {
		b = new(BIG)
	}
	for i := 0; i < NLEN; i++ {
		b.w[i] = x.w[i]
	}
	return b
}

func NewBIGdcopy(x *DBIG, mem *arena.Arena) *BIG {
	var b *BIG
	if mem != nil {
		b = arena.New[BIG](mem)
	} else {
		b = new(BIG)
	}
	for i := 0; i < NLEN; i++ {
		b.w[i] = x.w[i]
	}
	return b
}
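All of these constructors take an optional *arena.Arena: pass an arena to batch-allocate temporaries and release them with a single Free, or pass nil to fall back to ordinary heap allocation. The sketch below shows the intended calling pattern; it assumes the import path used elsewhere in this repository and a toolchain built with GOEXPERIMENT=arenas (the arena package is still a Go experiment), so treat it as illustrative rather than a fixed API.

package main

import (
	"arena"
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	// Heap allocation: a nil arena makes every constructor fall back to new(...).
	order := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
	fmt.Println("order bits:", order.Nbits())

	// Arena allocation: every value built with mem is released by the single Free.
	mem := arena.NewArena()
	defer mem.Free()
	tmp := bls48581.NewBIGcopy(order, mem)
	tmp.Sub(bls48581.NewBIGint(1, mem)) // order-1, computed entirely in the arena
	fmt.Println("order-1 bits:", tmp.Nbits())
}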
|
||||||
|
|
||||||
|
/* test for zero */
|
||||||
|
func (r *BIG) IsZero() bool {
|
||||||
|
d := Chunk(0)
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
d |= r.w[i]
|
||||||
|
}
|
||||||
|
return (1 & ((d - 1) >> BASEBITS)) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/* set to zero */
|
||||||
|
func (r *BIG) zero() {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Test for equal to one */
|
||||||
|
func (r *BIG) isunity() bool {
|
||||||
|
d := Chunk(0)
|
||||||
|
for i := 1; i < NLEN; i++ {
|
||||||
|
d |= r.w[i]
|
||||||
|
}
|
||||||
|
return (1 & ((d - 1) >> BASEBITS) & (((r.w[0] ^ 1) - 1) >> BASEBITS)) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/* set to one */
|
||||||
|
func (r *BIG) one() {
|
||||||
|
r.w[0] = 1
|
||||||
|
for i := 1; i < NLEN; i++ {
|
||||||
|
r.w[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Copy from another BIG */
|
||||||
|
func (r *BIG) copy(x *BIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = x.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Copy from another DBIG */
|
||||||
|
func (r *BIG) dcopy(x *DBIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = x.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Conditional swap of two bigs depending on d using XOR - no branches */
func (r *BIG) cswap(b *BIG, d int) Chunk {
	c := Chunk(-d)
	s := Chunk(0)
	v := r.w[0] ^ b.w[1]
	va := v + v
	va >>= 1
	for i := 0; i < NLEN; i++ {
		t := c & (r.w[i] ^ b.w[i])
		t ^= v
		e := r.w[i] ^ t
		s ^= e // to force calculation of e
		r.w[i] = e ^ va
		e = b.w[i] ^ t
		s ^= e
		b.w[i] = e ^ va
	}
	return s
}

func (r *BIG) cmove(g *BIG, d int) Chunk {
	b := Chunk(-d)
	s := Chunk(0)
	v := r.w[0] ^ g.w[1]
	va := v + v
	va >>= 1
	for i := 0; i < NLEN; i++ {
		t := (r.w[i] ^ g.w[i]) & b
		t ^= v
		e := r.w[i] ^ t
		s ^= e
		r.w[i] = e ^ va
	}
	return s
}
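Both cswap and cmove avoid data-dependent branches: the selector d is stretched into an all-zeros or all-ones mask and applied with XOR, so the same instructions and memory accesses run whichever value is selected (the extra v/va terms only force the intermediates to be computed). A minimal standalone sketch of the same masking idea on a single 64-bit word, outside this package:

package main

import "fmt"

// cmoveWord returns b if d==1 and a if d==0, without branching on d.
func cmoveWord(a, b uint64, d int) uint64 {
	mask := uint64(0) - uint64(d) // 0x000...0 or 0xFFF...F
	return a ^ ((a ^ b) & mask)
}

func main() {
	fmt.Println(cmoveWord(10, 20, 0)) // 10
	fmt.Println(cmoveWord(10, 20, 1)) // 20
}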
|
||||||
|
|
||||||
|
/* general shift right */
|
||||||
|
func (r *BIG) shr(k uint) {
|
||||||
|
n := (k % BASEBITS)
|
||||||
|
m := int(k / BASEBITS)
|
||||||
|
for i := 0; i < NLEN-m-1; i++ {
|
||||||
|
r.w[i] = (r.w[m+i] >> n) | ((r.w[m+i+1] << (BASEBITS - n)) & BMASK)
|
||||||
|
}
|
||||||
|
r.w[NLEN-m-1] = r.w[NLEN-1] >> n
|
||||||
|
for i := NLEN - m; i < NLEN; i++ {
|
||||||
|
r.w[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* general shift left */
|
||||||
|
func (r *BIG) shl(k uint) {
|
||||||
|
n := k % BASEBITS
|
||||||
|
m := int(k / BASEBITS)
|
||||||
|
|
||||||
|
r.w[NLEN-1] = (r.w[NLEN-1-m] << n)
|
||||||
|
if NLEN >= m+2 {
|
||||||
|
r.w[NLEN-1] |= (r.w[NLEN-m-2] >> (BASEBITS - n))
|
||||||
|
}
|
||||||
|
for i := NLEN - 2; i > m; i-- {
|
||||||
|
r.w[i] = ((r.w[i-m] << n) & BMASK) | (r.w[i-m-1] >> (BASEBITS - n))
|
||||||
|
}
|
||||||
|
r.w[m] = (r.w[0] << n) & BMASK
|
||||||
|
for i := 0; i < m; i++ {
|
||||||
|
r.w[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return number of bits */
|
||||||
|
func (r *BIG) nbits() int {
|
||||||
|
t := NewBIGcopy(r, nil)
|
||||||
|
k := NLEN - 1
|
||||||
|
t.norm()
|
||||||
|
for k >= 0 && t.w[k] == 0 {
|
||||||
|
k--
|
||||||
|
}
|
||||||
|
if k < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
bts := int(BASEBITS) * k
|
||||||
|
c := t.w[k]
|
||||||
|
for c != 0 {
|
||||||
|
c /= 2
|
||||||
|
bts++
|
||||||
|
}
|
||||||
|
return bts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) Nbits() int {
|
||||||
|
return r.nbits()
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Convert to Hex String */
|
||||||
|
func (r *BIG) ToString() string {
|
||||||
|
s := ""
|
||||||
|
len := r.nbits()
|
||||||
|
|
||||||
|
if len%4 == 0 {
|
||||||
|
len /= 4
|
||||||
|
} else {
|
||||||
|
len /= 4
|
||||||
|
len++
|
||||||
|
|
||||||
|
}
|
||||||
|
MB := int(MODBYTES * 2)
|
||||||
|
if len < MB {
|
||||||
|
len = MB
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := len - 1; i >= 0; i-- {
|
||||||
|
b := NewBIGcopy(r, nil)
|
||||||
|
|
||||||
|
b.shr(uint(i * 4))
|
||||||
|
s += strconv.FormatInt(int64(b.w[0]&15), 16)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) Add(x *BIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = r.w[i] + x.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) or(x *BIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = r.w[i] | x.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return this+x */
|
||||||
|
func (r *BIG) Plus(x *BIG) *BIG {
|
||||||
|
s := new(BIG)
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
s.w[i] = r.w[i] + x.w[i]
|
||||||
|
}
|
||||||
|
s.norm()
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this+=x, where x is int */
|
||||||
|
func (r *BIG) inc(x int) {
|
||||||
|
r.norm()
|
||||||
|
r.w[0] += Chunk(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this*=c and catch overflow in DBIG */
|
||||||
|
func (r *BIG) pxmul(c int, mem *arena.Arena) *DBIG {
|
||||||
|
m := NewDBIG(mem)
|
||||||
|
carry := Chunk(0)
|
||||||
|
for j := 0; j < NLEN; j++ {
|
||||||
|
carry, m.w[j] = mulAdd(r.w[j], Chunk(c), carry, m.w[j])
|
||||||
|
//if c<0 {fmt.Printf("Negative c in pxmul\n")}
|
||||||
|
//if r.w[j]<0 {fmt.Printf("Negative c in pxmul\n")}
|
||||||
|
}
|
||||||
|
m.w[NLEN] = carry
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return this-x */
|
||||||
|
func (r *BIG) Minus(x *BIG) *BIG {
|
||||||
|
d := new(BIG)
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
d.w[i] = r.w[i] - x.w[i]
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this-=x */
|
||||||
|
func (r *BIG) Sub(x *BIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = r.w[i] - x.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* reverse subtract this=x-this */
|
||||||
|
func (r *BIG) rsub(x *BIG) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] = x.w[i] - r.w[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this-=x, where x is int */
|
||||||
|
func (r *BIG) dec(x int) {
|
||||||
|
r.norm()
|
||||||
|
r.w[0] -= Chunk(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this*=x, where x is small int<NEXCESS */
|
||||||
|
func (r *BIG) imul(c int) {
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
r.w[i] *= Chunk(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this*=x, where x is >NEXCESS */
|
||||||
|
func (r *BIG) pmul(c int) Chunk {
|
||||||
|
carry := Chunk(0)
|
||||||
|
// r.norm();
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
ak := r.w[i]
|
||||||
|
r.w[i] = 0
|
||||||
|
carry, r.w[i] = mulAdd(ak, Chunk(c), carry, r.w[i])
|
||||||
|
//if c<0 {fmt.Printf("Negative c in pmul\n")}
|
||||||
|
//if ak<0 {fmt.Printf("Negative c in pmul\n")}
|
||||||
|
}
|
||||||
|
return carry
|
||||||
|
}
|
||||||
|
|
||||||
|
/* convert this BIG to byte array */
func (r *BIG) tobytearray(b []byte, n int) {
	//r.norm();
	c := NewBIGcopy(r, nil)
	c.norm()

	for i := int(MODBYTES) - 1; i >= 0; i-- {
		b[i+n] = byte(c.w[0])
		c.fshr(8)
	}
}

/* convert from byte array to BIG */
func frombytearray(b []byte, n int) *BIG {
	m := NewBIG(nil)
	l := len(b)
	for i := 0; i < int(MODBYTES); i++ {
		m.fshl(8)
		if i < l {
			m.w[0] += Chunk(int(b[i+n] & 0xff))
		} else {
			m.w[0] += Chunk(int(0 & 0xff))
		}
	}
	return m
}

func (r *BIG) ToBytes(b []byte) {
	r.tobytearray(b, 0)
}

func FromBytes(b []byte) *BIG {
	return frombytearray(b, 0)
}
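ToBytes always writes exactly MODBYTES (73) big-endian bytes, and FromBytes reads up to that many, so a round trip through a 73-byte buffer is lossless. A short sketch, under the same import-path assumption as the earlier example:

package main

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	q := bls48581.NewBIGints(bls48581.CURVE_Order, nil)

	var buf [73]byte // MODBYTES big-endian bytes
	q.ToBytes(buf[:])

	back := bls48581.FromBytes(buf[:])
	fmt.Println("round trip ok:", bls48581.Comp(q, back) == 0)
}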
|
||||||
|
|
||||||
|
/* divide by 3 */
|
||||||
|
func (r *BIG) div3() int {
|
||||||
|
carry := Chunk(0)
|
||||||
|
r.norm()
|
||||||
|
base := (Chunk(1) << BASEBITS)
|
||||||
|
for i := NLEN - 1; i >= 0; i-- {
|
||||||
|
ak := (carry*base + r.w[i])
|
||||||
|
r.w[i] = ak / 3
|
||||||
|
carry = ak % 3
|
||||||
|
}
|
||||||
|
return int(carry)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return a*b where result fits in a BIG */
|
||||||
|
func smul(a *BIG, b *BIG) *BIG {
|
||||||
|
carry := Chunk(0)
|
||||||
|
c := NewBIG(nil)
|
||||||
|
for i := 0; i < NLEN; i++ {
|
||||||
|
carry = 0
|
||||||
|
for j := 0; j < NLEN; j++ {
|
||||||
|
if i+j < NLEN {
|
||||||
|
carry, c.w[i+j] = mulAdd(a.w[i], b.w[j], carry, c.w[i+j])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Compare a and b, return 0 if a==b, -1 if a<b, +1 if a>b. Inputs must be normalised */
|
||||||
|
func Comp(a *BIG, b *BIG) int {
|
||||||
|
gt := Chunk(0)
|
||||||
|
eq := Chunk(1)
|
||||||
|
for i := NLEN - 1; i >= 0; i-- {
|
||||||
|
gt |= ((b.w[i] - a.w[i]) >> BASEBITS) & eq
|
||||||
|
eq &= ((b.w[i] ^ a.w[i]) - 1) >> BASEBITS
|
||||||
|
}
|
||||||
|
return int(gt + gt + eq - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return parity */
|
||||||
|
func (r *BIG) parity() int {
|
||||||
|
return int(r.w[0] % 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return n-th bit */
|
||||||
|
func (r *BIG) bit(n int) int {
|
||||||
|
return int((r.w[n/int(BASEBITS)] & (Chunk(1) << (uint(n) % BASEBITS))) >> (uint(n) % BASEBITS))
|
||||||
|
// if (r.w[n/int(BASEBITS)] & (Chunk(1) << (uint(n) % BASEBITS))) > 0 {
|
||||||
|
// return 1
|
||||||
|
// }
|
||||||
|
// return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return n last bits */
|
||||||
|
func (r *BIG) lastbits(n int) int {
|
||||||
|
msk := (1 << uint(n)) - 1
|
||||||
|
r.norm()
|
||||||
|
return (int(r.w[0])) & msk
|
||||||
|
}
|
||||||
|
|
||||||
|
/* set x = x mod 2^m */
|
||||||
|
func (r *BIG) mod2m(m uint) {
|
||||||
|
wd := int(m / BASEBITS)
|
||||||
|
bt := m % BASEBITS
|
||||||
|
msk := (Chunk(1) << bt) - 1
|
||||||
|
r.w[wd] &= msk
|
||||||
|
for i := wd + 1; i < NLEN; i++ {
|
||||||
|
r.w[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* a=1/a mod 2^256. This is very fast! */
|
||||||
|
func (r *BIG) invmod2m() {
|
||||||
|
U := NewBIG(nil)
|
||||||
|
b := NewBIG(nil)
|
||||||
|
c := NewBIG(nil)
|
||||||
|
|
||||||
|
U.inc(invmod256(r.lastbits(8)))
|
||||||
|
|
||||||
|
for i := 8; i < BIGBITS; i <<= 1 {
|
||||||
|
U.norm()
|
||||||
|
ui := uint(i)
|
||||||
|
b.copy(r)
|
||||||
|
b.mod2m(ui)
|
||||||
|
t1 := smul(U, b)
|
||||||
|
t1.shr(ui)
|
||||||
|
c.copy(r)
|
||||||
|
c.shr(ui)
|
||||||
|
c.mod2m(ui)
|
||||||
|
|
||||||
|
t2 := smul(U, c)
|
||||||
|
t2.mod2m(ui)
|
||||||
|
t1.Add(t2)
|
||||||
|
t1.norm()
|
||||||
|
b = smul(t1, U)
|
||||||
|
t1.copy(b)
|
||||||
|
t1.mod2m(ui)
|
||||||
|
|
||||||
|
t2.one()
|
||||||
|
t2.shl(ui)
|
||||||
|
t1.rsub(t2)
|
||||||
|
t1.norm()
|
||||||
|
t1.shl(ui)
|
||||||
|
U.Add(t1)
|
||||||
|
}
|
||||||
|
U.mod2m(8 * MODBYTES)
|
||||||
|
r.copy(U)
|
||||||
|
r.norm()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *BIG) ctmod(m *BIG, bd uint, mem *arena.Arena) {
	k := bd
	sr := NewBIG(mem)
	c := NewBIGcopy(m, mem)
	r.norm()

	c.shl(k)

	for {
		sr.copy(r)
		sr.Sub(c)
		sr.norm()
		r.cmove(sr, int(1-((sr.w[NLEN-1]>>uint(CHUNK-1))&1)))
		if k == 0 {
			break
		}
		c.fshr(1)
		k -= 1
	}
}

/* reduce this mod m */
func (r *BIG) Mod(m *BIG, mem *arena.Arena) {
	k := r.nbits() - m.nbits()
	if k < 0 {
		k = 0
	}
	r.ctmod(m, uint(k), mem)
}
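Mod reduces the receiver in place, running ctmod for a fixed number of iterations derived only from the bit-length difference, so the amount of work does not depend on the value being reduced. A small usage sketch (same import-path and arena assumptions as above): the 581-bit field modulus is reduced modulo the smaller group order.

package main

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	p := bls48581.NewBIGints(bls48581.Modulus, nil)     // 581-bit field modulus
	q := bls48581.NewBIGints(bls48581.CURVE_Order, nil) // group order

	p.Mod(q, nil) // in-place reduction: afterwards 0 <= p < q
	fmt.Println("p mod q has", p.Nbits(), "bits; q has", q.Nbits(), "bits")
}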
|
||||||
|
|
||||||
|
func (r *BIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) {
|
||||||
|
k := bd
|
||||||
|
e := NewBIGint(1, mem)
|
||||||
|
sr := NewBIG(mem)
|
||||||
|
a := NewBIGcopy(r, mem)
|
||||||
|
c := NewBIGcopy(m, mem)
|
||||||
|
r.norm()
|
||||||
|
r.zero()
|
||||||
|
|
||||||
|
c.shl(k)
|
||||||
|
e.shl(k)
|
||||||
|
|
||||||
|
for {
|
||||||
|
sr.copy(a)
|
||||||
|
sr.Sub(c)
|
||||||
|
sr.norm()
|
||||||
|
d := int(1 - ((sr.w[NLEN-1] >> uint(CHUNK-1)) & 1))
|
||||||
|
a.cmove(sr, d)
|
||||||
|
sr.copy(r)
|
||||||
|
sr.Add(e)
|
||||||
|
sr.norm()
|
||||||
|
r.cmove(sr, d)
|
||||||
|
if k == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c.fshr(1)
|
||||||
|
e.fshr(1)
|
||||||
|
k -= 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* divide this by m */
|
||||||
|
func (r *BIG) div(m *BIG, mem *arena.Arena) {
|
||||||
|
k := r.nbits() - m.nbits()
|
||||||
|
if k < 0 {
|
||||||
|
k = 0
|
||||||
|
}
|
||||||
|
r.ctdiv(m, uint(k), mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* get 8*MODBYTES size random number */
|
||||||
|
func Random(rng *ext.RAND) *BIG {
|
||||||
|
m := NewBIG(nil)
|
||||||
|
var j int = 0
|
||||||
|
var r byte = 0
|
||||||
|
/* generate random BIG */
|
||||||
|
for i := 0; i < 8*int(MODBYTES); i++ {
|
||||||
|
if j == 0 {
|
||||||
|
r = rng.GetByte()
|
||||||
|
} else {
|
||||||
|
r >>= 1
|
||||||
|
}
|
||||||
|
|
||||||
|
b := Chunk(int(r & 1))
|
||||||
|
m.shl(1)
|
||||||
|
m.w[0] += b
|
||||||
|
j++
|
||||||
|
j &= 7
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Create random BIG in portable way, one bit at a time */
|
||||||
|
func Randomnum(q *BIG, rng *ext.RAND) *BIG {
|
||||||
|
d := NewDBIG(nil)
|
||||||
|
var j int = 0
|
||||||
|
var r byte = 0
|
||||||
|
for i := 0; i < 2*q.nbits(); i++ {
|
||||||
|
if j == 0 {
|
||||||
|
r = rng.GetByte()
|
||||||
|
} else {
|
||||||
|
r >>= 1
|
||||||
|
}
|
||||||
|
|
||||||
|
b := Chunk(int(r & 1))
|
||||||
|
d.shl(1)
|
||||||
|
d.w[0] += b
|
||||||
|
j++
|
||||||
|
j &= 7
|
||||||
|
}
|
||||||
|
m := d.Mod(q, nil)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func Randtrunc(q *BIG, trunc int, rng *ext.RAND) *BIG {
|
||||||
|
m := Randomnum(q, rng)
|
||||||
|
if q.nbits() > trunc {
|
||||||
|
m.mod2m(uint(trunc))
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return a*b mod m */
func Modmul(a1, b1, m *BIG, mem *arena.Arena) *BIG {
	a := NewBIGcopy(a1, mem)
	b := NewBIGcopy(b1, mem)
	a.Mod(m, mem)
	b.Mod(m, mem)
	d := mul(a, b, mem)
	return d.ctmod(m, uint(m.nbits()), mem)
}

/* return a^2 mod m */
func Modsqr(a1, m *BIG, mem *arena.Arena) *BIG {
	a := NewBIGcopy(a1, mem)
	a.Mod(m, mem)
	d := sqr(a, mem)
	return d.ctmod(m, uint(m.nbits()), mem)
}

/* return -a mod m */
func Modneg(a1, m *BIG, mem *arena.Arena) *BIG {
	a := NewBIGcopy(a1, mem)
	a.Mod(m, mem)
	a.rsub(m)
	a.norm()
	return a
}

/* return a+b mod m */
func ModAdd(a1, b1, m *BIG, mem *arena.Arena) *BIG {
	a := NewBIGcopy(a1, mem)
	b := NewBIGcopy(b1, mem)
	a.Mod(m, mem)
	b.Mod(m, mem)
	a.Add(b)
	a.norm()
	a.ctmod(m, 1, mem)
	return a
}
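These helpers copy their inputs, reduce them, and return a freshly allocated result, so they can be chained without aliasing concerns. A sketch checking the simple identity (a*b + (-a)*b) mod m == 0, under the same import-path assumption as the earlier examples:

package main

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	m := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
	a := bls48581.NewBIGint(1234567, nil)
	b := bls48581.NewBIGint(7654321, nil)

	ab := bls48581.Modmul(a, b, m, nil)                           // a*b mod m
	nab := bls48581.Modmul(bls48581.Modneg(a, m, nil), b, m, nil) // (-a)*b mod m
	sum := bls48581.ModAdd(ab, nab, m, nil)                       // should reduce to 0
	fmt.Println("sum is zero:", sum.IsZero())
}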
|
||||||
|
|
||||||
|
/* Jacobi Symbol (this/p). Returns 0, 1 or -1 */
|
||||||
|
func (r *BIG) Jacobi(p *BIG) int {
|
||||||
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
m := 0
|
||||||
|
t := NewBIGint(0, mem)
|
||||||
|
x := NewBIGint(0, mem)
|
||||||
|
n := NewBIGint(0, mem)
|
||||||
|
zilch := NewBIGint(0, mem)
|
||||||
|
one := NewBIGint(1, mem)
|
||||||
|
if p.parity() == 0 || Comp(r, zilch) == 0 || Comp(p, one) <= 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
r.norm()
|
||||||
|
x.copy(r)
|
||||||
|
n.copy(p)
|
||||||
|
x.Mod(p, mem)
|
||||||
|
|
||||||
|
for Comp(n, one) > 0 {
|
||||||
|
if Comp(x, zilch) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
n8 := n.lastbits(3)
|
||||||
|
k := 0
|
||||||
|
for x.parity() == 0 {
|
||||||
|
k++
|
||||||
|
x.shr(1)
|
||||||
|
}
|
||||||
|
if k%2 == 1 {
|
||||||
|
m += (n8*n8 - 1) / 8
|
||||||
|
}
|
||||||
|
m += (n8 - 1) * (x.lastbits(2) - 1) / 4
|
||||||
|
t.copy(n)
|
||||||
|
t.Mod(x, mem)
|
||||||
|
n.copy(x)
|
||||||
|
x.copy(t)
|
||||||
|
m %= 2
|
||||||
|
|
||||||
|
}
|
||||||
|
if m == 0 {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
/* this=1/this mod p. Binary method */
|
||||||
|
func (r *BIG) Invmodp(p *BIG) {
|
||||||
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
r.Mod(p, mem)
|
||||||
|
if r.IsZero() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
u := NewBIGcopy(r, mem)
|
||||||
|
v := NewBIGcopy(p, mem)
|
||||||
|
x1 := NewBIGint(1, mem)
|
||||||
|
x2 := NewBIGint(0, mem)
|
||||||
|
t := NewBIGint(0, mem)
|
||||||
|
one := NewBIGint(1, mem)
|
||||||
|
for Comp(u, one) != 0 && Comp(v, one) != 0 {
|
||||||
|
for u.parity() == 0 {
|
||||||
|
u.fshr(1)
|
||||||
|
t.copy(x1)
|
||||||
|
t.Add(p)
|
||||||
|
x1.cmove(t, x1.parity())
|
||||||
|
x1.norm()
|
||||||
|
x1.fshr(1)
|
||||||
|
}
|
||||||
|
for v.parity() == 0 {
|
||||||
|
v.fshr(1)
|
||||||
|
t.copy(x2)
|
||||||
|
t.Add(p)
|
||||||
|
x2.cmove(t, x2.parity())
|
||||||
|
x2.norm()
|
||||||
|
x2.fshr(1)
|
||||||
|
}
|
||||||
|
if Comp(u, v) >= 0 {
|
||||||
|
u.Sub(v)
|
||||||
|
u.norm()
|
||||||
|
t.copy(x1)
|
||||||
|
t.Add(p)
|
||||||
|
x1.cmove(t, (Comp(x1, x2)>>1)&1)
|
||||||
|
x1.Sub(x2)
|
||||||
|
x1.norm()
|
||||||
|
} else {
|
||||||
|
v.Sub(u)
|
||||||
|
v.norm()
|
||||||
|
t.copy(x2)
|
||||||
|
t.Add(p)
|
||||||
|
x2.cmove(t, (Comp(x2, x1)>>1)&1)
|
||||||
|
x2.Sub(x1)
|
||||||
|
x2.norm()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.copy(x1)
|
||||||
|
r.cmove(x2, Comp(u, one)&1)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* return this^e mod m */
func (r *BIG) Powmod(e1 *BIG, m *BIG, mem *arena.Arena) *BIG {
	e := NewBIGcopy(e1, mem)
	r.norm()
	e.norm()
	a := NewBIGint(1, mem)
	z := NewBIGcopy(e, mem)
	s := NewBIGcopy(r, mem)
	for true {
		bt := z.parity()
		z.fshr(1)
		if bt == 1 {
			a = Modmul(a, s, m, mem)
		}
		if z.IsZero() {
			break
		}
		s = Modsqr(s, m, mem)
	}
	return a
}
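Powmod is a plain square-and-multiply ladder built on Modmul and Modsqr. Because the group order q is prime, a^(q-2) mod q is the modular inverse of a, which gives a quick way to exercise it; this is only an illustration (the package's Invmodp is the intended inversion routine), with the usual import-path assumption.

package main

import (
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	q := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
	a := bls48581.NewBIGint(42, nil)

	// e = q - 2, so a^e mod q is a^(-1) mod q by Fermat's little theorem.
	e := bls48581.NewBIGcopy(q, nil)
	e.Sub(bls48581.NewBIGint(2, nil)) // Powmod normalises e before using it

	inv := a.Powmod(e, q, nil)
	check := bls48581.Modmul(a, inv, q, nil)
	one := bls48581.NewBIGint(1, nil)
	fmt.Println("a * a^-1 == 1 mod q:", bls48581.Comp(check, one) == 0)
}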
|
||||||
|
|
||||||
|
/* Arazi and Qi inversion mod 256 */
|
||||||
|
func invmod256(a int) int {
|
||||||
|
var t1 int = 0
|
||||||
|
c := (a >> 1) & 1
|
||||||
|
t1 += c
|
||||||
|
t1 &= 1
|
||||||
|
t1 = 2 - t1
|
||||||
|
t1 <<= 1
|
||||||
|
U := t1 + 1
|
||||||
|
|
||||||
|
// i=2
|
||||||
|
b := a & 3
|
||||||
|
t1 = U * b
|
||||||
|
t1 >>= 2
|
||||||
|
c = (a >> 2) & 3
|
||||||
|
t2 := (U * c) & 3
|
||||||
|
t1 += t2
|
||||||
|
t1 *= U
|
||||||
|
t1 &= 3
|
||||||
|
t1 = 4 - t1
|
||||||
|
t1 <<= 2
|
||||||
|
U += t1
|
||||||
|
|
||||||
|
// i=4
|
||||||
|
b = a & 15
|
||||||
|
t1 = U * b
|
||||||
|
t1 >>= 4
|
||||||
|
c = (a >> 4) & 15
|
||||||
|
t2 = (U * c) & 15
|
||||||
|
t1 += t2
|
||||||
|
t1 *= U
|
||||||
|
t1 &= 15
|
||||||
|
t1 = 16 - t1
|
||||||
|
t1 <<= 4
|
||||||
|
U += t1
|
||||||
|
|
||||||
|
return U
|
||||||
|
}
|
||||||
|
|
||||||
|
func logb2(w uint32) uint {
	v := w
	v |= (v >> 1)
	v |= (v >> 2)
	v |= (v >> 4)
	v |= (v >> 8)
	v |= (v >> 16)

	v = v - ((v >> 1) & 0x55555555)
	v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
	r := uint((((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24)
	return (r)
}

// Optimized combined shift, subtract and norm
func ssn(r *BIG, a *BIG, m *BIG) int {
	n := NLEN - 1
	m.w[0] = (m.w[0] >> 1) | ((m.w[1] << (BASEBITS - 1)) & BMASK)
	r.w[0] = a.w[0] - m.w[0]
	carry := r.w[0] >> BASEBITS
	r.w[0] &= BMASK
	for i := 1; i < n; i++ {
		m.w[i] = (m.w[i] >> 1) | ((m.w[i+1] << (BASEBITS - 1)) & BMASK)
		r.w[i] = a.w[i] - m.w[i] + carry
		carry = r.w[i] >> BASEBITS
		r.w[i] &= BMASK
	}
	m.w[n] >>= 1
	r.w[n] = a.w[n] - m.w[n] + carry
	return int((r.w[n] >> uint(CHUNK-1)) & 1)
}
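logb2 smears the most significant set bit downwards and then popcounts the result, so it returns the bit length of a 32-bit value; on current Go this matches bits.Len32. A tiny standalone check of that equivalence (illustrative, outside the package):

package main

import (
	"fmt"
	"math/bits"
)

// bitLen32 mirrors logb2: smear the top set bit downwards, then popcount.
func bitLen32(w uint32) uint {
	v := w
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	return uint(bits.OnesCount32(v))
}

func main() {
	for _, w := range []uint32{0, 1, 2, 3, 255, 256, 1 << 31} {
		fmt.Println(w, bitLen32(w), bits.Len32(w)) // the two lengths agree
	}
}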
|
@ -42,7 +42,7 @@ func ceil(a int, b int) int {
|
|||||||
|
|
||||||
/* output u \in F_p */
|
/* output u \in F_p */
|
||||||
func Hash_to_field(hash int, hlen int, DST []byte, M []byte, ctr int) []*FP {
|
func Hash_to_field(hash int, hlen int, DST []byte, M []byte, ctr int) []*FP {
|
||||||
q := NewBIGints(Modulus)
|
q := NewBIGints(Modulus, nil)
|
||||||
nbq := q.nbits()
|
nbq := q.nbits()
|
||||||
L := ceil(nbq+AESKEY*8, 8)
|
L := ceil(nbq+AESKEY*8, 8)
|
||||||
var u []*FP
|
var u []*FP
|
||||||
@ -53,7 +53,7 @@ func Hash_to_field(hash int, hlen int, DST []byte, M []byte, ctr int) []*FP {
|
|||||||
for j := 0; j < L; j++ {
|
for j := 0; j < L; j++ {
|
||||||
fd[j] = OKM[i*L+j]
|
fd[j] = OKM[i*L+j]
|
||||||
}
|
}
|
||||||
u = append(u, NewFPbig(DBIG_fromBytes(fd).ctmod(q, uint(8*L-nbq))))
|
u = append(u, NewFPbig(DBIG_fromBytes(fd).ctmod(q, uint(8*L-nbq), nil), nil))
|
||||||
}
|
}
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
@ -65,15 +65,15 @@ func Bls256_hash_to_point(M []byte) *ECP {
|
|||||||
|
|
||||||
P := ECP_map2point(u[0])
|
P := ECP_map2point(u[0])
|
||||||
P1 := ECP_map2point(u[1])
|
P1 := ECP_map2point(u[1])
|
||||||
P.Add(P1)
|
P.Add(P1, nil)
|
||||||
P.Cfp()
|
P.Cfp()
|
||||||
P.Affine()
|
P.Affine(nil)
|
||||||
return P
|
return P
|
||||||
}
|
}
|
||||||
|
|
||||||
func Init() int {
|
func Init() int {
|
||||||
G := ECP8_generator()
|
G := ECP8_generator()
|
||||||
if G.Is_infinity() {
|
if G.Is_infinity(nil) {
|
||||||
return BLS_FAIL
|
return BLS_FAIL
|
||||||
}
|
}
|
||||||
G2_TAB = precomp(G)
|
G2_TAB = precomp(G)
|
||||||
@ -82,7 +82,7 @@ func Init() int {
|
|||||||
|
|
||||||
/* generate key pair, private key S, public key W */
|
/* generate key pair, private key S, public key W */
|
||||||
func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
|
func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
|
||||||
r := NewBIGints(CURVE_Order)
|
r := NewBIGints(CURVE_Order, nil)
|
||||||
nbr := r.nbits()
|
nbr := r.nbits()
|
||||||
L := ceil(3*ceil(nbr, 8), 2)
|
L := ceil(3*ceil(nbr, 8), 2)
|
||||||
LEN := ext.InttoBytes(L, 2)
|
LEN := ext.InttoBytes(L, 2)
|
||||||
@ -93,7 +93,7 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
|
|||||||
AIKM[len(IKM)] = 0
|
AIKM[len(IKM)] = 0
|
||||||
|
|
||||||
G := ECP8_generator()
|
G := ECP8_generator()
|
||||||
if G.Is_infinity() {
|
if G.Is_infinity(nil) {
|
||||||
return BLS_FAIL
|
return BLS_FAIL
|
||||||
}
|
}
|
||||||
SALT := []byte("BLS-SIG-KEYGEN-SALT-")
|
SALT := []byte("BLS-SIG-KEYGEN-SALT-")
|
||||||
@ -101,10 +101,10 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
|
|||||||
OKM := ext.HKDF_Expand(ext.MC_SHA2, HASH_TYPE, L, PRK, LEN)
|
OKM := ext.HKDF_Expand(ext.MC_SHA2, HASH_TYPE, L, PRK, LEN)
|
||||||
|
|
||||||
dx := DBIG_fromBytes(OKM[:])
|
dx := DBIG_fromBytes(OKM[:])
|
||||||
s := dx.ctmod(r, uint(8*L-nbr))
|
s := dx.ctmod(r, uint(8*L-nbr), nil)
|
||||||
s.ToBytes(S)
|
s.ToBytes(S)
|
||||||
// SkToPk
|
// SkToPk
|
||||||
G = G2mul(G, s)
|
G = G2mul(G, s, nil)
|
||||||
G.ToBytes(W, true)
|
G.ToBytes(W, true)
|
||||||
return BLS_OK
|
return BLS_OK
|
||||||
}
|
}
|
||||||
@ -113,7 +113,7 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
func Core_Sign(SIG []byte, M []byte, S []byte) int {
func Core_Sign(SIG []byte, M []byte, S []byte) int {
D := Bls256_hash_to_point(M)
D := Bls256_hash_to_point(M)
s := FromBytes(S)
s := FromBytes(S)
D = G1mul(D, s)
D = G1mul(D, s, nil)
D.ToBytes(SIG, true)
D.ToBytes(SIG, true)
return BLS_OK
return BLS_OK
}
}
@ -124,21 +124,21 @@ func Core_Verify(SIG []byte, M []byte, W []byte) int {
HM := Bls256_hash_to_point(M)
HM := Bls256_hash_to_point(M)

D := ECP_fromBytes(SIG)
D := ECP_fromBytes(SIG)
if !G1member(D) {
if !G1member(D, nil) {
return BLS_FAIL
return BLS_FAIL
}
}
D.Neg()
D.Neg(nil)

PK := ECP8_fromBytes(W)
PK := ECP8_fromBytes(W)
if !G2member(PK) {
if !G2member(PK, nil) {
return BLS_FAIL
return BLS_FAIL
}
}

// Use new multi-pairing mechanism
// Use new multi-pairing mechanism
r := Initmp()
r := Initmp(nil)
Another_pc(r, G2_TAB, D)
Another_pc(r, G2_TAB, D)
Another(r, PK, HM)
Another(r, PK, HM, nil)
v := Miller(r)
v := Miller(r, nil)

//.. or alternatively
//.. or alternatively
// G := ECP8_generator()
// G := ECP8_generator()
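Taken together, Init, KeyPairGenerate, Core_Sign and Core_Verify form the minimal BLS flow on this curve. The sketch below shows how they compose; it assumes the import path used elsewhere in the repository and buffer sizes implied by MODBYTES = 73 (73-byte secret key, 74-byte compressed G1 signature, 585-byte compressed G2 public key), so verify those sizes against the package before relying on them.

package main

import (
	"crypto/rand"
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)

func main() {
	// Precompute G2_TAB; Core_Verify depends on it.
	if bls48581.Init() != bls48581.BLS_OK {
		panic("init failed")
	}

	var sk [73]byte   // secret scalar
	var pk [585]byte  // compressed G2 public key
	var sig [74]byte  // compressed G1 signature

	ikm := make([]byte, 64) // input keying material for the HKDF-based keygen
	rand.Read(ikm)

	if bls48581.KeyPairGenerate(ikm, sk[:], pk[:]) != bls48581.BLS_OK {
		panic("keygen failed")
	}

	msg := []byte("hello")
	bls48581.Core_Sign(sig[:], msg, sk[:])
	fmt.Println("verified:", bls48581.Core_Verify(sig[:], msg, pk[:]) == bls48581.BLS_OK)
}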
@ -1,3 +1,5 @@
|
|||||||
|
//go:build js && wasm
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
*
|
*
|
@ -0,0 +1,36 @@
|
|||||||
|
//go:build !js && !wasm
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
|
*
|
||||||
|
* This file is part of MIRACL Core
|
||||||
|
* (see https://github.com/miracl/core).
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package bls48581
|
||||||
|
|
||||||
|
// BIG length in bytes and number base
const MODBYTES uint = 73
const BASEBITS uint = 60

// BIG lengths and Masks
const NLEN int = int((1 + ((8*MODBYTES - 1) / BASEBITS)))
const DNLEN int = 2 * NLEN
const BMASK Chunk = ((Chunk(1) << BASEBITS) - 1)
const HBITS uint = (BASEBITS / 2)
const HMASK Chunk = ((Chunk(1) << HBITS) - 1)
const NEXCESS int = (1 << (uint(CHUNK) - BASEBITS - 1))

const BIGBITS int = int(MODBYTES * 8)
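With MODBYTES = 73 and BASEBITS = 60 the derived sizes work out to NLEN = 1 + floor(583/60) = 10 limbs per BIG, DNLEN = 20 limbs per DBIG, and BIGBITS = 584. A throwaway check of that arithmetic (illustrative only, not part of the package):

package main

import "fmt"

func main() {
	const MODBYTES, BASEBITS = 73, 60
	nlen := 1 + (8*MODBYTES-1)/BASEBITS
	fmt.Println(nlen, 2*nlen, 8*MODBYTES)        // 10 20 584
	fmt.Printf("%#x\n", (uint64(1)<<BASEBITS)-1) // BMASK = 0xfffffffffffffff
}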
@ -19,11 +19,6 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
// Curve types
|
|
||||||
const WEIERSTRASS int = 0
|
|
||||||
const EDWARDS int = 1
|
|
||||||
const MONTGOMERY int = 2
|
|
||||||
|
|
||||||
// Pairing Friendly?
|
// Pairing Friendly?
|
||||||
const NOT int = 0
|
const NOT int = 0
|
||||||
const BN int = 1
|
const BN int = 1
|
||||||
@ -31,10 +26,6 @@ const BLS12 int = 2
|
|||||||
const BLS24 int = 3
|
const BLS24 int = 3
|
||||||
const BLS48 int = 4
|
const BLS48 int = 4
|
||||||
|
|
||||||
// Pairing Twist type
|
|
||||||
const D_TYPE int = 0
|
|
||||||
const M_TYPE int = 1
|
|
||||||
|
|
||||||
// Sparsity
|
// Sparsity
|
||||||
const FP_ZERO int = 0
|
const FP_ZERO int = 0
|
||||||
const FP_ONE int = 1
|
const FP_ONE int = 1
|
||||||
@ -43,34 +34,16 @@ const FP_SPARSER int = 3
|
|||||||
const FP_SPARSE int = 4
|
const FP_SPARSE int = 4
|
||||||
const FP_DENSE int = 5
|
const FP_DENSE int = 5
|
||||||
|
|
||||||
// Pairing x parameter sign
|
|
||||||
const POSITIVEX int = 0
|
|
||||||
const NEGATIVEX int = 1
|
|
||||||
|
|
||||||
// Curve type
|
|
||||||
|
|
||||||
const CURVETYPE int = WEIERSTRASS
|
|
||||||
const CURVE_A int = 0
|
const CURVE_A int = 0
|
||||||
const CURVE_PAIRING_TYPE int = BLS48
|
|
||||||
|
|
||||||
// Pairings only
|
|
||||||
|
|
||||||
const SEXTIC_TWIST int = D_TYPE
|
|
||||||
const SIGN_OF_X int = NEGATIVEX
|
|
||||||
const ATE_BITS int = 33
|
const ATE_BITS int = 33
|
||||||
const G2_TABLE int = 36
|
const G2_TABLE int = 36
|
||||||
const HTC_ISO int = 0
|
const HTC_ISO int = 0
|
||||||
const HTC_ISO_G2 int = 0
|
const HTC_ISO_G2 int = 0
|
||||||
|
|
||||||
// associated hash function and AES key size
|
|
||||||
|
|
||||||
const HASH_TYPE int = 64
|
const HASH_TYPE int = 64
|
||||||
const AESKEY int = 32
|
const AESKEY int = 32
|
||||||
|
|
||||||
const ALLOW_ALT_COMPRESS bool = false
|
|
||||||
|
|
||||||
// These are manually decided policy decisions. To block any potential patent issues set to false.
|
|
||||||
|
|
||||||
const USE_GLV bool = true
|
const USE_GLV bool = true
|
||||||
const USE_GS_G2 bool = true
|
const USE_GS_G2 bool = true
|
||||||
const USE_GS_GT bool = true
|
const USE_GS_GT bool = true
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
//go:build js && wasm
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
*
|
*
|
@ -0,0 +1,49 @@
|
|||||||
|
//go:build !js && !wasm
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
|
*
|
||||||
|
* This file is part of MIRACL Core
|
||||||
|
* (see https://github.com/miracl/core).
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package bls48581
|
||||||
|
|
||||||
|
// Modulus types
|
||||||
|
const NOT_SPECIAL int = 0
|
||||||
|
const PSEUDO_MERSENNE int = 1
|
||||||
|
const MONTGOMERY_FRIENDLY int = 2
|
||||||
|
const GENERALISED_MERSENNE int = 3
|
||||||
|
|
||||||
|
const NEGATOWER int = 0
|
||||||
|
const POSITOWER int = 1
|
||||||
|
|
||||||
|
// Modulus details
|
||||||
|
const MODBITS uint = 581 /* Number of bits in Modulus */
|
||||||
|
const PM1D2 uint = 1 /* Modulus mod 8 */
|
||||||
|
const RIADZ int = 2 /* hash-to-point Z */
|
||||||
|
const RIADZG2A int = 2 /* G2 hash-to-point Z */
|
||||||
|
const RIADZG2B int = 0 /* G2 hash-to-point Z */
|
||||||
|
const MODTYPE int = NOT_SPECIAL //NOT_SPECIAL
|
||||||
|
const QNRI int = 0 // Fp2 QNR
|
||||||
|
const TOWER int = POSITOWER // Tower type
|
||||||
|
const FEXCESS int32 = ((int32(1) << 19) - 1)
|
||||||
|
|
||||||
|
// Modulus Masks
|
||||||
|
const OMASK Chunk = ((Chunk(-1)) << (MODBITS % BASEBITS))
|
||||||
|
const TBITS uint = MODBITS % BASEBITS // Number of active bits in top word
|
||||||
|
const TMASK Chunk = (Chunk(1) << TBITS) - 1
|
||||||
|
|
||||||
|
const BIG_ENDIAN_SIGN bool = false
|
@ -21,28 +21,46 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
import "strconv"
|
import (
|
||||||
|
"arena"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
//import "fmt"
|
//import "fmt"
|
||||||
|
|
||||||
func NewDBIG() *DBIG {
|
func NewDBIG(mem *arena.Arena) *DBIG {
|
||||||
b := new(DBIG)
|
var b *DBIG
|
||||||
|
if mem != nil {
|
||||||
|
b = arena.New[DBIG](mem)
|
||||||
|
} else {
|
||||||
|
b = new(DBIG)
|
||||||
|
}
|
||||||
for i := 0; i < DNLEN; i++ {
|
for i := 0; i < DNLEN; i++ {
|
||||||
b.w[i] = 0
|
b.w[i] = 0
|
||||||
}
|
}
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDBIGcopy(x *DBIG) *DBIG {
|
func NewDBIGcopy(x *DBIG, mem *arena.Arena) *DBIG {
|
||||||
b := new(DBIG)
|
var b *DBIG
|
||||||
|
if mem != nil {
|
||||||
|
b = arena.New[DBIG](mem)
|
||||||
|
} else {
|
||||||
|
b = new(DBIG)
|
||||||
|
}
|
||||||
for i := 0; i < DNLEN; i++ {
|
for i := 0; i < DNLEN; i++ {
|
||||||
b.w[i] = x.w[i]
|
b.w[i] = x.w[i]
|
||||||
}
|
}
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDBIGscopy(x *BIG) *DBIG {
|
func NewDBIGscopy(x *BIG, mem *arena.Arena) *DBIG {
|
||||||
b := new(DBIG)
|
var b *DBIG
|
||||||
|
if mem != nil {
|
||||||
|
b = arena.New[DBIG](mem)
|
||||||
|
} else {
|
||||||
|
b = new(DBIG)
|
||||||
|
}
|
||||||
for i := 0; i < NLEN-1; i++ {
|
for i := 0; i < NLEN-1; i++ {
|
||||||
b.w[i] = x.w[i]
|
b.w[i] = x.w[i]
|
||||||
}
|
}
|
||||||
@ -67,8 +85,8 @@ func (r *DBIG) norm() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* split DBIG at position n, return higher half, keep lower half */
|
/* split DBIG at position n, return higher half, keep lower half */
|
||||||
func (r *DBIG) split(n uint) *BIG {
|
func (r *DBIG) split(n uint, mem *arena.Arena) *BIG {
|
||||||
t := NewBIG()
|
t := NewBIG(mem)
|
||||||
m := n % BASEBITS
|
m := n % BASEBITS
|
||||||
carry := r.w[DNLEN-1] << (BASEBITS - m)
|
carry := r.w[DNLEN-1] << (BASEBITS - m)
|
||||||
|
|
||||||
@ -173,11 +191,11 @@ func (r *DBIG) shr(k uint) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DBIG) ctmod(m *BIG, bd uint) *BIG {
|
func (r *DBIG) ctmod(m *BIG, bd uint, mem *arena.Arena) *BIG {
|
||||||
k := bd
|
k := bd
|
||||||
r.norm()
|
r.norm()
|
||||||
c := NewDBIGscopy(m)
|
c := NewDBIGscopy(m, mem)
|
||||||
dr := NewDBIG()
|
dr := NewDBIG(mem)
|
||||||
|
|
||||||
c.shl(k)
|
c.shl(k)
|
||||||
|
|
||||||
@ -192,25 +210,25 @@ func (r *DBIG) ctmod(m *BIG, bd uint) *BIG {
|
|||||||
k -= 1
|
k -= 1
|
||||||
c.shr(1)
|
c.shr(1)
|
||||||
}
|
}
|
||||||
return NewBIGdcopy(r)
|
return NewBIGdcopy(r, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* reduces this DBIG mod a BIG, and returns the BIG */
|
/* reduces this DBIG mod a BIG, and returns the BIG */
|
||||||
func (r *DBIG) Mod(m *BIG) *BIG {
|
func (r *DBIG) Mod(m *BIG, mem *arena.Arena) *BIG {
|
||||||
k := r.nbits() - m.nbits()
|
k := r.nbits() - m.nbits()
|
||||||
if k < 0 {
|
if k < 0 {
|
||||||
k = 0
|
k = 0
|
||||||
}
|
}
|
||||||
return r.ctmod(m, uint(k))
|
return r.ctmod(m, uint(k), mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *DBIG) ctdiv(m *BIG, bd uint) *BIG {
|
func (r *DBIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) *BIG {
|
||||||
k := bd
|
k := bd
|
||||||
c := NewDBIGscopy(m)
|
c := NewDBIGscopy(m, mem)
|
||||||
a := NewBIGint(0)
|
a := NewBIGint(0, mem)
|
||||||
e := NewBIGint(1)
|
e := NewBIGint(1, mem)
|
||||||
sr := NewBIG()
|
sr := NewBIG(mem)
|
||||||
dr := NewDBIG()
|
dr := NewDBIG(mem)
|
||||||
r.norm()
|
r.norm()
|
||||||
|
|
||||||
c.shl(k)
|
c.shl(k)
|
||||||
@ -237,12 +255,12 @@ func (r *DBIG) ctdiv(m *BIG, bd uint) *BIG {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* return this/c */
|
/* return this/c */
|
||||||
func (r *DBIG) div(m *BIG) *BIG {
|
func (r *DBIG) div(m *BIG, mem *arena.Arena) *BIG {
|
||||||
k := r.nbits() - m.nbits()
|
k := r.nbits() - m.nbits()
|
||||||
if k < 0 {
|
if k < 0 {
|
||||||
k = 0
|
k = 0
|
||||||
}
|
}
|
||||||
return r.ctdiv(m, uint(k))
|
return r.ctdiv(m, uint(k), mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Convert to Hex String */
|
/* Convert to Hex String */
|
||||||
@ -259,7 +277,7 @@ func (r *DBIG) toString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i := len - 1; i >= 0; i-- {
|
for i := len - 1; i >= 0; i-- {
|
||||||
b := NewDBIGcopy(r)
|
b := NewDBIGcopy(r, nil)
|
||||||
|
|
||||||
b.shr(uint(i * 4))
|
b.shr(uint(i * 4))
|
||||||
s += strconv.FormatInt(int64(b.w[0]&15), 16)
|
s += strconv.FormatInt(int64(b.w[0]&15), 16)
|
||||||
@ -270,7 +288,7 @@ func (r *DBIG) toString() string {
|
|||||||
/* return number of bits */
|
/* return number of bits */
|
||||||
func (r *DBIG) nbits() int {
|
func (r *DBIG) nbits() int {
|
||||||
k := DNLEN - 1
|
k := DNLEN - 1
|
||||||
t := NewDBIGcopy(r)
|
t := NewDBIGcopy(r, nil)
|
||||||
t.norm()
|
t.norm()
|
||||||
for k >= 0 && t.w[k] == 0 {
|
for k >= 0 && t.w[k] == 0 {
|
||||||
k--
|
k--
|
||||||
@ -289,7 +307,7 @@ func (r *DBIG) nbits() int {
|
|||||||
|
|
||||||
/* convert from byte array to BIG */
|
/* convert from byte array to BIG */
|
||||||
func DBIG_fromBytes(b []byte) *DBIG {
|
func DBIG_fromBytes(b []byte) *DBIG {
|
||||||
m := NewDBIG()
|
m := NewDBIG(nil)
|
||||||
for i := 0; i < len(b); i++ {
|
for i := 0; i < len(b); i++ {
|
||||||
m.shl(8)
|
m.shl(8)
|
||||||
m.w[0] += Chunk(int(b[i] & 0xff))
|
m.w[0] += Chunk(int(b[i] & 0xff))
|
||||||
|
@ -1,381 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
|
||||||
*
|
|
||||||
* This file is part of MIRACL Core
|
|
||||||
* (see https://github.com/miracl/core).
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* ECDH/ECIES/ECDSA API Functions */
|
|
||||||
|
|
||||||
package bls48581
|
|
||||||
|
|
||||||
//import "fmt"
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
|
||||||
|
|
||||||
const INVALID_PUBLIC_KEY int = -2
|
|
||||||
const ERROR int = -3
|
|
||||||
|
|
||||||
//const INVALID int = -4
|
|
||||||
const EFS int = int(MODBYTES)
|
|
||||||
const EGS int = int(MODBYTES)
|
|
||||||
|
|
||||||
// Transform a point multiplier to RFC7748 form
|
|
||||||
func RFC7748(r *BIG) {
|
|
||||||
lg := 0
|
|
||||||
t := NewBIGint(1)
|
|
||||||
c := CURVE_Cof_I
|
|
||||||
for c != 1 {
|
|
||||||
lg++
|
|
||||||
c /= 2
|
|
||||||
}
|
|
||||||
n := uint(8*EGS - lg + 1)
|
|
||||||
r.mod2m(n)
|
|
||||||
t.shl(n)
|
|
||||||
r.Add(t)
|
|
||||||
c = r.lastbits(lg)
|
|
||||||
r.dec(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* return true if S is in ranger 0 < S < order , else return false */
|
|
||||||
func ECDH_IN_RANGE(S []byte) bool {
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
s := FromBytes(S)
|
|
||||||
if s.IsZero() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if Comp(s, r) >= 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Calculate a public/private EC GF(p) key pair W,S where W=S.G mod EC(p),
|
|
||||||
* where S is the secret key and W is the public key
|
|
||||||
* and G is fixed generator.
|
|
||||||
* If RNG is NULL then the private key is provided externally in S
|
|
||||||
* otherwise it is generated randomly internally */
|
|
||||||
func ECDH_KEY_PAIR_GENERATE(RNG *ext.RAND, S []byte, W []byte) int {
|
|
||||||
res := 0
|
|
||||||
var s *BIG
|
|
||||||
var G *ECP
|
|
||||||
|
|
||||||
G = ECP_generator()
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
|
|
||||||
if RNG == nil {
|
|
||||||
s = FromBytes(S)
|
|
||||||
} else {
|
|
||||||
if CURVETYPE != WEIERSTRASS {
|
|
||||||
s = Random(RNG) // from random bytes
|
|
||||||
} else {
|
|
||||||
s = Randomnum(r, RNG) // Removes biases
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if CURVETYPE != WEIERSTRASS {
|
|
||||||
RFC7748(s) // For Montgomery or Edwards, apply RFC7748 transformation
|
|
||||||
}
|
|
||||||
|
|
||||||
s.ToBytes(S)
|
|
||||||
WP := G.clmul(s, r)
|
|
||||||
WP.ToBytes(W, false) // To use point compression on public keys, change to true
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
/* validate public key */
|
|
||||||
func ECDH_PUBLIC_KEY_VALIDATE(W []byte) int {
|
|
||||||
WP := ECP_fromBytes(W)
|
|
||||||
res := 0
|
|
||||||
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
|
|
||||||
if WP.Is_infinity() {
|
|
||||||
res = INVALID_PUBLIC_KEY
|
|
||||||
}
|
|
||||||
if res == 0 {
|
|
||||||
|
|
||||||
q := NewBIGints(Modulus)
|
|
||||||
nb := q.nbits()
|
|
||||||
k := NewBIGint(1)
|
|
||||||
k.shl(uint((nb + 4) / 2))
|
|
||||||
k.Add(q)
|
|
||||||
k.div(r)
|
|
||||||
|
|
||||||
for k.parity() == 0 {
|
|
||||||
k.shr(1)
|
|
||||||
WP.Dbl()
|
|
||||||
}
|
|
||||||
|
|
||||||
if !k.isunity() {
|
|
||||||
WP = WP.lmul(k)
|
|
||||||
}
|
|
||||||
if WP.Is_infinity() {
|
|
||||||
res = INVALID_PUBLIC_KEY
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
/* IEEE-1363 Diffie-Hellman online calculation Z=S.WD */
|
|
||||||
// type = 0 is just x coordinate output
|
|
||||||
// type = 1 for standard compressed output
|
|
||||||
// type = 2 for standard uncompress output 04|x|y
|
|
||||||
func ECDH_ECPSVDP_DH(S []byte, WD []byte, Z []byte, typ int) int {
|
|
||||||
res := 0
|
|
||||||
|
|
||||||
s := FromBytes(S)
|
|
||||||
|
|
||||||
W := ECP_fromBytes(WD)
|
|
||||||
if W.Is_infinity() {
|
|
||||||
res = ERROR
|
|
||||||
}
|
|
||||||
|
|
||||||
if res == 0 {
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
W = W.clmul(s, r)
|
|
||||||
if W.Is_infinity() {
|
|
||||||
res = ERROR
|
|
||||||
} else {
|
|
||||||
if CURVETYPE != MONTGOMERY {
|
|
||||||
if typ > 0 {
|
|
||||||
if typ == 1 {
|
|
||||||
W.ToBytes(Z, true)
|
|
||||||
} else {
|
|
||||||
W.ToBytes(Z, false)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
W.GetX().ToBytes(Z)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
} else {
|
|
||||||
W.GetX().ToBytes(Z)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
/* IEEE ECDSA Signature, C and D are signature on F using private key S */
|
|
||||||
func ECDH_ECPSP_DSA(sha int, RNG *ext.RAND, S []byte, F []byte, C []byte, D []byte) int {
|
|
||||||
var T [EGS]byte
|
|
||||||
|
|
||||||
B := ext.GPhashit(ext.MC_SHA2, sha, EGS, 0, F, -1, nil)
|
|
||||||
G := ECP_generator()
|
|
||||||
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
s := FromBytes(S)
|
|
||||||
f := FromBytes(B[:])
|
|
||||||
|
|
||||||
c := NewBIGint(0)
|
|
||||||
d := NewBIGint(0)
|
|
||||||
V := NewECP()
|
|
||||||
|
|
||||||
for d.IsZero() {
|
|
||||||
u := Randomnum(r, RNG)
|
|
||||||
w := Randomnum(r, RNG) /* IMPORTANT - side channel masking to protect invmodp() */
|
|
||||||
|
|
||||||
V.Copy(G)
|
|
||||||
V = V.clmul(u, r)
|
|
||||||
vx := V.GetX()
|
|
||||||
c.copy(vx)
|
|
||||||
c.Mod(r)
|
|
||||||
if c.IsZero() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
u.copy(Modmul(u, w, r))
|
|
||||||
u.Invmodp(r)
|
|
||||||
d.copy(Modmul(s, c, r))
|
|
||||||
d.copy(ModAdd(d, f, r))
|
|
||||||
d.copy(Modmul(d, w, r))
|
|
||||||
d.copy(Modmul(u, d, r))
|
|
||||||
}
|
|
||||||
|
|
||||||
c.ToBytes(T[:])
|
|
||||||
for i := 0; i < EGS; i++ {
|
|
||||||
C[i] = T[i]
|
|
||||||
}
|
|
||||||
d.ToBytes(T[:])
|
|
||||||
for i := 0; i < EGS; i++ {
|
|
||||||
D[i] = T[i]
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* IEEE1363 ECDSA Signature Verification. Signature C and D on F is verified using public key W */
|
|
||||||
func ECDH_ECPVP_DSA(sha int, W []byte, F []byte, C []byte, D []byte) int {
|
|
||||||
res := 0
|
|
||||||
|
|
||||||
B := ext.GPhashit(ext.MC_SHA2, sha, EGS, 0, F, -1, nil)
|
|
||||||
|
|
||||||
G := ECP_generator()
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
|
|
||||||
c := FromBytes(C)
|
|
||||||
d := FromBytes(D)
|
|
||||||
f := FromBytes(B[:])
|
|
||||||
|
|
||||||
if c.IsZero() || Comp(c, r) >= 0 || d.IsZero() || Comp(d, r) >= 0 {
|
|
||||||
res = ERROR
|
|
||||||
}
|
|
||||||
|
|
||||||
if res == 0 {
|
|
||||||
d.Invmodp(r)
|
|
||||||
f.copy(Modmul(f, d, r))
|
|
||||||
h2 := Modmul(c, d, r)
|
|
||||||
|
|
||||||
WP := ECP_fromBytes(W)
|
|
||||||
if WP.Is_infinity() {
|
|
||||||
res = ERROR
|
|
||||||
} else {
|
|
||||||
P := NewECP()
|
|
||||||
P.Copy(WP)
|
|
||||||
|
|
||||||
P = P.Mul2(h2, G, f)
|
|
||||||
|
|
||||||
if P.Is_infinity() {
|
|
||||||
res = ERROR
|
|
||||||
} else {
|
|
||||||
d = P.GetX()
|
|
||||||
d.Mod(r)
|
|
||||||
|
|
||||||
if Comp(d, c) != 0 {
|
|
||||||
res = ERROR
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
/* IEEE1363 ECIES encryption. Encryption of plaintext M uses public key W and produces ciphertext V,C,T */
|
|
||||||
func ECDH_ECIES_ENCRYPT(sha int, P1 []byte, P2 []byte, RNG *ext.RAND, W []byte, M []byte, V []byte, T []byte) []byte {
|
|
||||||
var Z [EFS]byte
|
|
||||||
var VZ [3*EFS + 1]byte
|
|
||||||
var K1 [AESKEY]byte
|
|
||||||
var K2 [AESKEY]byte
|
|
||||||
var U [EGS]byte
|
|
||||||
|
|
||||||
if ECDH_KEY_PAIR_GENERATE(RNG, U[:], V) != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ECDH_ECPSVDP_DH(U[:], W, Z[:], 0) != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 2*EFS+1; i++ {
|
|
||||||
VZ[i] = V[i]
|
|
||||||
}
|
|
||||||
for i := 0; i < EFS; i++ {
|
|
||||||
VZ[2*EFS+1+i] = Z[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
K := ext.KDF2(ext.MC_SHA2, sha, VZ[:], P1, 2*AESKEY)
|
|
||||||
|
|
||||||
for i := 0; i < AESKEY; i++ {
|
|
||||||
K1[i] = K[i]
|
|
||||||
K2[i] = K[AESKEY+i]
|
|
||||||
}
|
|
||||||
|
|
||||||
C := ext.AES_CBC_IV0_ENCRYPT(K1[:], M)
|
|
||||||
|
|
||||||
L2 := ext.InttoBytes(len(P2), 8)
|
|
||||||
|
|
||||||
var AC []byte
|
|
||||||
|
|
||||||
for i := 0; i < len(C); i++ {
|
|
||||||
AC = append(AC, C[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(P2); i++ {
|
|
||||||
AC = append(AC, P2[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
AC = append(AC, L2[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
ext.HMAC(ext.MC_SHA2, sha, T, len(T), K2[:], AC)
|
|
||||||
|
|
||||||
return C
|
|
||||||
}
|
|
||||||
|
|
||||||
/* constant time n-byte compare */
|
|
||||||
func ncomp(T1 []byte, T2 []byte, n int) bool {
|
|
||||||
res := 0
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
res |= int(T1[i] ^ T2[i])
|
|
||||||
}
|
|
||||||
if res == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/* IEEE1363 ECIES decryption. Decryption of ciphertext V,C,T using private key U outputs plaintext M */
|
|
||||||
func ECDH_ECIES_DECRYPT(sha int, P1 []byte, P2 []byte, V []byte, C []byte, T []byte, U []byte) []byte {
|
|
||||||
var Z [EFS]byte
|
|
||||||
var VZ [3*EFS + 1]byte
|
|
||||||
var K1 [AESKEY]byte
|
|
||||||
var K2 [AESKEY]byte
|
|
||||||
|
|
||||||
var TAG []byte = T[:]
|
|
||||||
|
|
||||||
if ECDH_ECPSVDP_DH(U, V, Z[:], 0) != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 2*EFS+1; i++ {
|
|
||||||
VZ[i] = V[i]
|
|
||||||
}
|
|
||||||
for i := 0; i < EFS; i++ {
|
|
||||||
VZ[2*EFS+1+i] = Z[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
K := ext.KDF2(ext.MC_SHA2, sha, VZ[:], P1, 2*AESKEY)
|
|
||||||
|
|
||||||
for i := 0; i < AESKEY; i++ {
|
|
||||||
K1[i] = K[i]
|
|
||||||
K2[i] = K[AESKEY+i]
|
|
||||||
}
|
|
||||||
|
|
||||||
M := ext.AES_CBC_IV0_DECRYPT(K1[:], C)
|
|
||||||
|
|
||||||
if M == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
L2 := ext.InttoBytes(len(P2), 8)
|
|
||||||
|
|
||||||
var AC []byte
|
|
||||||
|
|
||||||
for i := 0; i < len(C); i++ {
|
|
||||||
AC = append(AC, C[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(P2); i++ {
|
|
||||||
AC = append(AC, P2[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
AC = append(AC, L2[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
ext.HMAC(ext.MC_SHA2, sha, TAG, len(TAG), K2[:], AC)
|
|
||||||
|
|
||||||
if !ncomp(T, TAG, len(T)) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return M
|
|
||||||
}
|
|
@ -22,7 +22,11 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
import (
|
||||||
|
"arena"
|
||||||
|
|
||||||
|
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
||||||
|
)
|
||||||
|
|
||||||
type FP struct {
|
type FP struct {
|
||||||
x *BIG
|
x *BIG
|
||||||
@ -31,84 +35,119 @@ type FP struct {
|
|||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
|
|
||||||
func NewFP() *FP {
|
func NewFP(mem *arena.Arena) *FP {
|
||||||
F := new(FP)
|
if mem != nil {
|
||||||
F.x = NewBIG()
|
F := arena.New[FP](mem)
|
||||||
F.XES = 1
|
F.x = NewBIG(mem)
|
||||||
return F
|
F.XES = 1
|
||||||
}
|
return F
|
||||||
|
|
||||||
func NewFPint(a int) *FP {
|
|
||||||
F := new(FP)
|
|
||||||
if a < 0 {
|
|
||||||
m := NewBIGints(Modulus)
|
|
||||||
m.inc(a)
|
|
||||||
m.norm()
|
|
||||||
F.x = NewBIGcopy(m)
|
|
||||||
} else {
|
} else {
|
||||||
F.x = NewBIGint(a)
|
F := new(FP)
|
||||||
|
F.x = NewBIG(nil)
|
||||||
|
F.XES = 1
|
||||||
|
return F
|
||||||
}
|
}
|
||||||
F.nres()
|
|
||||||
return F
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFPbig(a *BIG) *FP {
|
func NewFPint(a int, mem *arena.Arena) *FP {
|
||||||
F := new(FP)
|
if mem != nil {
|
||||||
F.x = NewBIGcopy(a)
|
F := arena.New[FP](mem)
|
||||||
F.nres()
|
if a < 0 {
|
||||||
return F
|
m := NewBIGints(Modulus, mem)
|
||||||
|
m.inc(a)
|
||||||
|
m.norm()
|
||||||
|
F.x = NewBIGcopy(m, mem)
|
||||||
|
} else {
|
||||||
|
F.x = NewBIGint(a, mem)
|
||||||
|
}
|
||||||
|
F.nres(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP)
|
||||||
|
if a < 0 {
|
||||||
|
m := NewBIGints(Modulus, nil)
|
||||||
|
m.inc(a)
|
||||||
|
m.norm()
|
||||||
|
F.x = NewBIGcopy(m, nil)
|
||||||
|
} else {
|
||||||
|
F.x = NewBIGint(a, nil)
|
||||||
|
}
|
||||||
|
F.nres(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFPcopy(a *FP) *FP {
|
func NewFPbig(a *BIG, mem *arena.Arena) *FP {
|
||||||
F := new(FP)
|
if mem != nil {
|
||||||
F.x = NewBIGcopy(a.x)
|
F := arena.New[FP](mem)
|
||||||
F.XES = a.XES
|
F.x = NewBIGcopy(a, mem)
|
||||||
return F
|
F.nres(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP)
|
||||||
|
F.x = NewBIGcopy(a, nil)
|
||||||
|
F.nres(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFPcopy(a *FP, mem *arena.Arena) *FP {
|
||||||
|
if mem != nil {
|
||||||
|
F := arena.New[FP](mem)
|
||||||
|
F.x = NewBIGcopy(a.x, mem)
|
||||||
|
F.XES = a.XES
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP)
|
||||||
|
F.x = NewBIGcopy(a.x, nil)
|
||||||
|
F.XES = a.XES
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFPrand(rng *ext.RAND) *FP {
|
func NewFPrand(rng *ext.RAND) *FP {
|
||||||
m := NewBIGints(Modulus)
|
m := NewBIGints(Modulus, nil)
|
||||||
w := Randomnum(m, rng)
|
w := Randomnum(m, rng)
|
||||||
F := NewFPbig(w)
|
F := NewFPbig(w, nil)
|
||||||
return F
|
return F
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) ToString() string {
|
func (F *FP) ToString() string {
|
||||||
F.reduce()
|
F.reduce(nil)
|
||||||
return F.Redc().ToString()
|
return F.Redc(nil).ToString()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* convert to Montgomery n-residue form */
|
/* convert to Montgomery n-residue form */
|
||||||
func (F *FP) nres() {
|
func (F *FP) nres(mem *arena.Arena) {
|
||||||
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
|
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
|
||||||
r := NewBIGints(R2modp)
|
r := NewBIGints(R2modp, mem)
|
||||||
d := mul(F.x, r)
|
d := mul(F.x, r, mem)
|
||||||
F.x.copy(mod(d))
|
F.x.copy(mod(d, mem))
|
||||||
F.XES = 2
|
F.XES = 2
|
||||||
} else {
|
} else {
|
||||||
md := NewBIGints(Modulus)
|
md := NewBIGints(Modulus, mem)
|
||||||
F.x.Mod(md)
|
F.x.Mod(md, mem)
|
||||||
F.XES = 1
|
F.XES = 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* convert back to regular form */
|
/* convert back to regular form */
|
||||||
func (F *FP) Redc() *BIG {
|
func (F *FP) Redc(mem *arena.Arena) *BIG {
|
||||||
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
|
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
|
||||||
d := NewDBIGscopy(F.x)
|
d := NewDBIGscopy(F.x, mem)
|
||||||
return mod(d)
|
return mod(d, mem)
|
||||||
} else {
|
} else {
|
||||||
r := NewBIGcopy(F.x)
|
r := NewBIGcopy(F.x, mem)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* reduce a DBIG to a BIG using the appropriate form of the modulus */
|
/* reduce a DBIG to a BIG using the appropriate form of the modulus */
|
||||||
|
|
||||||
func mod(d *DBIG) *BIG {
|
func mod(d *DBIG, mem *arena.Arena) *BIG {
|
||||||
if MODTYPE == PSEUDO_MERSENNE {
|
if MODTYPE == PSEUDO_MERSENNE {
|
||||||
t := d.split(MODBITS)
|
t := d.split(MODBITS, mem)
|
||||||
b := NewBIGdcopy(d)
|
b := NewBIGdcopy(d, mem)
|
||||||
|
|
||||||
v := t.pmul(int(MConst))
|
v := t.pmul(int(MConst))
|
||||||
|
|
||||||
@ -128,7 +167,7 @@ func mod(d *DBIG) *BIG {
|
|||||||
d.w[NLEN+i-1] = bot
|
d.w[NLEN+i-1] = bot
|
||||||
d.w[NLEN+i] += top
|
d.w[NLEN+i] += top
|
||||||
}
|
}
|
||||||
b := NewBIG()
|
b := NewBIG(mem)
|
||||||
|
|
||||||
for i := 0; i < NLEN; i++ {
|
for i := 0; i < NLEN; i++ {
|
||||||
b.w[i] = d.w[NLEN+i]
|
b.w[i] = d.w[NLEN+i]
|
||||||
@ -138,14 +177,14 @@ func mod(d *DBIG) *BIG {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if MODTYPE == GENERALISED_MERSENNE { // GoldiLocks only
|
if MODTYPE == GENERALISED_MERSENNE { // GoldiLocks only
|
||||||
t := d.split(MODBITS)
|
t := d.split(MODBITS, mem)
|
||||||
b := NewBIGdcopy(d)
|
b := NewBIGdcopy(d, mem)
|
||||||
b.Add(t)
|
b.Add(t)
|
||||||
dd := NewDBIGscopy(t)
|
dd := NewDBIGscopy(t, mem)
|
||||||
dd.shl(MODBITS / 2)
|
dd.shl(MODBITS / 2)
|
||||||
|
|
||||||
tt := dd.split(MODBITS)
|
tt := dd.split(MODBITS, mem)
|
||||||
lo := NewBIGdcopy(dd)
|
lo := NewBIGdcopy(dd, mem)
|
||||||
b.Add(tt)
|
b.Add(tt)
|
||||||
b.Add(lo)
|
b.Add(lo)
|
||||||
b.norm()
|
b.norm()
|
||||||
@ -163,10 +202,10 @@ func mod(d *DBIG) *BIG {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if MODTYPE == NOT_SPECIAL {
|
if MODTYPE == NOT_SPECIAL {
|
||||||
md := NewBIGints(Modulus)
|
md := NewBIGints(Modulus, mem)
|
||||||
return monty(md, MConst, d)
|
return monty(md, MConst, d, mem)
|
||||||
}
|
}
|
||||||
return NewBIG()
|
return NewBIG(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
// find appoximation to quotient of a/m
|
// find appoximation to quotient of a/m
|
||||||
@ -189,9 +228,9 @@ func quo(n *BIG, m *BIG) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* reduce this mod Modulus */
|
/* reduce this mod Modulus */
|
||||||
func (F *FP) reduce() {
|
func (F *FP) reduce(mem *arena.Arena) {
|
||||||
m := NewBIGints(Modulus)
|
m := NewBIGints(Modulus, mem)
|
||||||
r := NewBIGints(Modulus)
|
r := NewBIGints(Modulus, mem)
|
||||||
var sb uint
|
var sb uint
|
||||||
F.x.norm()
|
F.x.norm()
|
||||||
|
|
||||||
@ -217,43 +256,49 @@ func (F *FP) reduce() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* test this=0? */
|
/* test this=0? */
|
||||||
func (F *FP) IsZero() bool {
|
func (F *FP) IsZero(mem *arena.Arena) bool {
|
||||||
W := NewFPcopy(F)
|
W := NewFPcopy(F, mem)
|
||||||
W.reduce()
|
W.reduce(mem)
|
||||||
return W.x.IsZero()
|
return W.x.IsZero()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) IsOne() bool {
|
func (F *FP) IsOne() bool {
|
||||||
W := NewFPcopy(F)
|
mem := arena.NewArena()
|
||||||
W.reduce()
|
defer mem.Free()
|
||||||
T := NewFPint(1)
|
W := NewFPcopy(F, mem)
|
||||||
|
W.reduce(mem)
|
||||||
|
T := NewFPint(1, mem)
|
||||||
return W.Equals(T)
|
return W.Equals(T)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) islarger() int {
|
func (F *FP) islarger() int {
|
||||||
if F.IsZero() {
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
if F.IsZero(mem) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
sx := NewBIGints(Modulus)
|
sx := NewBIGints(Modulus, mem)
|
||||||
fx := F.Redc()
|
fx := F.Redc(mem)
|
||||||
sx.Sub(fx)
|
sx.Sub(fx)
|
||||||
sx.norm()
|
sx.norm()
|
||||||
return Comp(fx, sx)
|
return Comp(fx, sx)
|
||||||
}
|
}
|
||||||
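Predicates such as IsOne and islarger keep their exported, arena-free signatures, so they open a private arena for their temporaries and free it on return. The shape of that pattern as a sketch; compareToOne is a hypothetical name, and the body simply mirrors IsOne above:

func compareToOne(F *FP) bool {
	mem := arena.NewArena()
	defer mem.Free() // every temporary below is released here, not by the GC
	W := NewFPcopy(F, mem)
	W.reduce(mem)
	T := NewFPint(1, mem)
	return W.Equals(T)
}

Keeping the arena local to the leaf function means callers see no API change, at the cost of one arena per call.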
|
|
||||||
func (F *FP) ToBytes(b []byte) {
|
func (F *FP) ToBytes(b []byte) {
|
||||||
F.Redc().ToBytes(b)
|
F.Redc(nil).ToBytes(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func FP_fromBytes(b []byte) *FP {
|
func FP_fromBytes(b []byte) *FP {
|
||||||
t := FromBytes(b)
|
t := FromBytes(b)
|
||||||
return NewFPbig(t)
|
return NewFPbig(t, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) isunity() bool {
|
func (F *FP) isunity() bool {
|
||||||
W := NewFPcopy(F)
|
mem := arena.NewArena()
|
||||||
W.reduce()
|
defer mem.Free()
|
||||||
return W.Redc().isunity()
|
W := NewFPcopy(F, mem)
|
||||||
|
W.reduce(mem)
|
||||||
|
return W.Redc(mem).isunity()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copy from FP b */
|
/* copy from FP b */
|
||||||
@ -270,25 +315,27 @@ func (F *FP) zero() {
|
|||||||
|
|
||||||
/* set this=1 */
|
/* set this=1 */
|
||||||
func (F *FP) one() {
|
func (F *FP) one() {
|
||||||
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
F.x.one()
|
F.x.one()
|
||||||
F.nres()
|
F.nres(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* return sign */
|
/* return sign */
|
||||||
func (F *FP) sign() int {
|
func (F *FP) sign(mem *arena.Arena) int {
|
||||||
if BIG_ENDIAN_SIGN {
|
if BIG_ENDIAN_SIGN {
|
||||||
m := NewBIGints(Modulus)
|
m := NewBIGints(Modulus, mem)
|
||||||
m.dec(1)
|
m.dec(1)
|
||||||
m.fshr(1)
|
m.fshr(1)
|
||||||
n := NewFPcopy(F)
|
n := NewFPcopy(F, mem)
|
||||||
n.reduce()
|
n.reduce(mem)
|
||||||
w := n.Redc()
|
w := n.Redc(mem)
|
||||||
cp := Comp(w, m)
|
cp := Comp(w, m)
|
||||||
return ((cp + 1) & 2) >> 1
|
return ((cp + 1) & 2) >> 1
|
||||||
} else {
|
} else {
|
||||||
W := NewFPcopy(F)
|
W := NewFPcopy(F, mem)
|
||||||
W.reduce()
|
W.reduce(mem)
|
||||||
return W.Redc().parity()
|
return W.Redc(mem).parity()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -315,20 +362,20 @@ func (F *FP) cmove(b *FP, d int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* this*=b mod Modulus */
|
/* this*=b mod Modulus */
|
||||||
func (F *FP) Mul(b *FP) {
|
func (F *FP) Mul(b *FP, mem *arena.Arena) {
|
||||||
|
|
||||||
if int64(F.XES)*int64(b.XES) > int64(FEXCESS) {
|
if int64(F.XES)*int64(b.XES) > int64(FEXCESS) {
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
d := mul(F.x, b.x)
|
d := mul(F.x, b.x, mem)
|
||||||
F.x.copy(mod(d))
|
F.x.copy(mod(d, mem))
|
||||||
F.XES = 2
|
F.XES = 2
|
||||||
}
|
}
|
||||||
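With Mul, Add and reduce all accepting an arena, a caller can scope every intermediate of a compound expression to a single arena and release them together. A hypothetical caller-side sketch (fusedMulAdd is not part of this diff):

func fusedMulAdd(a, b, c *FP) *FP {
	mem := arena.NewArena()
	defer mem.Free()
	acc := NewFPcopy(a, mem) // arena-backed working copy
	acc.Mul(b, mem)          // temporaries from Mul land in mem
	acc.Add(c, mem)
	// Copy the result out before mem is freed; arena memory must not escape.
	return NewFPcopy(acc, nil)
}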
|
|
||||||
/* this = -this mod Modulus */
|
/* this = -this mod Modulus */
|
||||||
func (F *FP) Neg() {
|
func (F *FP) Neg(mem *arena.Arena) {
|
||||||
m := NewBIGints(Modulus)
|
m := NewBIGints(Modulus, mem)
|
||||||
sb := logb2(uint32(F.XES - 1))
|
sb := logb2(uint32(F.XES - 1))
|
||||||
|
|
||||||
m.fshl(sb)
|
m.fshl(sb)
|
||||||
@ -336,12 +383,12 @@ func (F *FP) Neg() {
|
|||||||
|
|
||||||
F.XES = (1 << sb) + 1
|
F.XES = (1 << sb) + 1
|
||||||
if F.XES > FEXCESS {
|
if F.XES > FEXCESS {
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=c mod Modulus, where c is a small int */
|
/* this*=c mod Modulus, where c is a small int */
|
||||||
func (F *FP) imul(c int) {
|
func (F *FP) imul(c int, mem *arena.Arena) {
|
||||||
// F.norm()
|
// F.norm()
|
||||||
s := false
|
s := false
|
||||||
if c < 0 {
|
if c < 0 {
|
||||||
@ -350,60 +397,60 @@ func (F *FP) imul(c int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
|
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
|
||||||
d := F.x.pxmul(c)
|
d := F.x.pxmul(c, mem)
|
||||||
F.x.copy(mod(d))
|
F.x.copy(mod(d, mem))
|
||||||
F.XES = 2
|
F.XES = 2
|
||||||
} else {
|
} else {
|
||||||
if F.XES*int32(c) <= FEXCESS {
|
if F.XES*int32(c) <= FEXCESS {
|
||||||
F.x.pmul(c)
|
F.x.pmul(c)
|
||||||
F.XES *= int32(c)
|
F.XES *= int32(c)
|
||||||
} else {
|
} else {
|
||||||
n := NewFPint(c)
|
n := NewFPint(c, mem)
|
||||||
F.Mul(n)
|
F.Mul(n, mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if s {
|
if s {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=this mod Modulus */
|
/* this*=this mod Modulus */
|
||||||
func (F *FP) Sqr() {
|
func (F *FP) Sqr(mem *arena.Arena) {
|
||||||
if int64(F.XES)*int64(F.XES) > int64(FEXCESS) {
|
if int64(F.XES)*int64(F.XES) > int64(FEXCESS) {
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
d := sqr(F.x)
|
d := sqr(F.x, mem)
|
||||||
F.x.copy(mod(d))
|
F.x.copy(mod(d, mem))
|
||||||
F.XES = 2
|
F.XES = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this+=b */
|
/* this+=b */
|
||||||
func (F *FP) Add(b *FP) {
|
func (F *FP) Add(b *FP, mem *arena.Arena) {
|
||||||
F.x.Add(b.x)
|
F.x.Add(b.x)
|
||||||
F.XES += b.XES
|
F.XES += b.XES
|
||||||
if F.XES > FEXCESS {
|
if F.XES > FEXCESS {
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=b */
|
/* this-=b */
|
||||||
func (F *FP) Sub(b *FP) {
|
func (F *FP) Sub(b *FP, mem *arena.Arena) {
|
||||||
n := NewFPcopy(b)
|
n := NewFPcopy(b, mem)
|
||||||
n.Neg()
|
n.Neg(mem)
|
||||||
F.Add(n)
|
F.Add(n, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) rsub(b *FP) {
|
func (F *FP) rsub(b *FP, mem *arena.Arena) {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.Add(b)
|
F.Add(b, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this/=2 mod Modulus */
|
/* this/=2 mod Modulus */
|
||||||
func (F *FP) div2() {
|
func (F *FP) div2(mem *arena.Arena) {
|
||||||
p := NewBIGints(Modulus)
|
p := NewBIGints(Modulus, mem)
|
||||||
pr := F.x.parity()
|
pr := F.x.parity()
|
||||||
w := NewBIGcopy(F.x)
|
w := NewBIGcopy(F.x, mem)
|
||||||
F.x.fshr(1)
|
F.x.fshr(1)
|
||||||
w.Add(p)
|
w.Add(p)
|
||||||
w.norm()
|
w.norm()
|
||||||
@ -413,18 +460,22 @@ func (F *FP) div2() {
|
|||||||
|
|
||||||
/* return jacobi symbol (this/Modulus) */
|
/* return jacobi symbol (this/Modulus) */
|
||||||
func (F *FP) jacobi() int {
|
func (F *FP) jacobi() int {
|
||||||
w := F.Redc()
|
mem := arena.NewArena()
|
||||||
p := NewBIGints(Modulus)
|
defer mem.Free()
|
||||||
|
w := F.Redc(mem)
|
||||||
|
p := NewBIGints(Modulus, mem)
|
||||||
return w.Jacobi(p)
|
return w.Jacobi(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* return TRUE if this==a */
|
/* return TRUE if this==a */
|
||||||
func (F *FP) Equals(a *FP) bool {
|
func (F *FP) Equals(a *FP) bool {
|
||||||
f := NewFPcopy(F)
|
mem := arena.NewArena()
|
||||||
s := NewFPcopy(a)
|
defer mem.Free()
|
||||||
|
f := NewFPcopy(F, mem)
|
||||||
|
s := NewFPcopy(a, mem)
|
||||||
|
|
||||||
s.reduce()
|
s.reduce(mem)
|
||||||
f.reduce()
|
f.reduce(mem)
|
||||||
if Comp(s.x, f.x) == 0 {
|
if Comp(s.x, f.x) == 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@ -432,20 +483,22 @@ func (F *FP) Equals(a *FP) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) Comp(a *FP) int {
|
func (F *FP) Comp(a *FP) int {
|
||||||
f := NewFPcopy(F)
|
mem := arena.NewArena()
|
||||||
s := NewFPcopy(a)
|
defer mem.Free()
|
||||||
|
f := NewFPcopy(F, mem)
|
||||||
|
s := NewFPcopy(a, mem)
|
||||||
|
|
||||||
s.reduce()
|
s.reduce(mem)
|
||||||
f.reduce()
|
f.reduce(mem)
|
||||||
|
|
||||||
return Comp(s.x, f.x)
|
return Comp(s.x, f.x)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) pow(e *BIG) *FP {
|
func (F *FP) pow(e *BIG, mem *arena.Arena) *FP {
|
||||||
var tb []*FP
|
var tb []*FP
|
||||||
var w [1 + (NLEN*int(BASEBITS)+3)/4]int8
|
var w [1 + (NLEN*int(BASEBITS)+3)/4]int8
|
||||||
F.norm()
|
F.norm()
|
||||||
t := NewBIGcopy(e)
|
t := NewBIGcopy(e, mem)
|
||||||
t.norm()
|
t.norm()
|
||||||
nb := 1 + (t.nbits()+3)/4
|
nb := 1 + (t.nbits()+3)/4
|
||||||
|
|
||||||
@ -456,51 +509,51 @@ func (F *FP) pow(e *BIG) *FP {
|
|||||||
w[i] = int8(lsbs)
|
w[i] = int8(lsbs)
|
||||||
t.fshr(4)
|
t.fshr(4)
|
||||||
}
|
}
|
||||||
tb = append(tb, NewFPint(1))
|
tb = append(tb, NewFPint(1, mem))
|
||||||
tb = append(tb, NewFPcopy(F))
|
tb = append(tb, NewFPcopy(F, mem))
|
||||||
for i := 2; i < 16; i++ {
|
for i := 2; i < 16; i++ {
|
||||||
tb = append(tb, NewFPcopy(tb[i-1]))
|
tb = append(tb, NewFPcopy(tb[i-1], mem))
|
||||||
tb[i].Mul(F)
|
tb[i].Mul(F, mem)
|
||||||
}
|
}
|
||||||
r := NewFPcopy(tb[w[nb-1]])
|
r := NewFPcopy(tb[w[nb-1]], mem)
|
||||||
for i := nb - 2; i >= 0; i-- {
|
for i := nb - 2; i >= 0; i-- {
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Mul(tb[w[i]])
|
r.Mul(tb[w[i]], mem)
|
||||||
}
|
}
|
||||||
r.reduce()
|
r.reduce(mem)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
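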
|
|
||||||
// See https://eprint.iacr.org/2018/1038
|
// See https://eprint.iacr.org/2018/1038
|
||||||
// return this^(p-3)/4 or this^(p-5)/8
|
// return this^(p-3)/4 or this^(p-5)/8
|
||||||
func (F *FP) fpow() *FP {
|
func (F *FP) fpow(mem *arena.Arena) *FP {
|
||||||
ac := [11]int{1, 2, 3, 6, 12, 15, 30, 60, 120, 240, 255}
|
ac := [11]int{1, 2, 3, 6, 12, 15, 30, 60, 120, 240, 255}
|
||||||
var xp []*FP
|
xp := arena.MakeSlice[*FP](mem, 11, 11)
|
||||||
// phase 1
|
// phase 1
|
||||||
xp = append(xp, NewFPcopy(F))
|
xp[0] = NewFPcopy(F, mem)
|
||||||
xp = append(xp, NewFPcopy(F))
|
xp[1] = NewFPcopy(F, mem)
|
||||||
xp[1].Sqr()
|
xp[1].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[1]))
|
xp[2] = NewFPcopy(xp[1], mem)
|
||||||
xp[2].Mul(F)
|
xp[2].Mul(F, mem)
|
||||||
xp = append(xp, NewFPcopy(xp[2]))
|
xp[3] = NewFPcopy(xp[2], mem)
|
||||||
xp[3].Sqr()
|
xp[3].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[3]))
|
xp[4] = NewFPcopy(xp[3], mem)
|
||||||
xp[4].Sqr()
|
xp[4].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[4]))
|
xp[5] = NewFPcopy(xp[4], mem)
|
||||||
xp[5].Mul(xp[2])
|
xp[5].Mul(xp[2], mem)
|
||||||
xp = append(xp, NewFPcopy(xp[5]))
|
xp[6] = NewFPcopy(xp[5], mem)
|
||||||
xp[6].Sqr()
|
xp[6].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[6]))
|
xp[7] = NewFPcopy(xp[6], mem)
|
||||||
xp[7].Sqr()
|
xp[7].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[7]))
|
xp[8] = NewFPcopy(xp[7], mem)
|
||||||
xp[8].Sqr()
|
xp[8].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[8]))
|
xp[9] = NewFPcopy(xp[8], mem)
|
||||||
xp[9].Sqr()
|
xp[9].Sqr(mem)
|
||||||
xp = append(xp, NewFPcopy(xp[9]))
|
xp[10] = NewFPcopy(xp[9], mem)
|
||||||
xp[10].Mul(xp[5])
|
xp[10].Mul(xp[5], mem)
|
||||||
var n, c int
|
var n, c int
|
||||||
|
|
||||||
e := int(PM1D2)
|
e := int(PM1D2)
|
||||||
@ -529,7 +582,7 @@ func (F *FP) fpow() *FP {
|
|||||||
k := w - c
|
k := w - c
|
||||||
|
|
||||||
i := 10
|
i := 10
|
||||||
key := NewFP()
|
key := NewFP(mem)
|
||||||
|
|
||||||
if k != 0 {
|
if k != 0 {
|
||||||
for ac[i] > k {
|
for ac[i] > k {
|
||||||
@ -544,7 +597,7 @@ func (F *FP) fpow() *FP {
|
|||||||
if ac[i] > k {
|
if ac[i] > k {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
key.Mul(xp[i])
|
key.Mul(xp[i], mem)
|
||||||
k -= ac[i]
|
k -= ac[i]
|
||||||
}
|
}
|
||||||
// phase 2
|
// phase 2
|
||||||
@ -555,19 +608,19 @@ func (F *FP) fpow() *FP {
|
|||||||
j := 3
|
j := 3
|
||||||
m := 8
|
m := 8
|
||||||
nw := n - bw
|
nw := n - bw
|
||||||
t := NewFP()
|
t := NewFP(mem)
|
||||||
for 2*m < nw {
|
for 2*m < nw {
|
||||||
t.copy(xp[j])
|
t.copy(xp[j])
|
||||||
j++
|
j++
|
||||||
for i = 0; i < m; i++ {
|
for i = 0; i < m; i++ {
|
||||||
t.Sqr()
|
t.Sqr(mem)
|
||||||
}
|
}
|
||||||
xp[j].copy(xp[j-1])
|
xp[j].copy(xp[j-1])
|
||||||
xp[j].Mul(t)
|
xp[j].Mul(t, mem)
|
||||||
m *= 2
|
m *= 2
|
||||||
}
|
}
|
||||||
lo := nw - m
|
lo := nw - m
|
||||||
r := NewFPcopy(xp[j])
|
r := NewFPcopy(xp[j], mem)
|
||||||
|
|
||||||
for lo != 0 {
|
for lo != 0 {
|
||||||
m /= 2
|
m /= 2
|
||||||
@ -578,84 +631,86 @@ func (F *FP) fpow() *FP {
|
|||||||
lo -= m
|
lo -= m
|
||||||
t.copy(r)
|
t.copy(r)
|
||||||
for i = 0; i < m; i++ {
|
for i = 0; i < m; i++ {
|
||||||
t.Sqr()
|
t.Sqr(mem)
|
||||||
}
|
}
|
||||||
r.copy(t)
|
r.copy(t)
|
||||||
r.Mul(xp[j])
|
r.Mul(xp[j], mem)
|
||||||
}
|
}
|
||||||
// phase 3
|
// phase 3
|
||||||
if bw != 0 {
|
if bw != 0 {
|
||||||
for i = 0; i < bw; i++ {
|
for i = 0; i < bw; i++ {
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
}
|
}
|
||||||
r.Mul(key)
|
r.Mul(key, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
if MODTYPE == GENERALISED_MERSENNE { // Goldilocks ONLY
|
if MODTYPE == GENERALISED_MERSENNE { // Goldilocks ONLY
|
||||||
key.copy(r)
|
key.copy(r)
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Mul(F)
|
r.Mul(F, mem)
|
||||||
for i = 0; i < n+1; i++ {
|
for i = 0; i < n+1; i++ {
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
}
|
}
|
||||||
r.Mul(key)
|
r.Mul(key, mem)
|
||||||
}
|
}
|
||||||
for nd > 0 {
|
for nd > 0 {
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
nd--
|
nd--
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
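fpow's table of eleven intermediate powers switches from an append-grown heap slice to a fixed-length arena slice via arena.MakeSlice (assuming GOEXPERIMENT=arenas). A stripped-down sketch; powerTable is hypothetical, and the fill below is a plain squaring chain rather than fpow's exact addition chain:

func powerTable(F *FP, mem *arena.Arena) []*FP {
	xp := arena.MakeSlice[*FP](mem, 11, 11) // len = cap = 11, backing array in mem
	xp[0] = NewFPcopy(F, mem)
	for i := 1; i < 11; i++ {
		xp[i] = NewFPcopy(xp[i-1], mem)
		xp[i].Sqr(mem)
	}
	return xp // valid only until mem.Free()
}

Indexed assignment replaces append because the length is known up front, and nothing in the table outlives the arena.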
|
|
||||||
// calculates r=x^((p-1-2^e)/2^(e+1)) where 2^e|p-1
|
// calculates r=x^((p-1-2^e)/2^(e+1)) where 2^e|p-1
|
||||||
func (F *FP) progen() {
|
func (F *FP) progen(mem *arena.Arena) {
|
||||||
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
|
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
|
||||||
F.copy(F.fpow())
|
F.copy(F.fpow(mem))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
e := uint(PM1D2)
|
e := uint(PM1D2)
|
||||||
m := NewBIGints(Modulus)
|
m := NewBIGints(Modulus, mem)
|
||||||
m.dec(1)
|
m.dec(1)
|
||||||
m.shr(e)
|
m.shr(e)
|
||||||
m.dec(1)
|
m.dec(1)
|
||||||
m.fshr(1)
|
m.fshr(1)
|
||||||
F.copy(F.pow(m))
|
F.copy(F.pow(m, mem))
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=1/this mod Modulus */
|
/* this=1/this mod Modulus */
|
||||||
func (F *FP) Invert(h *FP) {
|
func (F *FP) Invert(h *FP, mem *arena.Arena) {
|
||||||
e := int(PM1D2)
|
e := int(PM1D2)
|
||||||
F.norm()
|
F.norm()
|
||||||
s := NewFPcopy(F)
|
s := NewFPcopy(F, mem)
|
||||||
for i := 0; i < e-1; i++ {
|
for i := 0; i < e-1; i++ {
|
||||||
s.Sqr()
|
s.Sqr(mem)
|
||||||
s.Mul(F)
|
s.Mul(F, mem)
|
||||||
}
|
}
|
||||||
if h == nil {
|
if h == nil {
|
||||||
F.progen()
|
F.progen(mem)
|
||||||
} else {
|
} else {
|
||||||
F.copy(h)
|
F.copy(h)
|
||||||
}
|
}
|
||||||
for i := 0; i <= e; i++ {
|
for i := 0; i <= e; i++ {
|
||||||
F.Sqr()
|
F.Sqr(mem)
|
||||||
}
|
}
|
||||||
F.Mul(s)
|
F.Mul(s, mem)
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* test for Quadratic residue */
|
/* test for Quadratic residue */
|
||||||
func (F *FP) qr(h *FP) int {
|
func (F *FP) qr(h *FP) int {
|
||||||
r := NewFPcopy(F)
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
r := NewFPcopy(F, mem)
|
||||||
e := int(PM1D2)
|
e := int(PM1D2)
|
||||||
r.progen()
|
r.progen(mem)
|
||||||
if h != nil {
|
if h != nil {
|
||||||
h.copy(r)
|
h.copy(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
r.Mul(F)
|
r.Mul(F, mem)
|
||||||
for i := 0; i < e-1; i++ {
|
for i := 0; i < e-1; i++ {
|
||||||
r.Sqr()
|
r.Sqr(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.isunity() {
|
if r.isunity() {
|
||||||
@ -666,29 +721,29 @@ func (F *FP) qr(h *FP) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* return sqrt(this) mod Modulus */
|
/* return sqrt(this) mod Modulus */
|
||||||
func (F *FP) Sqrt(h *FP) *FP {
|
func (F *FP) Sqrt(h *FP, mem *arena.Arena) *FP {
|
||||||
e := int(PM1D2)
|
e := int(PM1D2)
|
||||||
g := NewFPcopy(F)
|
g := NewFPcopy(F, mem)
|
||||||
if h == nil {
|
if h == nil {
|
||||||
g.progen()
|
g.progen(mem)
|
||||||
} else {
|
} else {
|
||||||
g.copy(h)
|
g.copy(h)
|
||||||
}
|
}
|
||||||
|
|
||||||
m := NewBIGints(ROI)
|
m := NewBIGints(ROI, mem)
|
||||||
v := NewFPbig(m)
|
v := NewFPbig(m, mem)
|
||||||
|
|
||||||
t := NewFPcopy(g)
|
t := NewFPcopy(g, mem)
|
||||||
t.Sqr()
|
t.Sqr(mem)
|
||||||
t.Mul(F)
|
t.Mul(F, mem)
|
||||||
|
|
||||||
r := NewFPcopy(F)
|
r := NewFPcopy(F, mem)
|
||||||
r.Mul(g)
|
r.Mul(g, mem)
|
||||||
b := NewFPcopy(t)
|
b := NewFPcopy(t, mem)
|
||||||
|
|
||||||
for k := e; k > 1; k-- {
|
for k := e; k > 1; k-- {
|
||||||
for j := 1; j < k-1; j++ {
|
for j := 1; j < k-1; j++ {
|
||||||
b.Sqr()
|
b.Sqr(mem)
|
||||||
}
|
}
|
||||||
var u int
|
var u int
|
||||||
if b.isunity() {
|
if b.isunity() {
|
||||||
@ -697,41 +752,43 @@ func (F *FP) Sqrt(h *FP) *FP {
|
|||||||
u = 1
|
u = 1
|
||||||
}
|
}
|
||||||
g.copy(r)
|
g.copy(r)
|
||||||
g.Mul(v)
|
g.Mul(v, mem)
|
||||||
r.cmove(g, u)
|
r.cmove(g, u)
|
||||||
v.Sqr()
|
v.Sqr(mem)
|
||||||
g.copy(t)
|
g.copy(t)
|
||||||
g.Mul(v)
|
g.Mul(v, mem)
|
||||||
t.cmove(g, u)
|
t.cmove(g, u)
|
||||||
b.copy(t)
|
b.copy(t)
|
||||||
}
|
}
|
||||||
sgn := r.sign()
|
sgn := r.sign(mem)
|
||||||
nr := NewFPcopy(r)
|
nr := NewFPcopy(r, mem)
|
||||||
nr.Neg()
|
nr.Neg(mem)
|
||||||
nr.norm()
|
nr.norm()
|
||||||
r.cmove(nr, sgn)
|
r.cmove(nr, sgn)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP) invsqrt(i *FP, s *FP) int {
|
func (F *FP) invsqrt(i *FP, s *FP) int {
|
||||||
h := NewFP()
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
h := NewFP(mem)
|
||||||
qr := F.qr(h)
|
qr := F.qr(h)
|
||||||
s.copy(F.Sqrt(h))
|
s.copy(F.Sqrt(h, mem))
|
||||||
i.copy(F)
|
i.copy(F)
|
||||||
i.Invert(h)
|
i.Invert(h, mem)
|
||||||
return qr
|
return qr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Two for the price of one - See Hamburg https://eprint.iacr.org/2012/309.pdf
|
// Two for the price of one - See Hamburg https://eprint.iacr.org/2012/309.pdf
|
||||||
// Calculate Invert of i and square root of s, return QR
|
// Calculate Invert of i and square root of s, return QR
|
||||||
func FP_tpo(i *FP, s *FP) int {
|
func FP_tpo(i *FP, s *FP) int {
|
||||||
w := NewFPcopy(s)
|
w := NewFPcopy(s, nil)
|
||||||
t := NewFPcopy(i)
|
t := NewFPcopy(i, nil)
|
||||||
w.Mul(i)
|
w.Mul(i, nil)
|
||||||
t.Mul(w)
|
t.Mul(w, nil)
|
||||||
qr := t.invsqrt(i, s)
|
qr := t.invsqrt(i, s)
|
||||||
i.Mul(w)
|
i.Mul(w, nil)
|
||||||
s.Mul(i)
|
s.Mul(i, nil)
|
||||||
return qr
|
return qr
|
||||||
}
|
}
|
||||||
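FP_tpo is Hamburg's "two for the price of one" trick: after the call, i holds the inverse of the original i and s holds a square root of the original s, with the return value reporting whether s was a quadratic residue. A hypothetical usage sketch (invAndSqrt is not part of the diff):

func invAndSqrt(a, b *FP) (inv *FP, rt *FP, ok bool) {
	inv = NewFPcopy(a, nil) // overwritten with 1/a
	rt = NewFPcopy(b, nil)  // overwritten with sqrt(b) when b is a QR
	ok = FP_tpo(inv, rt) == 1
	return inv, rt, ok
}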
|
|
||||||
|
@ -23,6 +23,8 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
|
import "arena"
|
||||||
|
|
||||||
//import "fmt"
|
//import "fmt"
|
||||||
|
|
||||||
type FP16 struct {
|
type FP16 struct {
|
||||||
@ -30,46 +32,81 @@ type FP16 struct {
|
|||||||
b *FP8
|
b *FP8
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP16() *FP16 {
|
func NewFP16(mem *arena.Arena) *FP16 {
|
||||||
F := new(FP16)
|
if mem != nil {
|
||||||
F.a = NewFP8()
|
F := arena.New[FP16](mem)
|
||||||
F.b = NewFP8()
|
F.a = NewFP8(mem)
|
||||||
return F
|
F.b = NewFP8(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP16)
|
||||||
|
F.a = NewFP8(nil)
|
||||||
|
F.b = NewFP8(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP16int(a int) *FP16 {
|
func NewFP16int(a int, mem *arena.Arena) *FP16 {
|
||||||
F := new(FP16)
|
if mem != nil {
|
||||||
F.a = NewFP8int(a)
|
F := arena.New[FP16](mem)
|
||||||
F.b = NewFP8()
|
F.a = NewFP8int(a, mem)
|
||||||
return F
|
F.b = NewFP8(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP16)
|
||||||
|
F.a = NewFP8int(a, nil)
|
||||||
|
F.b = NewFP8(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP16copy(x *FP16) *FP16 {
|
func NewFP16copy(x *FP16, mem *arena.Arena) *FP16 {
|
||||||
F := new(FP16)
|
if mem != nil {
|
||||||
F.a = NewFP8copy(x.a)
|
F := arena.New[FP16](mem)
|
||||||
F.b = NewFP8copy(x.b)
|
F.a = NewFP8copy(x.a, mem)
|
||||||
return F
|
F.b = NewFP8copy(x.b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP16)
|
||||||
|
F.a = NewFP8copy(x.a, nil)
|
||||||
|
F.b = NewFP8copy(x.b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP16fp8s(c *FP8, d *FP8) *FP16 {
|
func NewFP16fp8s(c *FP8, d *FP8, mem *arena.Arena) *FP16 {
|
||||||
F := new(FP16)
|
if mem != nil {
|
||||||
F.a = NewFP8copy(c)
|
F := arena.New[FP16](mem)
|
||||||
F.b = NewFP8copy(d)
|
F.a = c
|
||||||
return F
|
F.b = d
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP16)
|
||||||
|
F.a = c
|
||||||
|
F.b = d
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP16fp8(c *FP8) *FP16 {
|
func NewFP16fp8(c *FP8, mem *arena.Arena) *FP16 {
|
||||||
F := new(FP16)
|
if mem != nil {
|
||||||
F.a = NewFP8copy(c)
|
F := arena.New[FP16](mem)
|
||||||
F.b = NewFP8()
|
F.a = c
|
||||||
return F
|
F.b = NewFP8(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP16)
|
||||||
|
F.a = c
|
||||||
|
F.b = NewFP8(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
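Every constructor in this file (NewFP16, NewFP16int, NewFP16copy, ...) now has the same two-branch body: arena.New[T] when an arena is supplied, new(T) otherwise. The shape, reduced to a self-contained sketch over hypothetical types (assumes GOEXPERIMENT=arenas):

type inner struct{ w [8]int64 }

type outer struct{ a, b *inner }

func newOuter(mem *arena.Arena) *outer {
	if mem != nil {
		F := arena.New[outer](mem) // struct and fields live in the arena
		F.a = arena.New[inner](mem)
		F.b = arena.New[inner](mem)
		return F
	}
	F := new(outer) // heap path, reclaimed by the GC as before
	F.a = new(inner)
	F.b = new(inner)
	return F
}

Duplicating the body instead of branching per allocation keeps the arena path free of repeated nil checks, which appears to be why the diff accepts the extra verbosity.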
|
|
||||||
/* reduce all components of this mod Modulus */
|
/* reduce all components of this mod Modulus */
|
||||||
func (F *FP16) reduce() {
|
func (F *FP16) reduce(mem *arena.Arena) {
|
||||||
F.a.reduce()
|
F.a.reduce(mem)
|
||||||
F.b.reduce()
|
F.b.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* normalise all components of this mod Modulus */
|
/* normalise all components of this mod Modulus */
|
||||||
@ -79,8 +116,8 @@ func (F *FP16) norm() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* test this==0 ? */
|
/* test this==0 ? */
|
||||||
func (F *FP16) IsZero() bool {
|
func (F *FP16) IsZero(mem *arena.Arena) bool {
|
||||||
return F.a.IsZero() && F.b.IsZero()
|
return F.a.IsZero(mem) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP16) ToBytes(bf []byte) {
|
func (F *FP16) ToBytes(bf []byte) {
|
||||||
@ -107,7 +144,7 @@ func FP16_fromBytes(bf []byte) *FP16 {
|
|||||||
t[i] = bf[i+MB]
|
t[i] = bf[i+MB]
|
||||||
}
|
}
|
||||||
ta := FP8_fromBytes(t[:])
|
ta := FP8_fromBytes(t[:])
|
||||||
return NewFP16fp8s(ta, tb)
|
return NewFP16fp8s(ta, tb, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Conditional move */
|
/* Conditional move */
|
||||||
@ -118,13 +155,15 @@ func (F *FP16) cmove(g *FP16, d int) {
|
|||||||
|
|
||||||
/* test this==1 ? */
|
/* test this==1 ? */
|
||||||
func (F *FP16) isunity() bool {
|
func (F *FP16) isunity() bool {
|
||||||
one := NewFP8int(1)
|
mem := arena.NewArena()
|
||||||
return F.a.Equals(one) && F.b.IsZero()
|
defer mem.Free()
|
||||||
|
one := NewFP8int(1, mem)
|
||||||
|
return F.a.Equals(one) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* test: is w real? That is, in a+ib, test that b is zero */
|
/* test: is w real? That is, in a+ib, test that b is zero */
|
||||||
func (F *FP16) isreal() bool {
|
func (F *FP16) isreal() bool {
|
||||||
return F.b.IsZero()
|
return F.b.IsZero(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* extract real part a */
|
/* extract real part a */
|
||||||
@ -165,137 +204,137 @@ func (F *FP16) one() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* set this=-this */
|
/* set this=-this */
|
||||||
func (F *FP16) Neg() {
|
func (F *FP16) Neg(mem *arena.Arena) {
|
||||||
F.norm()
|
F.norm()
|
||||||
m := NewFP8copy(F.a)
|
m := NewFP8copy(F.a, mem)
|
||||||
t := NewFP8()
|
t := NewFP8(mem)
|
||||||
m.Add(F.b)
|
m.Add(F.b, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
t.copy(m)
|
t.copy(m)
|
||||||
t.Add(F.b)
|
t.Add(F.b, mem)
|
||||||
F.b.copy(m)
|
F.b.copy(m)
|
||||||
F.b.Add(F.a)
|
F.b.Add(F.a, mem)
|
||||||
F.a.copy(t)
|
F.a.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=conjugate(this) */
|
/* this=conjugate(this) */
|
||||||
func (F *FP16) conj() {
|
func (F *FP16) conj(mem *arena.Arena) {
|
||||||
F.b.Neg()
|
F.b.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=-conjugate(this) */
|
/* this=-conjugate(this) */
|
||||||
func (F *FP16) nconj() {
|
func (F *FP16) nconj(mem *arena.Arena) {
|
||||||
F.a.Neg()
|
F.a.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this+=x */
|
/* this+=x */
|
||||||
func (F *FP16) Add(x *FP16) {
|
func (F *FP16) Add(x *FP16, mem *arena.Arena) {
|
||||||
F.a.Add(x.a)
|
F.a.Add(x.a, mem)
|
||||||
F.b.Add(x.b)
|
F.b.Add(x.b, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=x */
|
/* this-=x */
|
||||||
func (F *FP16) Sub(x *FP16) {
|
func (F *FP16) Sub(x *FP16, mem *arena.Arena) {
|
||||||
m := NewFP16copy(x)
|
m := NewFP16copy(x, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
F.Add(m)
|
F.Add(m, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=x-this */
|
/* this=x-this */
|
||||||
func (F *FP16) rsub(x *FP16) {
|
func (F *FP16) rsub(x *FP16, mem *arena.Arena) {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.Add(x)
|
F.Add(x, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP8 */
|
/* this*=s where s is FP8 */
|
||||||
func (F *FP16) pmul(s *FP8) {
|
func (F *FP16) pmul(s *FP8, mem *arena.Arena) {
|
||||||
F.a.Mul(s)
|
F.a.Mul(s, mem)
|
||||||
F.b.Mul(s)
|
F.b.Mul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP2 */
|
/* this*=s where s is FP2 */
|
||||||
func (F *FP16) qmul(s *FP2) {
|
func (F *FP16) qmul(s *FP2, mem *arena.Arena) {
|
||||||
F.a.qmul(s)
|
F.a.qmul(s, mem)
|
||||||
F.b.qmul(s)
|
F.b.qmul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP */
|
/* this*=s where s is FP */
|
||||||
func (F *FP16) tmul(s *FP) {
|
func (F *FP16) tmul(s *FP, mem *arena.Arena) {
|
||||||
F.a.tmul(s)
|
F.a.tmul(s, mem)
|
||||||
F.b.tmul(s)
|
F.b.tmul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=c where c is int */
|
/* this*=c where c is int */
|
||||||
func (F *FP16) imul(c int) {
|
func (F *FP16) imul(c int, mem *arena.Arena) {
|
||||||
F.a.imul(c)
|
F.a.imul(c, mem)
|
||||||
F.b.imul(c)
|
F.b.imul(c, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=this */
|
/* this*=this */
|
||||||
func (F *FP16) Sqr() {
|
func (F *FP16) Sqr(mem *arena.Arena) {
|
||||||
t1 := NewFP8copy(F.a)
|
t1 := NewFP8copy(F.a, mem)
|
||||||
t2 := NewFP8copy(F.b)
|
t2 := NewFP8copy(F.b, mem)
|
||||||
t3 := NewFP8copy(F.a)
|
t3 := NewFP8copy(F.a, mem)
|
||||||
|
|
||||||
t3.Mul(F.b)
|
t3.Mul(F.b, mem)
|
||||||
t1.Add(F.b)
|
t1.Add(F.b, mem)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
|
|
||||||
t2.Add(F.a)
|
t2.Add(F.a, mem)
|
||||||
|
|
||||||
t1.norm()
|
t1.norm()
|
||||||
t2.norm()
|
t2.norm()
|
||||||
|
|
||||||
F.a.copy(t1)
|
F.a.copy(t1)
|
||||||
F.a.Mul(t2)
|
F.a.Mul(t2, mem)
|
||||||
|
|
||||||
t2.copy(t3)
|
t2.copy(t3)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
t2.Add(t3)
|
t2.Add(t3, mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t2.Neg()
|
t2.Neg(mem)
|
||||||
F.a.Add(t2)
|
F.a.Add(t2, mem)
|
||||||
|
|
||||||
F.b.copy(t3)
|
F.b.copy(t3)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=y */
|
/* this*=y */
|
||||||
func (F *FP16) Mul(y *FP16) {
|
func (F *FP16) Mul(y *FP16, mem *arena.Arena) {
|
||||||
t1 := NewFP8copy(F.a)
|
t1 := NewFP8copy(F.a, mem)
|
||||||
t2 := NewFP8copy(F.b)
|
t2 := NewFP8copy(F.b, mem)
|
||||||
t3 := NewFP8()
|
t3 := NewFP8(mem)
|
||||||
t4 := NewFP8copy(F.b)
|
t4 := NewFP8copy(F.b, mem)
|
||||||
|
|
||||||
t1.Mul(y.a)
|
t1.Mul(y.a, mem)
|
||||||
t2.Mul(y.b)
|
t2.Mul(y.b, mem)
|
||||||
t3.copy(y.b)
|
t3.copy(y.b)
|
||||||
t3.Add(y.a)
|
t3.Add(y.a, mem)
|
||||||
t4.Add(F.a)
|
t4.Add(F.a, mem)
|
||||||
|
|
||||||
t3.norm()
|
t3.norm()
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t4.Mul(t3)
|
t4.Mul(t3, mem)
|
||||||
|
|
||||||
t3.copy(t1)
|
t3.copy(t1)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
t4.Add(t3)
|
t4.Add(t3, mem)
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t3.copy(t2)
|
t3.copy(t2)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
F.b.copy(t4)
|
F.b.copy(t4)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
F.a.copy(t2)
|
F.a.copy(t2)
|
||||||
F.a.Add(t1)
|
F.a.Add(t1, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
@ -306,77 +345,77 @@ func (F *FP16) toString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* this=1/this */
|
/* this=1/this */
|
||||||
func (F *FP16) Invert() {
|
func (F *FP16) Invert(mem *arena.Arena) {
|
||||||
t1 := NewFP8copy(F.a)
|
t1 := NewFP8copy(F.a, mem)
|
||||||
t2 := NewFP8copy(F.b)
|
t2 := NewFP8copy(F.b, mem)
|
||||||
|
|
||||||
t1.Sqr()
|
t1.Sqr(mem)
|
||||||
t2.Sqr()
|
t2.Sqr(mem)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t1.Sub(t2)
|
t1.Sub(t2, mem)
|
||||||
t1.norm()
|
t1.norm()
|
||||||
|
|
||||||
t1.Invert(nil)
|
t1.Invert(nil, mem)
|
||||||
|
|
||||||
F.a.Mul(t1)
|
F.a.Mul(t1, mem)
|
||||||
t1.Neg()
|
t1.Neg(mem)
|
||||||
t1.norm()
|
t1.norm()
|
||||||
F.b.Mul(t1)
|
F.b.Mul(t1, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
|
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
|
||||||
func (F *FP16) times_i() {
|
func (F *FP16) times_i(mem *arena.Arena) {
|
||||||
s := NewFP8copy(F.b)
|
s := NewFP8copy(F.b, mem)
|
||||||
t := NewFP8copy(F.a)
|
t := NewFP8copy(F.a, mem)
|
||||||
s.times_i()
|
s.times_i(mem)
|
||||||
F.a.copy(s)
|
F.a.copy(s)
|
||||||
F.b.copy(t)
|
F.b.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP16) times_i2() {
|
func (F *FP16) times_i2(mem *arena.Arena) {
|
||||||
F.a.times_i()
|
F.a.times_i(mem)
|
||||||
F.b.times_i()
|
F.b.times_i(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP16) times_i4() {
|
func (F *FP16) times_i4(mem *arena.Arena) {
|
||||||
F.a.times_i2()
|
F.a.times_i2(mem)
|
||||||
F.b.times_i2()
|
F.b.times_i2(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^p using Frobenius */
|
/* this=this^p using Frobenius */
|
||||||
func (F *FP16) frob(f *FP2) {
|
func (F *FP16) frob(f *FP2, mem *arena.Arena) {
|
||||||
ff := NewFP2copy(f)
|
ff := NewFP2copy(f, mem)
|
||||||
ff.Sqr()
|
ff.Sqr(mem)
|
||||||
ff.norm()
|
ff.norm()
|
||||||
|
|
||||||
F.a.frob(ff)
|
F.a.frob(ff, mem)
|
||||||
F.b.frob(ff)
|
F.b.frob(ff, mem)
|
||||||
F.b.qmul(f)
|
F.b.qmul(f, mem)
|
||||||
F.b.times_i()
|
F.b.times_i(mem)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^e */
|
/* this=this^e */
|
||||||
func (F *FP16) pow(e *BIG) *FP16 {
|
func (F *FP16) pow(e *BIG, mem *arena.Arena) *FP16 {
|
||||||
w := NewFP16copy(F)
|
w := NewFP16copy(F, mem)
|
||||||
w.norm()
|
w.norm()
|
||||||
z := NewBIGcopy(e)
|
z := NewBIGcopy(e, mem)
|
||||||
r := NewFP16int(1)
|
r := NewFP16int(1, mem)
|
||||||
z.norm()
|
z.norm()
|
||||||
for true {
|
for true {
|
||||||
bt := z.parity()
|
bt := z.parity()
|
||||||
z.fshr(1)
|
z.fshr(1)
|
||||||
if bt == 1 {
|
if bt == 1 {
|
||||||
r.Mul(w)
|
r.Mul(w, mem)
|
||||||
}
|
}
|
||||||
if z.IsZero() {
|
if z.IsZero() {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
w.Sqr()
|
w.Sqr(mem)
|
||||||
}
|
}
|
||||||
r.reduce()
|
r.reduce(mem)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -23,7 +23,11 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
import (
|
||||||
|
"arena"
|
||||||
|
|
||||||
|
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
||||||
|
)
|
||||||
|
|
||||||
//import "fmt"
|
//import "fmt"
|
||||||
|
|
||||||
@ -32,72 +36,128 @@ type FP2 struct {
|
|||||||
b *FP
|
b *FP
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2() *FP2 {
|
func NewFP2(mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFP()
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFP()
|
F.a = NewFP(mem)
|
||||||
return F
|
F.b = NewFP(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFP(nil)
|
||||||
|
F.b = NewFP(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP2int(a int) *FP2 {
|
func NewFP2int(a int, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPint(a)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFP()
|
F.a = NewFPint(a, mem)
|
||||||
return F
|
F.b = NewFP(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPint(a, nil)
|
||||||
|
F.b = NewFP(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2ints(a int, b int) *FP2 {
|
func NewFP2ints(a int, b int, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPint(a)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFPint(b)
|
F.a = NewFPint(a, mem)
|
||||||
return F
|
F.b = NewFPint(b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPint(a, nil)
|
||||||
|
F.b = NewFPint(b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2copy(x *FP2) *FP2 {
|
func NewFP2copy(x *FP2, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPcopy(x.a)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFPcopy(x.b)
|
F.a = NewFPcopy(x.a, mem)
|
||||||
return F
|
F.b = NewFPcopy(x.b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPcopy(x.a, nil)
|
||||||
|
F.b = NewFPcopy(x.b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2fps(c *FP, d *FP) *FP2 {
|
func NewFP2fps(c *FP, d *FP, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPcopy(c)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFPcopy(d)
|
F.a = NewFPcopy(c, mem)
|
||||||
return F
|
F.b = NewFPcopy(d, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPcopy(c, nil)
|
||||||
|
F.b = NewFPcopy(d, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2bigs(c *BIG, d *BIG) *FP2 {
|
func NewFP2bigs(c *BIG, d *BIG, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPbig(c)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFPbig(d)
|
F.a = NewFPbig(c, mem)
|
||||||
return F
|
F.b = NewFPbig(d, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPbig(c, nil)
|
||||||
|
F.b = NewFPbig(d, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2fp(c *FP) *FP2 {
|
func NewFP2fp(c *FP, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPcopy(c)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFP()
|
F.a = NewFPcopy(c, mem)
|
||||||
return F
|
F.b = NewFP(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPcopy(c, nil)
|
||||||
|
F.b = NewFP(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2big(c *BIG) *FP2 {
|
func NewFP2big(c *BIG, mem *arena.Arena) *FP2 {
|
||||||
F := new(FP2)
|
if mem != nil {
|
||||||
F.a = NewFPbig(c)
|
F := arena.New[FP2](mem)
|
||||||
F.b = NewFP()
|
F.a = NewFPbig(c, mem)
|
||||||
return F
|
F.b = NewFP(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP2)
|
||||||
|
F.a = NewFPbig(c, nil)
|
||||||
|
F.b = NewFP(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP2rand(rng *ext.RAND) *FP2 {
|
func NewFP2rand(rng *ext.RAND) *FP2 {
|
||||||
F := NewFP2fps(NewFPrand(rng), NewFPrand(rng))
|
F := NewFP2fps(NewFPrand(rng), NewFPrand(rng), nil)
|
||||||
return F
|
return F
|
||||||
}
|
}
|
||||||
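The same constructors serve both lifetimes, so the call site decides: pass an arena for scratch values, nil for anything that must outlive the computation. A hypothetical FP2 call-site sketch (buildSquare is not part of the diff):

func buildSquare(x, y *BIG) *FP2 {
	mem := arena.NewArena()
	defer mem.Free()
	tmp := NewFP2bigs(x, y, mem) // scratch value, gone after mem.Free()
	tmp.Sqr(mem)                 // arena-scoped intermediate work
	return NewFP2copy(tmp, nil)  // result copied to the GC heap before Free
}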
|
|
||||||
/* reduce components mod Modulus */
|
/* reduce components mod Modulus */
|
||||||
func (F *FP2) reduce() {
|
func (F *FP2) reduce(mem *arena.Arena) {
|
||||||
F.a.reduce()
|
F.a.reduce(mem)
|
||||||
F.b.reduce()
|
F.b.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* normalise components of w */
|
/* normalise components of w */
|
||||||
@ -107,12 +167,12 @@ func (F *FP2) norm() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* test this=0 ? */
|
/* test this=0 ? */
|
||||||
func (F *FP2) IsZero() bool {
|
func (F *FP2) IsZero(mem *arena.Arena) bool {
|
||||||
return (F.a.IsZero() && F.b.IsZero())
|
return (F.a.IsZero(mem) && F.b.IsZero(mem))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP2) islarger() int {
|
func (F *FP2) islarger() int {
|
||||||
if F.IsZero() {
|
if F.IsZero(nil) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
cmp := F.b.islarger()
|
cmp := F.b.islarger()
|
||||||
@ -146,7 +206,7 @@ func FP2_fromBytes(bf []byte) *FP2 {
|
|||||||
t[i] = bf[i+MB]
|
t[i] = bf[i+MB]
|
||||||
}
|
}
|
||||||
ta := FP_fromBytes(t[:])
|
ta := FP_fromBytes(t[:])
|
||||||
return NewFP2fps(ta, tb)
|
return NewFP2fps(ta, tb, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP2) cmove(g *FP2, d int) {
|
func (F *FP2) cmove(g *FP2, d int) {
|
||||||
@ -156,8 +216,10 @@ func (F *FP2) cmove(g *FP2, d int) {
|
|||||||
|
|
||||||
/* test this=1 ? */
|
/* test this=1 ? */
|
||||||
func (F *FP2) isunity() bool {
|
func (F *FP2) isunity() bool {
|
||||||
one := NewFPint(1)
|
mem := arena.NewArena()
|
||||||
return (F.a.Equals(one) && F.b.IsZero())
|
defer mem.Free()
|
||||||
|
one := NewFPint(1, mem)
|
||||||
|
return (F.a.Equals(one) && F.b.IsZero(mem))
|
||||||
}
|
}
|
||||||
|
|
||||||
/* test this=x */
|
/* test this=x */
|
||||||
@ -166,13 +228,13 @@ func (F *FP2) Equals(x *FP2) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* extract a */
|
/* extract a */
|
||||||
func (F *FP2) GetA() *BIG {
|
func (F *FP2) GetA(mem *arena.Arena) *BIG {
|
||||||
return F.a.Redc()
|
return F.a.Redc(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* extract b */
|
/* extract b */
|
||||||
func (F *FP2) GetB() *BIG {
|
func (F *FP2) GetB(mem *arena.Arena) *BIG {
|
||||||
return F.b.Redc()
|
return F.b.Redc(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copy this=x */
|
/* copy this=x */
|
||||||
@ -194,12 +256,12 @@ func (F *FP2) one() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Return sign */
|
/* Return sign */
|
||||||
func (F *FP2) sign() int {
|
func (F *FP2) sign(mem *arena.Arena) int {
|
||||||
p1 := F.a.sign()
|
p1 := F.a.sign(mem)
|
||||||
p2 := F.b.sign()
|
p2 := F.b.sign(mem)
|
||||||
var u int
|
var u int
|
||||||
if BIG_ENDIAN_SIGN {
|
if BIG_ENDIAN_SIGN {
|
||||||
if F.b.IsZero() {
|
if F.b.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -207,7 +269,7 @@ func (F *FP2) sign() int {
|
|||||||
p2 ^= (p1 ^ p2) & u
|
p2 ^= (p1 ^ p2) & u
|
||||||
return p2
|
return p2
|
||||||
} else {
|
} else {
|
||||||
if F.a.IsZero() {
|
if F.a.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -218,106 +280,106 @@ func (F *FP2) sign() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* negate this mod Modulus */
|
/* negate this mod Modulus */
|
||||||
func (F *FP2) Neg() {
|
func (F *FP2) Neg(mem *arena.Arena) {
|
||||||
m := NewFPcopy(F.a)
|
m := NewFPcopy(F.a, mem)
|
||||||
t := NewFP()
|
t := NewFP(mem)
|
||||||
|
|
||||||
m.Add(F.b)
|
m.Add(F.b, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
t.copy(m)
|
t.copy(m)
|
||||||
t.Add(F.b)
|
t.Add(F.b, mem)
|
||||||
F.b.copy(m)
|
F.b.copy(m)
|
||||||
F.b.Add(F.a)
|
F.b.Add(F.a, mem)
|
||||||
F.a.copy(t)
|
F.a.copy(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* set to a-ib */
|
/* set to a-ib */
|
||||||
func (F *FP2) conj() {
|
func (F *FP2) conj(mem *arena.Arena) {
|
||||||
F.b.Neg()
|
F.b.Neg(mem)
|
||||||
F.b.norm()
|
F.b.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this+=a */
|
/* this+=a */
|
||||||
func (F *FP2) Add(x *FP2) {
|
func (F *FP2) Add(x *FP2, mem *arena.Arena) {
|
||||||
F.a.Add(x.a)
|
F.a.Add(x.a, mem)
|
||||||
F.b.Add(x.b)
|
F.b.Add(x.b, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=a */
|
/* this-=a */
|
||||||
func (F *FP2) Sub(x *FP2) {
|
func (F *FP2) Sub(x *FP2, mem *arena.Arena) {
|
||||||
m := NewFP2copy(x)
|
m := NewFP2copy(x, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
F.Add(m)
|
F.Add(m, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=x-this */
|
/* this=x-this */
|
||||||
func (F *FP2) rsub(x *FP2) {
|
func (F *FP2) rsub(x *FP2, mem *arena.Arena) {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.Add(x)
|
F.Add(x, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s, where s is an FP */
|
/* this*=s, where s is an FP */
|
||||||
func (F *FP2) pmul(s *FP) {
|
func (F *FP2) pmul(s *FP, mem *arena.Arena) {
|
||||||
F.a.Mul(s)
|
F.a.Mul(s, mem)
|
||||||
F.b.Mul(s)
|
F.b.Mul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=c, where c is an int */
|
/* this*=c, where c is an int */
|
||||||
func (F *FP2) imul(c int) {
|
func (F *FP2) imul(c int, mem *arena.Arena) {
|
||||||
F.a.imul(c)
|
F.a.imul(c, mem)
|
||||||
F.b.imul(c)
|
F.b.imul(c, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=this */
|
/* this*=this */
|
||||||
func (F *FP2) Sqr() {
|
func (F *FP2) Sqr(mem *arena.Arena) {
|
||||||
w1 := NewFPcopy(F.a)
|
w1 := NewFPcopy(F.a, mem)
|
||||||
w3 := NewFPcopy(F.a)
|
w3 := NewFPcopy(F.a, mem)
|
||||||
mb := NewFPcopy(F.b)
|
mb := NewFPcopy(F.b, mem)
|
||||||
w1.Add(F.b)
|
w1.Add(F.b, mem)
|
||||||
|
|
||||||
w3.Add(F.a)
|
w3.Add(F.a, mem)
|
||||||
w3.norm()
|
w3.norm()
|
||||||
F.b.Mul(w3)
|
F.b.Mul(w3, mem)
|
||||||
|
|
||||||
mb.Neg()
|
mb.Neg(mem)
|
||||||
F.a.Add(mb)
|
F.a.Add(mb, mem)
|
||||||
|
|
||||||
w1.norm()
|
w1.norm()
|
||||||
F.a.norm()
|
F.a.norm()
|
||||||
|
|
||||||
F.a.Mul(w1)
|
F.a.Mul(w1, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=y */
|
/* this*=y */
|
||||||
/* Now using Lazy reduction */
|
/* Now using Lazy reduction */
|
||||||
func (F *FP2) Mul(y *FP2) {
|
func (F *FP2) Mul(y *FP2, mem *arena.Arena) {
|
||||||
|
|
||||||
if int64(F.a.XES+F.b.XES)*int64(y.a.XES+y.b.XES) > int64(FEXCESS) {
|
if int64(F.a.XES+F.b.XES)*int64(y.a.XES+y.b.XES) > int64(FEXCESS) {
|
||||||
if F.a.XES > 1 {
|
if F.a.XES > 1 {
|
||||||
F.a.reduce()
|
F.a.reduce(mem)
|
||||||
}
|
}
|
||||||
if F.b.XES > 1 {
|
if F.b.XES > 1 {
|
||||||
F.b.reduce()
|
F.b.reduce(mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pR := NewDBIG()
|
pR := NewDBIG(mem)
|
||||||
C := NewBIGcopy(F.a.x)
|
C := NewBIGcopy(F.a.x, mem)
|
||||||
D := NewBIGcopy(y.a.x)
|
D := NewBIGcopy(y.a.x, mem)
|
||||||
p := NewBIGints(Modulus)
|
p := NewBIGints(Modulus, mem)
|
||||||
|
|
||||||
pR.ucopy(p)
|
pR.ucopy(p)
|
||||||
|
|
||||||
A := mul(F.a.x, y.a.x)
|
A := mul(F.a.x, y.a.x, mem)
|
||||||
B := mul(F.b.x, y.b.x)
|
B := mul(F.b.x, y.b.x, mem)
|
||||||
|
|
||||||
C.Add(F.b.x)
|
C.Add(F.b.x)
|
||||||
C.norm()
|
C.norm()
|
||||||
D.Add(y.b.x)
|
D.Add(y.b.x)
|
||||||
D.norm()
|
D.norm()
|
||||||
|
|
||||||
E := mul(C, D)
|
E := mul(C, D, mem)
|
||||||
FF := NewDBIGcopy(A)
|
FF := NewDBIGcopy(A, mem)
|
||||||
FF.Add(B)
|
FF.Add(B)
|
||||||
B.rsub(pR)
|
B.rsub(pR)
|
||||||
|
|
||||||
@ -326,82 +388,84 @@ func (F *FP2) Mul(y *FP2) {
|
|||||||
E.Sub(FF)
|
E.Sub(FF)
|
||||||
E.norm()
|
E.norm()
|
||||||
|
|
||||||
F.a.x.copy(mod(A))
|
F.a.x.copy(mod(A, mem))
|
||||||
F.a.XES = 3
|
F.a.XES = 3
|
||||||
F.b.x.copy(mod(E))
|
F.b.x.copy(mod(E, mem))
|
||||||
F.b.XES = 2
|
F.b.XES = 2
|
||||||
|
|
||||||
}
|
}
|
||||||
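FP2.Mul above is the usual three-multiplication complex product with lazy reduction: A = a*c, B = b*d, E = (a+b)*(c+d), giving a real part A - B and an imaginary part E - A - B, each reduced only once. For intuition, the straightforward four-multiplication version it is equivalent to (mulSchoolbook is a hypothetical helper, not in the diff):

func mulSchoolbook(F, y *FP2, mem *arena.Arena) {
	ac := NewFPcopy(F.a, mem)
	ac.Mul(y.a, mem)
	bd := NewFPcopy(F.b, mem)
	bd.Mul(y.b, mem)
	ad := NewFPcopy(F.a, mem)
	ad.Mul(y.b, mem)
	bc := NewFPcopy(F.b, mem)
	bc.Mul(y.a, mem)
	bd.Neg(mem)
	F.a.copy(ac)
	F.a.Add(bd, mem) // real part: a*c - b*d  (since i*i = -1)
	F.b.copy(ad)
	F.b.Add(bc, mem) // imaginary part: a*d + b*c
	F.norm()
}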
|
|
||||||
/*
|
/*
|
||||||
func (F *FP2) pow(b *BIG) {
|
func (F *FP2) pow(b *BIG) {
|
||||||
w := NewFP2copy(F);
|
w := NewFP2copy(F);
|
||||||
r := NewFP2int(1)
|
r := NewFP2int(1)
|
||||||
z := NewBIGcopy(b)
|
z := NewBIGcopy(b)
|
||||||
for true {
|
for true {
|
||||||
bt := z.parity()
|
bt := z.parity()
|
||||||
z.shr(1)
|
z.shr(1)
|
||||||
if bt==1 {
|
if bt==1 {
|
||||||
r.Mul(w)
|
r.Mul(w)
|
||||||
|
}
|
||||||
|
if z.IsZero() {break}
|
||||||
|
w.Sqr()
|
||||||
}
|
}
|
||||||
if z.IsZero() {break}
|
r.reduce()
|
||||||
w.Sqr()
|
F.copy(r)
|
||||||
}
|
}
|
||||||
r.reduce()
|
|
||||||
F.copy(r)
|
|
||||||
}
|
|
||||||
*/
|
*/
|
||||||
func (F *FP2) qr(h *FP) int {
|
func (F *FP2) qr(h *FP) int {
|
||||||
c := NewFP2copy(F)
|
mem := arena.NewArena()
|
||||||
c.conj()
|
defer mem.Free()
|
||||||
c.Mul(F)
|
c := NewFP2copy(F, mem)
|
||||||
|
c.conj(mem)
|
||||||
|
c.Mul(F, mem)
|
||||||
return c.a.qr(h)
|
return c.a.qr(h)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* sqrt(a+ib) = sqrt((a+sqrt(a*a-n*b*b))/2) + ib/(2*sqrt((a+sqrt(a*a-n*b*b))/2)) */
|
/* sqrt(a+ib) = sqrt((a+sqrt(a*a-n*b*b))/2) + ib/(2*sqrt((a+sqrt(a*a-n*b*b))/2)) */
|
||||||
func (F *FP2) Sqrt(h *FP) {
|
func (F *FP2) Sqrt(h *FP, mem *arena.Arena) {
|
||||||
if F.IsZero() {
|
if F.IsZero(mem) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
w1 := NewFPcopy(F.b)
|
w1 := NewFPcopy(F.b, mem)
|
||||||
w2 := NewFPcopy(F.a)
|
w2 := NewFPcopy(F.a, mem)
|
||||||
w3 := NewFP()
|
w3 := NewFP(mem)
|
||||||
w4 := NewFP()
|
w4 := NewFP(mem)
|
||||||
hint := NewFP()
|
hint := NewFP(mem)
|
||||||
w1.Sqr()
|
w1.Sqr(mem)
|
||||||
w2.Sqr()
|
w2.Sqr(mem)
|
||||||
w1.Add(w2)
|
w1.Add(w2, mem)
|
||||||
w1.norm()
|
w1.norm()
|
||||||
|
|
||||||
w1 = w1.Sqrt(h)
|
w1 = w1.Sqrt(h, mem)
|
||||||
w2.copy(F.a)
|
w2.copy(F.a)
|
||||||
w3.copy(F.a)
|
w3.copy(F.a)
|
||||||
|
|
||||||
w2.Add(w1)
|
w2.Add(w1, mem)
|
||||||
w2.norm()
|
w2.norm()
|
||||||
w2.div2()
|
w2.div2(mem)
|
||||||
|
|
||||||
w1.copy(F.b)
|
w1.copy(F.b)
|
||||||
w1.div2()
|
w1.div2(mem)
|
||||||
qr := w2.qr(hint)
|
qr := w2.qr(hint)
|
||||||
|
|
||||||
// tweak hint
|
// tweak hint
|
||||||
w3.copy(hint)
|
w3.copy(hint)
|
||||||
w3.Neg()
|
w3.Neg(mem)
|
||||||
w3.norm()
|
w3.norm()
|
||||||
w4.copy(w2)
|
w4.copy(w2)
|
||||||
w4.Neg()
|
w4.Neg(mem)
|
||||||
w4.norm()
|
w4.norm()
|
||||||
|
|
||||||
w2.cmove(w4, 1-qr)
|
w2.cmove(w4, 1-qr)
|
||||||
hint.cmove(w3, 1-qr)
|
hint.cmove(w3, 1-qr)
|
||||||
|
|
||||||
F.a.copy(w2.Sqrt(hint))
|
F.a.copy(w2.Sqrt(hint, mem))
|
||||||
w3.copy(w2)
|
w3.copy(w2)
|
||||||
w3.Invert(hint)
|
w3.Invert(hint, mem)
|
||||||
w3.Mul(F.a)
|
w3.Mul(F.a, mem)
|
||||||
F.b.copy(w3)
|
F.b.copy(w3)
|
||||||
F.b.Mul(w1)
|
F.b.Mul(w1, mem)
|
||||||
w4.copy(F.a)
|
w4.copy(F.a)
|
||||||
|
|
||||||
F.a.cmove(F.b, 1-qr)
|
F.a.cmove(F.b, 1-qr)
|
||||||
@ -425,9 +489,9 @@ func (F *FP2) Sqrt(h *FP) {
|
|||||||
F.b.cmove(w4,1-qr)
|
F.b.cmove(w4,1-qr)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
sgn := F.sign()
|
sgn := F.sign(mem)
|
||||||
nr := NewFP2copy(F)
|
nr := NewFP2copy(F, mem)
|
||||||
nr.Neg()
|
nr.Neg(mem)
|
||||||
nr.norm()
|
nr.norm()
|
||||||
F.cmove(nr, sgn)
|
F.cmove(nr, sgn)
|
||||||
}
|
}
|
||||||
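The identity behind Sqrt above, written out (in this tower i = sqrt(-1), so the a*a-n*b*b in the comment is just a*a + b*b): sqrt(a+ib) = t + i*b/(2t) with t = sqrt((a + sqrt(a*a + b*b))/2); the hint/qr machinery only selects, in constant time, whichever candidate t is actually a square. A quick floating-point sanity check of the identity (illustration only, not field code):

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	a, b := 3.0, 4.0
	t := math.Sqrt((a + math.Hypot(a, b)) / 2)
	got := complex(t, b/(2*t))
	want := cmplx.Sqrt(complex(a, b))
	fmt.Println(got, want) // both print (2+1i)
}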
@ -443,63 +507,63 @@ func (F *FP2) toString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* this=1/this */
|
/* this=1/this */
|
||||||
func (F *FP2) Invert(h *FP) {
|
func (F *FP2) Invert(h *FP, mem *arena.Arena) {
|
||||||
F.norm()
|
F.norm()
|
||||||
w1 := NewFPcopy(F.a)
|
w1 := NewFPcopy(F.a, mem)
|
||||||
w2 := NewFPcopy(F.b)
|
w2 := NewFPcopy(F.b, mem)
|
||||||
|
|
||||||
w1.Sqr()
|
w1.Sqr(mem)
|
||||||
w2.Sqr()
|
w2.Sqr(mem)
|
||||||
w1.Add(w2)
|
w1.Add(w2, mem)
|
||||||
w1.Invert(h)
|
w1.Invert(h, mem)
|
||||||
F.a.Mul(w1)
|
F.a.Mul(w1, mem)
|
||||||
w1.Neg()
|
w1.Neg(mem)
|
||||||
w1.norm()
|
w1.norm()
|
||||||
F.b.Mul(w1)
|
F.b.Mul(w1, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this/=2 */
|
/* this/=2 */
|
||||||
func (F *FP2) div2() {
|
func (F *FP2) div2(mem *arena.Arena) {
|
||||||
F.a.div2()
|
F.a.div2(mem)
|
||||||
F.b.div2()
|
F.b.div2(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=sqrt(-1) */
|
/* this*=sqrt(-1) */
|
||||||
func (F *FP2) times_i() {
|
func (F *FP2) times_i(mem *arena.Arena) {
|
||||||
z := NewFPcopy(F.a)
|
z := NewFPcopy(F.a, mem)
|
||||||
F.a.copy(F.b)
|
F.a.copy(F.b)
|
||||||
F.a.Neg()
|
F.a.Neg(mem)
|
||||||
F.b.copy(z)
|
F.b.copy(z)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* w*=(1+sqrt(-1)) */
|
/* w*=(1+sqrt(-1)) */
|
||||||
/* where X*2-(2^i+sqrt(-1)) is irreducible for FP4 */
|
/* where X*2-(2^i+sqrt(-1)) is irreducible for FP4 */
|
||||||
func (F *FP2) Mul_ip() {
|
func (F *FP2) Mul_ip(mem *arena.Arena) {
|
||||||
t := NewFP2copy(F)
|
t := NewFP2copy(F, mem)
|
||||||
i := QNRI
|
i := QNRI
|
||||||
F.times_i()
|
F.times_i(mem)
|
||||||
for i > 0 {
|
for i > 0 {
|
||||||
t.Add(t)
|
t.Add(t, mem)
|
||||||
t.norm()
|
t.norm()
|
||||||
i--
|
i--
|
||||||
}
|
}
|
||||||
F.Add(t)
|
F.Add(t, mem)
|
||||||
|
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.norm()
|
F.norm()
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* w/=(2^i+sqrt(-1)) */
|
/* w/=(2^i+sqrt(-1)) */
|
||||||
func (F *FP2) div_ip() {
|
func (F *FP2) div_ip(mem *arena.Arena) {
|
||||||
z := NewFP2ints(1<<uint(QNRI), 1)
|
z := NewFP2ints(1<<uint(QNRI), 1, nil)
|
||||||
z.Invert(nil)
|
z.Invert(nil, mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
F.Mul(z)
|
F.Mul(z, mem)
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -23,7 +23,11 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
import (
|
||||||
|
"arena"
|
||||||
|
|
||||||
|
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
||||||
|
)
|
||||||
|
|
||||||
//import "fmt"
|
//import "fmt"
|
||||||
|
|
||||||
@ -32,66 +36,115 @@ type FP4 struct {
|
|||||||
b *FP2
|
b *FP2
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4() *FP4 {
|
func NewFP4(mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2()
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2()
|
F.a = NewFP2(mem)
|
||||||
return F
|
F.b = NewFP2(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2(nil)
|
||||||
|
F.b = NewFP2(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP4int(a int) *FP4 {
|
func NewFP4int(a int, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2int(a)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2()
|
F.a = NewFP2int(a, mem)
|
||||||
return F
|
F.b = NewFP2(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2int(a, nil)
|
||||||
|
F.b = NewFP2(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP4ints(a int, b int) *FP4 {
|
func NewFP4ints(a int, b int, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2int(a)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2int(b)
|
F.a = NewFP2int(a, mem)
|
||||||
return F
|
F.b = NewFP2int(b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2int(a, nil)
|
||||||
|
F.b = NewFP2int(b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4copy(x *FP4) *FP4 {
|
func NewFP4copy(x *FP4, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2copy(x.a)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2copy(x.b)
|
F.a = NewFP2copy(x.a, mem)
|
||||||
return F
|
F.b = NewFP2copy(x.b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2copy(x.a, nil)
|
||||||
|
F.b = NewFP2copy(x.b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4fp2s(c *FP2, d *FP2) *FP4 {
|
func NewFP4fp2s(c *FP2, d *FP2, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2copy(c)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2copy(d)
|
F.a = NewFP2copy(c, mem)
|
||||||
return F
|
F.b = NewFP2copy(d, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2copy(c, nil)
|
||||||
|
F.b = NewFP2copy(d, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4fp2(c *FP2) *FP4 {
|
func NewFP4fp2(c *FP2, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2copy(c)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2()
|
F.a = NewFP2copy(c, mem)
|
||||||
return F
|
F.b = NewFP2(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2copy(c, nil)
|
||||||
|
F.b = NewFP2(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4fp(c *FP) *FP4 {
|
func NewFP4fp(c *FP, mem *arena.Arena) *FP4 {
|
||||||
F := new(FP4)
|
if mem != nil {
|
||||||
F.a = NewFP2fp(c)
|
F := arena.New[FP4](mem)
|
||||||
F.b = NewFP2()
|
F.a = NewFP2fp(c, mem)
|
||||||
return F
|
F.b = NewFP2(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP4)
|
||||||
|
F.a = NewFP2fp(c, nil)
|
||||||
|
F.b = NewFP2(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP4rand(rng *ext.RAND) *FP4 {
|
func NewFP4rand(rng *ext.RAND) *FP4 {
|
||||||
F := NewFP4fp2s(NewFP2rand(rng), NewFP2rand(rng))
|
F := NewFP4fp2s(NewFP2rand(rng), NewFP2rand(rng), nil)
|
||||||
return F
|
return F
|
||||||
}
|
}
|
||||||
|
|
||||||
/* reduce all components of this mod Modulus */
|
/* reduce all components of this mod Modulus */
|
||||||
func (F *FP4) reduce() {
|
func (F *FP4) reduce(mem *arena.Arena) {
|
||||||
F.a.reduce()
|
F.a.reduce(mem)
|
||||||
F.b.reduce()
|
F.b.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* normalise all components of this mod Modulus */
|
/* normalise all components of this mod Modulus */
|
||||||
@ -101,12 +154,12 @@ func (F *FP4) norm() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* test this==0 ? */
|
/* test this==0 ? */
|
||||||
func (F *FP4) IsZero() bool {
|
func (F *FP4) IsZero(mem *arena.Arena) bool {
|
||||||
return F.a.IsZero() && F.b.IsZero()
|
return F.a.IsZero(mem) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP4) islarger() int {
|
func (F *FP4) islarger() int {
|
||||||
if F.IsZero() {
|
if F.IsZero(nil) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
cmp := F.b.islarger()
|
cmp := F.b.islarger()
|
||||||
@ -140,7 +193,7 @@ func FP4_fromBytes(bf []byte) *FP4 {
|
|||||||
t[i] = bf[i+MB]
|
t[i] = bf[i+MB]
|
||||||
}
|
}
|
||||||
ta := FP2_fromBytes(t[:])
|
ta := FP2_fromBytes(t[:])
|
||||||
return NewFP4fp2s(ta, tb)
|
return NewFP4fp2s(ta, tb, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Conditional move */
|
/* Conditional move */
|
||||||
@ -151,13 +204,17 @@ func (F *FP4) cmove(g *FP4, d int) {
|
|||||||
|
|
||||||
/* test this==1 ? */
|
/* test this==1 ? */
|
||||||
func (F *FP4) isunity() bool {
|
func (F *FP4) isunity() bool {
|
||||||
one := NewFP2int(1)
|
mem := arena.NewArena()
|
||||||
return F.a.Equals(one) && F.b.IsZero()
|
defer mem.Free()
|
||||||
|
one := NewFP2int(1, mem)
|
||||||
|
return F.a.Equals(one) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* test: is w real? That is, in a+ib, test that b is zero */
|
/* test: is w real? That is, in a+ib, test that b is zero */
|
||||||
func (F *FP4) isreal() bool {
|
func (F *FP4) isreal() bool {
|
||||||
return F.b.IsZero()
|
mem := arena.NewArena()
|
||||||
|
defer mem.Free()
|
||||||
|
return F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* extract real part a */
|
/* extract real part a */
|
||||||
@ -198,12 +255,12 @@ func (F *FP4) one() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Return sign */
|
/* Return sign */
|
||||||
func (F *FP4) sign() int {
|
func (F *FP4) sign(mem *arena.Arena) int {
|
||||||
p1 := F.a.sign()
|
p1 := F.a.sign(mem)
|
||||||
p2 := F.b.sign()
|
p2 := F.b.sign(mem)
|
||||||
var u int
|
var u int
|
||||||
if BIG_ENDIAN_SIGN {
|
if BIG_ENDIAN_SIGN {
|
||||||
if F.b.IsZero() {
|
if F.b.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -211,7 +268,7 @@ func (F *FP4) sign() int {
|
|||||||
p2 ^= (p1 ^ p2) & u
|
p2 ^= (p1 ^ p2) & u
|
||||||
return p2
|
return p2
|
||||||
} else {
|
} else {
|
||||||
if F.a.IsZero() {
|
if F.a.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -222,132 +279,132 @@ func (F *FP4) sign() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* set this=-this */
|
/* set this=-this */
|
||||||
func (F *FP4) Neg() {
|
func (F *FP4) Neg(mem *arena.Arena) {
|
||||||
F.norm()
|
F.norm()
|
||||||
m := NewFP2copy(F.a)
|
m := NewFP2copy(F.a, mem)
|
||||||
t := NewFP2()
|
t := NewFP2(mem)
|
||||||
m.Add(F.b)
|
m.Add(F.b, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
t.copy(m)
|
t.copy(m)
|
||||||
t.Add(F.b)
|
t.Add(F.b, mem)
|
||||||
F.b.copy(m)
|
F.b.copy(m)
|
||||||
F.b.Add(F.a)
|
F.b.Add(F.a, mem)
|
||||||
F.a.copy(t)
|
F.a.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=conjugate(this) */
|
/* this=conjugate(this) */
|
||||||
func (F *FP4) conj() {
|
func (F *FP4) conj(mem *arena.Arena) {
|
||||||
F.b.Neg()
|
F.b.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=-conjugate(this) */
|
/* this=-conjugate(this) */
|
||||||
func (F *FP4) nconj() {
|
func (F *FP4) nconj(mem *arena.Arena) {
|
||||||
F.a.Neg()
|
F.a.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this+=x */
|
/* this+=x */
|
||||||
func (F *FP4) Add(x *FP4) {
|
func (F *FP4) Add(x *FP4, mem *arena.Arena) {
|
||||||
F.a.Add(x.a)
|
F.a.Add(x.a, mem)
|
||||||
F.b.Add(x.b)
|
F.b.Add(x.b, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=x */
|
/* this-=x */
|
||||||
func (F *FP4) Sub(x *FP4) {
|
func (F *FP4) Sub(x *FP4, mem *arena.Arena) {
|
||||||
m := NewFP4copy(x)
|
m := NewFP4copy(x, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
F.Add(m)
|
F.Add(m, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=x-this */
|
/* this=x-this */
|
||||||
func (F *FP4) rsub(x *FP4) {
|
func (F *FP4) rsub(x *FP4, mem *arena.Arena) {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.Add(x)
|
F.Add(x, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP2 */
|
/* this*=s where s is FP2 */
|
||||||
func (F *FP4) pmul(s *FP2) {
|
func (F *FP4) pmul(s *FP2, mem *arena.Arena) {
|
||||||
F.a.Mul(s)
|
F.a.Mul(s, mem)
|
||||||
F.b.Mul(s)
|
F.b.Mul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP */
|
/* this*=s where s is FP */
|
||||||
func (F *FP4) qmul(s *FP) {
|
func (F *FP4) qmul(s *FP, mem *arena.Arena) {
|
||||||
F.a.pmul(s)
|
F.a.pmul(s, mem)
|
||||||
F.b.pmul(s)
|
F.b.pmul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=c where c is int */
|
/* this*=c where c is int */
|
||||||
func (F *FP4) imul(c int) {
|
func (F *FP4) imul(c int, mem *arena.Arena) {
|
||||||
F.a.imul(c)
|
F.a.imul(c, mem)
|
||||||
F.b.imul(c)
|
F.b.imul(c, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=this */
|
/* this*=this */
|
||||||
func (F *FP4) Sqr() {
|
func (F *FP4) Sqr(mem *arena.Arena) {
|
||||||
t1 := NewFP2copy(F.a)
|
t1 := NewFP2copy(F.a, mem)
|
||||||
t2 := NewFP2copy(F.b)
|
t2 := NewFP2copy(F.b, mem)
|
||||||
t3 := NewFP2copy(F.a)
|
t3 := NewFP2copy(F.a, mem)
|
||||||
|
|
||||||
t3.Mul(F.b)
|
t3.Mul(F.b, mem)
|
||||||
t1.Add(F.b)
|
t1.Add(F.b, mem)
|
||||||
t2.Mul_ip()
|
t2.Mul_ip(mem)
|
||||||
|
|
||||||
t2.Add(F.a)
|
t2.Add(F.a, mem)
|
||||||
|
|
||||||
t1.norm()
|
t1.norm()
|
||||||
t2.norm()
|
t2.norm()
|
||||||
|
|
||||||
F.a.copy(t1)
|
F.a.copy(t1)
|
||||||
|
|
||||||
F.a.Mul(t2)
|
F.a.Mul(t2, mem)
|
||||||
|
|
||||||
t2.copy(t3)
|
t2.copy(t3)
|
||||||
t2.Mul_ip()
|
t2.Mul_ip(mem)
|
||||||
t2.Add(t3)
|
t2.Add(t3, mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t2.Neg()
|
t2.Neg(mem)
|
||||||
F.a.Add(t2)
|
F.a.Add(t2, mem)
|
||||||
|
|
||||||
F.b.copy(t3)
|
F.b.copy(t3)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
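FP4.Sqr above uses the standard two-multiplication squaring for a quadratic extension: with s*s = w (the tower's non-residue), (a + s*b)^2 = (a+b)(a+w*b) - a*b*(1+w) + 2*a*b*s, so only one general product plus the product a*b are needed. A hedged sketch of the same identity over Gaussian integers, where w = -1; the gint type is illustrative only, the real code squares FP2 coefficients:

package main

import "fmt"

// gint is a Gaussian integer a + b*i with i*i = -1.
type gint struct{ a, b int64 }

// sqrNaive computes (a+bi)^2 = (a^2 - b^2) + (2ab)i directly.
func sqrNaive(x gint) gint {
	return gint{x.a*x.a - x.b*x.b, 2 * x.a * x.b}
}

// sqrFast mirrors FP4.Sqr: real part as (a+b)(a-b), imaginary part as 2ab.
// With a general non-residue w the second factor becomes a + w*b.
func sqrFast(x gint) gint {
	t1 := x.a + x.b
	t2 := x.a - x.b
	t3 := x.a * x.b
	return gint{t1 * t2, t3 + t3}
}

func main() {
	x := gint{7, -3}
	fmt.Println(sqrNaive(x), sqrFast(x)) // both print {40 -42}
}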
|
|
||||||
/* this*=y */
|
/* this*=y */
|
||||||
func (F *FP4) Mul(y *FP4) {
|
func (F *FP4) Mul(y *FP4, mem *arena.Arena) {
|
||||||
t1 := NewFP2copy(F.a)
|
t1 := NewFP2copy(F.a, mem)
|
||||||
t2 := NewFP2copy(F.b)
|
t2 := NewFP2copy(F.b, mem)
|
||||||
t3 := NewFP2()
|
t3 := NewFP2(mem)
|
||||||
t4 := NewFP2copy(F.b)
|
t4 := NewFP2copy(F.b, mem)
|
||||||
|
|
||||||
t1.Mul(y.a)
|
t1.Mul(y.a, mem)
|
||||||
t2.Mul(y.b)
|
t2.Mul(y.b, mem)
|
||||||
t3.copy(y.b)
|
t3.copy(y.b)
|
||||||
t3.Add(y.a)
|
t3.Add(y.a, mem)
|
||||||
t4.Add(F.a)
|
t4.Add(F.a, mem)
|
||||||
|
|
||||||
t3.norm()
|
t3.norm()
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t4.Mul(t3)
|
t4.Mul(t3, mem)
|
||||||
|
|
||||||
t3.copy(t1)
|
t3.copy(t1)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
t4.Add(t3)
|
t4.Add(t3, mem)
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t3.copy(t2)
|
t3.copy(t2)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
F.b.copy(t4)
|
F.b.copy(t4)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
t2.Mul_ip()
|
t2.Mul_ip(mem)
|
||||||
F.a.copy(t2)
|
F.a.copy(t2)
|
||||||
F.a.Add(t1)
|
F.a.Add(t1, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
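FP4.Mul above is the Karatsuba form of quadratic-extension multiplication: (a + s*b)(c + s*d) = (ac + w*bd) + s*((a+b)(c+d) - ac - bd), three coefficient multiplications instead of four. A hedged sketch of the same identity over Gaussian integers (w = -1); the gint type is illustrative, not part of the package:

package main

import "fmt"

type gint struct{ a, b int64 }

// mulSchoolbook: (a+bi)(c+di) = (ac - bd) + (ad + bc)i, four multiplications.
func mulSchoolbook(x, y gint) gint {
	return gint{x.a*y.a - x.b*y.b, x.a*y.b + x.b*y.a}
}

// mulKaratsuba mirrors FP4.Mul: t1 = ac, t2 = bd, and the cross term comes
// from (a+b)(c+d) - t1 - t2. With a non-residue w the real part is t1 + w*t2.
func mulKaratsuba(x, y gint) gint {
	t1 := x.a * y.a
	t2 := x.b * y.b
	cross := (x.a+x.b)*(y.a+y.b) - t1 - t2
	return gint{t1 - t2, cross}
}

func main() {
	x, y := gint{3, 5}, gint{-2, 7}
	fmt.Println(mulSchoolbook(x, y), mulKaratsuba(x, y)) // both print {-41 11}
}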
@ -358,41 +415,41 @@ func (F *FP4) toString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* this=1/this */
|
/* this=1/this */
|
||||||
func (F *FP4) Invert(h *FP) {
|
func (F *FP4) Invert(h *FP, mem *arena.Arena) {
|
||||||
t1 := NewFP2copy(F.a)
|
t1 := NewFP2copy(F.a, mem)
|
||||||
t2 := NewFP2copy(F.b)
|
t2 := NewFP2copy(F.b, mem)
|
||||||
|
|
||||||
t1.Sqr()
|
t1.Sqr(mem)
|
||||||
t2.Sqr()
|
t2.Sqr(mem)
|
||||||
t2.Mul_ip()
|
t2.Mul_ip(mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t1.Sub(t2)
|
t1.Sub(t2, mem)
|
||||||
|
|
||||||
t1.Invert(h)
|
t1.Invert(h, mem)
|
||||||
F.a.Mul(t1)
|
F.a.Mul(t1, mem)
|
||||||
t1.Neg()
|
t1.Neg(mem)
|
||||||
t1.norm()
|
t1.norm()
|
||||||
F.b.Mul(t1)
|
F.b.Mul(t1, mem)
|
||||||
}
|
}
|
||||||
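FP4.Invert above is conjugate-over-norm inversion: 1/(a + s*b) = (a - s*b) / (a^2 - w*b^2), where the norm a^2 - w*b^2 lies in the subfield and is inverted there. A hedged sketch of the same algebra over F_p[i] with i^2 = -1 and p = 2^127 - 1 (a prime for which -1 is a non-residue); the elt type and helpers are illustrative only:

package main

import (
	"fmt"
	"math/big"
)

var p = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 127), big.NewInt(1))

// elt is a + b*i in F_p[i].
type elt struct{ a, b *big.Int }

// invert computes 1/(a+bi) = (a - bi) / (a^2 + b^2): invert the norm in F_p,
// then scale the conjugate, exactly the shape of FP4.Invert.
func invert(x elt) elt {
	norm := new(big.Int).Mul(x.a, x.a)
	norm.Add(norm, new(big.Int).Mul(x.b, x.b))
	norm.Mod(norm, p)
	normInv := new(big.Int).ModInverse(norm, p)
	ra := new(big.Int).Mul(x.a, normInv)
	rb := new(big.Int).Mul(new(big.Int).Neg(x.b), normInv)
	return elt{ra.Mod(ra, p), rb.Mod(rb, p)}
}

func mul(x, y elt) elt {
	ra := new(big.Int).Sub(new(big.Int).Mul(x.a, y.a), new(big.Int).Mul(x.b, y.b))
	rb := new(big.Int).Add(new(big.Int).Mul(x.a, y.b), new(big.Int).Mul(x.b, y.a))
	return elt{ra.Mod(ra, p), rb.Mod(rb, p)}
}

func main() {
	x := elt{big.NewInt(12345), big.NewInt(67890)}
	one := mul(x, invert(x))
	fmt.Println(one.a, one.b) // 1 0
}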
|
|
||||||
/* this*=i where i = sqrt(2^i+sqrt(-1)) */
|
/* this*=i where i = sqrt(2^i+sqrt(-1)) */
|
||||||
func (F *FP4) times_i() {
|
func (F *FP4) times_i(mem *arena.Arena) {
|
||||||
t := NewFP2copy(F.b)
|
t := NewFP2copy(F.b, mem)
|
||||||
F.b.copy(F.a)
|
F.b.copy(F.a)
|
||||||
t.Mul_ip()
|
t.Mul_ip(mem)
|
||||||
F.a.copy(t)
|
F.a.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^p using Frobenius */
|
/* this=this^p using Frobenius */
|
||||||
func (F *FP4) frob(f *FP2) {
|
func (F *FP4) frob(f *FP2, mem *arena.Arena) {
|
||||||
F.a.conj()
|
F.a.conj(mem)
|
||||||
F.b.conj()
|
F.b.conj(mem)
|
||||||
F.b.Mul(f)
|
F.b.Mul(f, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^e
|
/* this=this^e
|
||||||
@ -418,48 +475,48 @@ func (F *FP4) pow(e *BIG) *FP4 {
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
/* XTR xtr_a function */
|
/* XTR xtr_a function */
|
||||||
func (F *FP4) xtr_A(w *FP4, y *FP4, z *FP4) {
|
func (F *FP4) xtr_A(w *FP4, y *FP4, z *FP4, mem *arena.Arena) {
|
||||||
r := NewFP4copy(w)
|
r := NewFP4copy(w, mem)
|
||||||
t := NewFP4copy(w)
|
t := NewFP4copy(w, mem)
|
||||||
r.Sub(y)
|
r.Sub(y, mem)
|
||||||
r.norm()
|
r.norm()
|
||||||
r.pmul(F.a)
|
r.pmul(F.a, mem)
|
||||||
t.Add(y)
|
t.Add(y, mem)
|
||||||
t.norm()
|
t.norm()
|
||||||
t.pmul(F.b)
|
t.pmul(F.b, mem)
|
||||||
t.times_i()
|
t.times_i(mem)
|
||||||
|
|
||||||
F.copy(r)
|
F.copy(r)
|
||||||
F.Add(t)
|
F.Add(t, mem)
|
||||||
F.Add(z)
|
F.Add(z, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* XTR xtr_d function */
|
/* XTR xtr_d function */
|
||||||
func (F *FP4) xtr_D() {
|
func (F *FP4) xtr_D(mem *arena.Arena) {
|
||||||
w := NewFP4copy(F)
|
w := NewFP4copy(F, mem)
|
||||||
F.Sqr()
|
F.Sqr(mem)
|
||||||
w.conj()
|
w.conj(mem)
|
||||||
w.Add(w)
|
w.Add(w, mem)
|
||||||
w.norm()
|
w.norm()
|
||||||
F.Sub(w)
|
F.Sub(w, mem)
|
||||||
F.reduce()
|
F.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* r=x^n using XTR method on traces of FP12s */
|
/* r=x^n using XTR method on traces of FP12s */
|
||||||
func (F *FP4) xtr_pow(n *BIG) *FP4 {
|
func (F *FP4) xtr_pow(n *BIG, mem *arena.Arena) *FP4 {
|
||||||
a := NewFP4int(3)
|
a := NewFP4int(3, mem)
|
||||||
b := NewFP4copy(F)
|
b := NewFP4copy(F, mem)
|
||||||
c := NewFP4copy(b)
|
c := NewFP4copy(b, mem)
|
||||||
c.xtr_D()
|
c.xtr_D(mem)
|
||||||
t := NewFP4()
|
t := NewFP4(mem)
|
||||||
r := NewFP4()
|
r := NewFP4(mem)
|
||||||
sf := NewFP4copy(F)
|
sf := NewFP4copy(F, mem)
|
||||||
sf.norm()
|
sf.norm()
|
||||||
|
|
||||||
par := n.parity()
|
par := n.parity()
|
||||||
v := NewBIGcopy(n)
|
v := NewBIGcopy(n, mem)
|
||||||
v.norm()
|
v.norm()
|
||||||
v.fshr(1)
|
v.fshr(1)
|
||||||
if par == 0 {
|
if par == 0 {
|
||||||
@ -471,20 +528,20 @@ func (F *FP4) xtr_pow(n *BIG) *FP4 {
|
|||||||
for i := nb - 1; i >= 0; i-- {
|
for i := nb - 1; i >= 0; i-- {
|
||||||
if v.bit(i) != 1 {
|
if v.bit(i) != 1 {
|
||||||
t.copy(b)
|
t.copy(b)
|
||||||
sf.conj()
|
sf.conj(mem)
|
||||||
c.conj()
|
c.conj(mem)
|
||||||
b.xtr_A(a, sf, c)
|
b.xtr_A(a, sf, c, mem)
|
||||||
sf.conj()
|
sf.conj(mem)
|
||||||
c.copy(t)
|
c.copy(t)
|
||||||
c.xtr_D()
|
c.xtr_D(mem)
|
||||||
a.xtr_D()
|
a.xtr_D(mem)
|
||||||
} else {
|
} else {
|
||||||
t.copy(a)
|
t.copy(a)
|
||||||
t.conj()
|
t.conj(mem)
|
||||||
a.copy(b)
|
a.copy(b)
|
||||||
a.xtr_D()
|
a.xtr_D(mem)
|
||||||
b.xtr_A(c, sf, t)
|
b.xtr_A(c, sf, t, mem)
|
||||||
c.xtr_D()
|
c.xtr_D(mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if par == 0 {
|
if par == 0 {
|
||||||
@ -492,25 +549,25 @@ func (F *FP4) xtr_pow(n *BIG) *FP4 {
|
|||||||
} else {
|
} else {
|
||||||
r.copy(b)
|
r.copy(b)
|
||||||
}
|
}
|
||||||
r.reduce()
|
r.reduce(mem)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
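xtr_pow above walks the bits of n and updates a triple of traces with xtr_A and xtr_D, so the full extension-field element is never materialised. The scalar analogue of that ladder is the Lucas sequence V_n = x^n + x^(-n), which can be computed from the trace t = x + x^(-1) alone; a hedged integer sketch of that ladder (not the pairing code) follows:

package main

import (
	"fmt"
	"math/big"
)

// lucasV computes V_n mod p where V_0 = 2, V_1 = t and
// V_{2k} = V_k^2 - 2, V_{2k+1} = V_k*V_{k+1} - t,
// maintaining the pair (V_k, V_{k+1}) while scanning n's bits from the top.
func lucasV(t, n, p *big.Int) *big.Int {
	two := big.NewInt(2)
	vk := new(big.Int).Set(two) // V_0
	vk1 := new(big.Int).Set(t)  // V_1
	for i := n.BitLen() - 1; i >= 0; i-- {
		if n.Bit(i) == 1 {
			vk.Mul(vk, vk1).Sub(vk, t).Mod(vk, p)       // V_{2k+1}
			vk1.Mul(vk1, vk1).Sub(vk1, two).Mod(vk1, p) // V_{2k+2}
		} else {
			vk1.Mul(vk, vk1).Sub(vk1, t).Mod(vk1, p) // V_{2k+1}
			vk.Mul(vk, vk).Sub(vk, two).Mod(vk, p)   // V_{2k}
		}
	}
	return vk
}

func main() {
	p := big.NewInt(2147483647) // 2^31 - 1, a convenient prime for the demo
	fmt.Println(lucasV(big.NewInt(5), big.NewInt(11), p)) // 30547445 = V_11
}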
|
|
||||||
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP12s. See Stam thesis. */
|
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP12s. See Stam thesis. */
|
||||||
func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *arena.Arena) *FP4 {
|
||||||
|
|
||||||
e := NewBIGcopy(a)
|
e := NewBIGcopy(a, mem)
|
||||||
d := NewBIGcopy(b)
|
d := NewBIGcopy(b, mem)
|
||||||
w := NewBIGint(0)
|
w := NewBIGint(0, mem)
|
||||||
e.norm()
|
e.norm()
|
||||||
d.norm()
|
d.norm()
|
||||||
|
|
||||||
cu := NewFP4copy(ck) // can probably be passed in w/o copying
|
cu := NewFP4copy(ck, mem) // can probably be passed in w/o copying
|
||||||
cv := NewFP4copy(F)
|
cv := NewFP4copy(F, mem)
|
||||||
cumv := NewFP4copy(ckml)
|
cumv := NewFP4copy(ckml, mem)
|
||||||
cum2v := NewFP4copy(ckm2l)
|
cum2v := NewFP4copy(ckm2l, mem)
|
||||||
r := NewFP4()
|
r := NewFP4(mem)
|
||||||
t := NewFP4()
|
t := NewFP4(mem)
|
||||||
|
|
||||||
f2 := 0
|
f2 := 0
|
||||||
for d.parity() == 0 && e.parity() == 0 {
|
for d.parity() == 0 && e.parity() == 0 {
|
||||||
@ -531,9 +588,9 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
e.norm()
|
e.norm()
|
||||||
|
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_A(cu, cumv, cum2v)
|
t.xtr_A(cu, cumv, cum2v, mem)
|
||||||
cum2v.copy(cumv)
|
cum2v.copy(cumv)
|
||||||
cum2v.conj()
|
cum2v.conj(mem)
|
||||||
cumv.copy(cv)
|
cumv.copy(cv)
|
||||||
cv.copy(cu)
|
cv.copy(cu)
|
||||||
cu.copy(t)
|
cu.copy(t)
|
||||||
@ -541,24 +598,24 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
if d.parity() == 0 {
|
if d.parity() == 0 {
|
||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
r.copy(cum2v)
|
r.copy(cum2v)
|
||||||
r.conj()
|
r.conj(mem)
|
||||||
t.copy(cumv)
|
t.copy(cumv)
|
||||||
t.xtr_A(cu, cv, r)
|
t.xtr_A(cu, cv, r, mem)
|
||||||
cum2v.copy(cumv)
|
cum2v.copy(cumv)
|
||||||
cum2v.xtr_D()
|
cum2v.xtr_D(mem)
|
||||||
cumv.copy(t)
|
cumv.copy(t)
|
||||||
cu.xtr_D()
|
cu.xtr_D(mem)
|
||||||
} else {
|
} else {
|
||||||
if e.parity() == 1 {
|
if e.parity() == 1 {
|
||||||
d.Sub(e)
|
d.Sub(e)
|
||||||
d.norm()
|
d.norm()
|
||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_A(cu, cumv, cum2v)
|
t.xtr_A(cu, cumv, cum2v, mem)
|
||||||
cu.xtr_D()
|
cu.xtr_D(mem)
|
||||||
cum2v.copy(cv)
|
cum2v.copy(cv)
|
||||||
cum2v.xtr_D()
|
cum2v.xtr_D(mem)
|
||||||
cum2v.conj()
|
cum2v.conj(mem)
|
||||||
cv.copy(t)
|
cv.copy(t)
|
||||||
} else {
|
} else {
|
||||||
w.copy(d)
|
w.copy(d)
|
||||||
@ -566,13 +623,13 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
e.copy(w)
|
e.copy(w)
|
||||||
t.copy(cumv)
|
t.copy(cumv)
|
||||||
t.xtr_D()
|
t.xtr_D(mem)
|
||||||
cumv.copy(cum2v)
|
cumv.copy(cum2v)
|
||||||
cumv.conj()
|
cumv.conj(mem)
|
||||||
cum2v.copy(t)
|
cum2v.copy(t)
|
||||||
cum2v.conj()
|
cum2v.conj(mem)
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_D()
|
t.xtr_D(mem)
|
||||||
cv.copy(cu)
|
cv.copy(cu)
|
||||||
cu.copy(t)
|
cu.copy(t)
|
||||||
}
|
}
|
||||||
@ -587,7 +644,7 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
e.Sub(d)
|
e.Sub(d)
|
||||||
e.norm()
|
e.norm()
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_A(cu, cumv, cum2v)
|
t.xtr_A(cu, cumv, cum2v, mem)
|
||||||
cum2v.copy(cumv)
|
cum2v.copy(cumv)
|
||||||
cumv.copy(cu)
|
cumv.copy(cu)
|
||||||
cu.copy(t)
|
cu.copy(t)
|
||||||
@ -598,13 +655,13 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
e.copy(w)
|
e.copy(w)
|
||||||
t.copy(cumv)
|
t.copy(cumv)
|
||||||
t.xtr_D()
|
t.xtr_D(mem)
|
||||||
cumv.copy(cum2v)
|
cumv.copy(cum2v)
|
||||||
cumv.conj()
|
cumv.conj(mem)
|
||||||
cum2v.copy(t)
|
cum2v.copy(t)
|
||||||
cum2v.conj()
|
cum2v.conj(mem)
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_D()
|
t.xtr_D(mem)
|
||||||
cv.copy(cu)
|
cv.copy(cu)
|
||||||
cu.copy(t)
|
cu.copy(t)
|
||||||
} else {
|
} else {
|
||||||
@ -616,52 +673,52 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
|
|||||||
d.copy(w)
|
d.copy(w)
|
||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
t.copy(cv)
|
t.copy(cv)
|
||||||
t.xtr_A(cu, cumv, cum2v)
|
t.xtr_A(cu, cumv, cum2v, mem)
|
||||||
cumv.conj()
|
cumv.conj(mem)
|
||||||
cum2v.copy(cu)
|
cum2v.copy(cu)
|
||||||
cum2v.xtr_D()
|
cum2v.xtr_D(mem)
|
||||||
cum2v.conj()
|
cum2v.conj(mem)
|
||||||
cu.copy(cv)
|
cu.copy(cv)
|
||||||
cu.xtr_D()
|
cu.xtr_D(mem)
|
||||||
cv.copy(t)
|
cv.copy(t)
|
||||||
} else {
|
} else {
|
||||||
d.fshr(1)
|
d.fshr(1)
|
||||||
r.copy(cum2v)
|
r.copy(cum2v)
|
||||||
r.conj()
|
r.conj(mem)
|
||||||
t.copy(cumv)
|
t.copy(cumv)
|
||||||
t.xtr_A(cu, cv, r)
|
t.xtr_A(cu, cv, r, mem)
|
||||||
cum2v.copy(cumv)
|
cum2v.copy(cumv)
|
||||||
cum2v.xtr_D()
|
cum2v.xtr_D(mem)
|
||||||
cumv.copy(t)
|
cumv.copy(t)
|
||||||
cu.xtr_D()
|
cu.xtr_D(mem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
r.copy(cv)
|
r.copy(cv)
|
||||||
r.xtr_A(cu, cumv, cum2v)
|
r.xtr_A(cu, cumv, cum2v, mem)
|
||||||
for i := 0; i < f2; i++ {
|
for i := 0; i < f2; i++ {
|
||||||
r.xtr_D()
|
r.xtr_D(mem)
|
||||||
}
|
}
|
||||||
r = r.xtr_pow(d)
|
r = r.xtr_pow(d, mem)
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this/=2 */
|
/* this/=2 */
|
||||||
func (F *FP4) div2() {
|
func (F *FP4) div2(mem *arena.Arena) {
|
||||||
F.a.div2()
|
F.a.div2(mem)
|
||||||
F.b.div2()
|
F.b.div2(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP4) div_i() {
|
func (F *FP4) div_i(mem *arena.Arena) {
|
||||||
u := NewFP2copy(F.a)
|
u := NewFP2copy(F.a, mem)
|
||||||
v := NewFP2copy(F.b)
|
v := NewFP2copy(F.b, mem)
|
||||||
u.div_ip()
|
u.div_ip(mem)
|
||||||
F.a.copy(v)
|
F.a.copy(v)
|
||||||
F.b.copy(u)
|
F.b.copy(u)
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -688,70 +745,72 @@ func (F *FP4) pow(b *BIG) {
|
|||||||
/* */
|
/* */
|
||||||
// Test for Quadratic Residue
|
// Test for Quadratic Residue
|
||||||
func (F *FP4) qr(h *FP) int {
|
func (F *FP4) qr(h *FP) int {
|
||||||
c := NewFP4copy(F)
|
mem := arena.NewArena()
|
||||||
c.conj()
|
defer mem.Free()
|
||||||
c.Mul(F)
|
c := NewFP4copy(F, mem)
|
||||||
|
c.conj(mem)
|
||||||
|
c.Mul(F, mem)
|
||||||
return c.a.qr(h)
|
return c.a.qr(h)
|
||||||
}
|
}
|
||||||
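The qr test above multiplies an element by its conjugate, which pushes the question down to the subfield, where the base case is Euler's criterion in FP. A hedged base-field sketch (isQR is our helper, not the library's):

package main

import (
	"fmt"
	"math/big"
)

// isQR reports whether a is a quadratic residue mod an odd prime p using
// Euler's criterion: a^((p-1)/2) ≡ 1 (mod p) exactly for residues.
func isQR(a, p *big.Int) bool {
	e := new(big.Int).Rsh(new(big.Int).Sub(p, big.NewInt(1)), 1) // (p-1)/2
	return new(big.Int).Exp(a, e, p).Cmp(big.NewInt(1)) == 0
}

func main() {
	p := big.NewInt(2147483647) // 2^31 - 1
	fmt.Println(isQR(big.NewInt(4), p))                               // true: 4 = 2^2
	fmt.Println(isQR(big.NewInt(2), p), big.Jacobi(big.NewInt(2), p)) // cross-check with the Jacobi symbol
}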
|
|
||||||
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
|
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
|
||||||
func (F *FP4) Sqrt(h *FP) {
|
func (F *FP4) Sqrt(h *FP, mem *arena.Arena) {
|
||||||
if F.IsZero() {
|
if F.IsZero(mem) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
a := NewFP2copy(F.a)
|
a := NewFP2copy(F.a, mem)
|
||||||
b := NewFP2()
|
b := NewFP2(mem)
|
||||||
s := NewFP2copy(F.b)
|
s := NewFP2copy(F.b, mem)
|
||||||
t := NewFP2copy(F.a)
|
t := NewFP2copy(F.a, mem)
|
||||||
hint := NewFP()
|
hint := NewFP(mem)
|
||||||
|
|
||||||
s.Sqr()
|
s.Sqr(mem)
|
||||||
a.Sqr()
|
a.Sqr(mem)
|
||||||
s.Mul_ip()
|
s.Mul_ip(mem)
|
||||||
s.norm()
|
s.norm()
|
||||||
a.Sub(s)
|
a.Sub(s, mem)
|
||||||
|
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
s.norm()
|
s.norm()
|
||||||
s.Sqrt(h)
|
s.Sqrt(h, mem)
|
||||||
|
|
||||||
a.copy(t)
|
a.copy(t)
|
||||||
b.copy(t)
|
b.copy(t)
|
||||||
|
|
||||||
a.Add(s)
|
a.Add(s, mem)
|
||||||
a.norm()
|
a.norm()
|
||||||
a.div2()
|
a.div2(mem)
|
||||||
|
|
||||||
b.copy(F.b)
|
b.copy(F.b)
|
||||||
b.div2()
|
b.div2(mem)
|
||||||
qr := a.qr(hint)
|
qr := a.qr(hint)
|
||||||
|
|
||||||
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
|
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
twk := NewFPbig(NewBIGints(TWK))
|
twk := NewFPbig(NewBIGints(TWK, mem), mem)
|
||||||
twk.Mul(hint)
|
twk.Mul(hint, mem)
|
||||||
s.div_ip()
|
s.div_ip(mem)
|
||||||
s.norm()
|
s.norm()
|
||||||
|
|
||||||
a.cmove(s, 1-qr)
|
a.cmove(s, 1-qr)
|
||||||
hint.cmove(twk, 1-qr)
|
hint.cmove(twk, 1-qr)
|
||||||
|
|
||||||
F.a.copy(a)
|
F.a.copy(a)
|
||||||
F.a.Sqrt(hint)
|
F.a.Sqrt(hint, mem)
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
s.Invert(hint)
|
s.Invert(hint, mem)
|
||||||
s.Mul(F.a)
|
s.Mul(F.a, mem)
|
||||||
F.b.copy(s)
|
F.b.copy(s)
|
||||||
F.b.Mul(b)
|
F.b.Mul(b, mem)
|
||||||
t.copy(F.a)
|
t.copy(F.a)
|
||||||
|
|
||||||
F.a.cmove(F.b, 1-qr)
|
F.a.cmove(F.b, 1-qr)
|
||||||
F.b.cmove(t, 1-qr)
|
F.b.cmove(t, 1-qr)
|
||||||
|
|
||||||
sgn := F.sign()
|
sgn := F.sign(mem)
|
||||||
nr := NewFP4copy(F)
|
nr := NewFP4copy(F, mem)
|
||||||
nr.Neg()
|
nr.Neg(mem)
|
||||||
nr.norm()
|
nr.norm()
|
||||||
F.cmove(nr, sgn)
|
F.cmove(nr, sgn)
|
||||||
}
|
}
|
||||||
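The formula quoted above FP4.Sqrt is the usual quadratic-extension square root: with n the non-residue, sqrt(a+ib) = s + i*b/(2s) where s = sqrt((a + sqrt(a^2 - n*b^2))/2). A hedged floating-point sketch with n = -1 (ordinary complex numbers), checked against the standard library; the field version additionally has to pick signs and handle the non-residue branch with the cmove calls shown above:

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

// sqrtComplex applies sqrt(a+ib) = s + i*b/(2s), s = sqrt((a + sqrt(a^2+b^2))/2).
func sqrtComplex(a, b float64) complex128 {
	s := math.Sqrt((a + math.Sqrt(a*a+b*b)) / 2)
	return complex(s, b/(2*s))
}

func main() {
	z := complex(3.0, 4.0)
	fmt.Println(sqrtComplex(real(z), imag(z))) // (2+1i)
	fmt.Println(cmplx.Sqrt(z))                 // library cross-check: (2+1i)
}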
|
File diff suppressed because it is too large
@ -23,7 +23,11 @@
|
|||||||
|
|
||||||
package bls48581
|
package bls48581
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
import (
|
||||||
|
"arena"
|
||||||
|
|
||||||
|
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
||||||
|
)
|
||||||
|
|
||||||
//import "fmt"
|
//import "fmt"
|
||||||
|
|
||||||
@ -32,66 +36,115 @@ type FP8 struct {
|
|||||||
b *FP4
|
b *FP4
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8() *FP8 {
|
func NewFP8(mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4()
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4()
|
F.a = NewFP4(mem)
|
||||||
return F
|
F.b = NewFP4(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4(nil)
|
||||||
|
F.b = NewFP4(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP8int(a int) *FP8 {
|
func NewFP8int(a int, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4int(a)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4()
|
F.a = NewFP4int(a, mem)
|
||||||
return F
|
F.b = NewFP4(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4int(a, nil)
|
||||||
|
F.b = NewFP4(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Constructors */
|
/* Constructors */
|
||||||
func NewFP8ints(a int, b int) *FP8 {
|
func NewFP8ints(a int, b int, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4int(a)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4int(b)
|
F.a = NewFP4int(a, mem)
|
||||||
return F
|
F.b = NewFP4int(b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4int(a, nil)
|
||||||
|
F.b = NewFP4int(b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8copy(x *FP8) *FP8 {
|
func NewFP8copy(x *FP8, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4copy(x.a)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4copy(x.b)
|
F.a = NewFP4copy(x.a, mem)
|
||||||
return F
|
F.b = NewFP4copy(x.b, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4copy(x.a, nil)
|
||||||
|
F.b = NewFP4copy(x.b, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8fp4s(c *FP4, d *FP4) *FP8 {
|
func NewFP8fp4s(c *FP4, d *FP4, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4copy(c)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4copy(d)
|
F.a = NewFP4copy(c, mem)
|
||||||
return F
|
F.b = NewFP4copy(d, mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4copy(c, nil)
|
||||||
|
F.b = NewFP4copy(d, nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8fp4(c *FP4) *FP8 {
|
func NewFP8fp4(c *FP4, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4copy(c)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4()
|
F.a = NewFP4copy(c, mem)
|
||||||
return F
|
F.b = NewFP4(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4copy(c, nil)
|
||||||
|
F.b = NewFP4(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8fp(c *FP) *FP8 {
|
func NewFP8fp(c *FP, mem *arena.Arena) *FP8 {
|
||||||
F := new(FP8)
|
if mem != nil {
|
||||||
F.a = NewFP4fp(c)
|
F := arena.New[FP8](mem)
|
||||||
F.b = NewFP4()
|
F.a = NewFP4fp(c, mem)
|
||||||
return F
|
F.b = NewFP4(mem)
|
||||||
|
return F
|
||||||
|
} else {
|
||||||
|
F := new(FP8)
|
||||||
|
F.a = NewFP4fp(c, nil)
|
||||||
|
F.b = NewFP4(nil)
|
||||||
|
return F
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFP8rand(rng *ext.RAND) *FP8 {
|
func NewFP8rand(rng *ext.RAND) *FP8 {
|
||||||
F := NewFP8fp4s(NewFP4rand(rng), NewFP4rand(rng))
|
F := NewFP8fp4s(NewFP4rand(rng), NewFP4rand(rng), nil)
|
||||||
return F
|
return F
|
||||||
}
|
}
|
||||||
|
|
||||||
/* reduce all components of this mod Modulus */
|
/* reduce all components of this mod Modulus */
|
||||||
func (F *FP8) reduce() {
|
func (F *FP8) reduce(mem *arena.Arena) {
|
||||||
F.a.reduce()
|
F.a.reduce(mem)
|
||||||
F.b.reduce()
|
F.b.reduce(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* normalise all components of this mod Modulus */
|
/* normalise all components of this mod Modulus */
|
||||||
@ -101,12 +154,12 @@ func (F *FP8) norm() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* test this==0 ? */
|
/* test this==0 ? */
|
||||||
func (F *FP8) IsZero() bool {
|
func (F *FP8) IsZero(mem *arena.Arena) bool {
|
||||||
return F.a.IsZero() && F.b.IsZero()
|
return F.a.IsZero(mem) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP8) islarger() int {
|
func (F *FP8) islarger() int {
|
||||||
if F.IsZero() {
|
if F.IsZero(nil) {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
cmp := F.b.islarger()
|
cmp := F.b.islarger()
|
||||||
@ -140,7 +193,7 @@ func FP8_fromBytes(bf []byte) *FP8 {
|
|||||||
t[i] = bf[i+MB]
|
t[i] = bf[i+MB]
|
||||||
}
|
}
|
||||||
ta := FP4_fromBytes(t[:])
|
ta := FP4_fromBytes(t[:])
|
||||||
return NewFP8fp4s(ta, tb)
|
return NewFP8fp4s(ta, tb, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Conditional move */
|
/* Conditional move */
|
||||||
@ -151,13 +204,15 @@ func (F *FP8) cmove(g *FP8, d int) {
|
|||||||
|
|
||||||
/* test this==1 ? */
|
/* test this==1 ? */
|
||||||
func (F *FP8) isunity() bool {
|
func (F *FP8) isunity() bool {
|
||||||
one := NewFP4int(1)
|
mem := arena.NewArena()
|
||||||
return F.a.Equals(one) && F.b.IsZero()
|
defer mem.Free()
|
||||||
|
one := NewFP4int(1, mem)
|
||||||
|
return F.a.Equals(one) && F.b.IsZero(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* test: is w real? That is, for a+ib, test whether b is zero */
|
/* test: is w real? That is, for a+ib, test whether b is zero */
|
||||||
func (F *FP8) isreal() bool {
|
func (F *FP8) isreal() bool {
|
||||||
return F.b.IsZero()
|
return F.b.IsZero(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* extract real part a */
|
/* extract real part a */
|
||||||
@ -198,12 +253,12 @@ func (F *FP8) one() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Return sign */
|
/* Return sign */
|
||||||
func (F *FP8) sign() int {
|
func (F *FP8) sign(mem *arena.Arena) int {
|
||||||
p1 := F.a.sign()
|
p1 := F.a.sign(mem)
|
||||||
p2 := F.b.sign()
|
p2 := F.b.sign(mem)
|
||||||
var u int
|
var u int
|
||||||
if BIG_ENDIAN_SIGN {
|
if BIG_ENDIAN_SIGN {
|
||||||
if F.b.IsZero() {
|
if F.b.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -211,7 +266,7 @@ func (F *FP8) sign() int {
|
|||||||
p2 ^= (p1 ^ p2) & u
|
p2 ^= (p1 ^ p2) & u
|
||||||
return p2
|
return p2
|
||||||
} else {
|
} else {
|
||||||
if F.a.IsZero() {
|
if F.a.IsZero(mem) {
|
||||||
u = 1
|
u = 1
|
||||||
} else {
|
} else {
|
||||||
u = 0
|
u = 0
|
||||||
@ -222,137 +277,137 @@ func (F *FP8) sign() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* set this=-this */
|
/* set this=-this */
|
||||||
func (F *FP8) Neg() {
|
func (F *FP8) Neg(mem *arena.Arena) {
|
||||||
F.norm()
|
F.norm()
|
||||||
m := NewFP4copy(F.a)
|
m := NewFP4copy(F.a, mem)
|
||||||
t := NewFP4()
|
t := NewFP4(mem)
|
||||||
m.Add(F.b)
|
m.Add(F.b, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
t.copy(m)
|
t.copy(m)
|
||||||
t.Add(F.b)
|
t.Add(F.b, mem)
|
||||||
F.b.copy(m)
|
F.b.copy(m)
|
||||||
F.b.Add(F.a)
|
F.b.Add(F.a, mem)
|
||||||
F.a.copy(t)
|
F.a.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=conjugate(this) */
|
/* this=conjugate(this) */
|
||||||
func (F *FP8) conj() {
|
func (F *FP8) conj(mem *arena.Arena) {
|
||||||
F.b.Neg()
|
F.b.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=-conjugate(this) */
|
/* this=-conjugate(this) */
|
||||||
func (F *FP8) nconj() {
|
func (F *FP8) nconj(mem *arena.Arena) {
|
||||||
F.a.Neg()
|
F.a.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this+=x */
|
/* this+=x */
|
||||||
func (F *FP8) Add(x *FP8) {
|
func (F *FP8) Add(x *FP8, mem *arena.Arena) {
|
||||||
F.a.Add(x.a)
|
F.a.Add(x.a, mem)
|
||||||
F.b.Add(x.b)
|
F.b.Add(x.b, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=x */
|
/* this-=x */
|
||||||
func (F *FP8) Sub(x *FP8) {
|
func (F *FP8) Sub(x *FP8, mem *arena.Arena) {
|
||||||
m := NewFP8copy(x)
|
m := NewFP8copy(x, mem)
|
||||||
m.Neg()
|
m.Neg(mem)
|
||||||
F.Add(m)
|
F.Add(m, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this-=x */
|
/* this-=x */
|
||||||
func (F *FP8) rsub(x *FP8) {
|
func (F *FP8) rsub(x *FP8, mem *arena.Arena) {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.Add(x)
|
F.Add(x, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP4 */
|
/* this*=s where s is FP4 */
|
||||||
func (F *FP8) pmul(s *FP4) {
|
func (F *FP8) pmul(s *FP4, mem *arena.Arena) {
|
||||||
F.a.Mul(s)
|
F.a.Mul(s, mem)
|
||||||
F.b.Mul(s)
|
F.b.Mul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP2 */
|
/* this*=s where s is FP2 */
|
||||||
func (F *FP8) qmul(s *FP2) {
|
func (F *FP8) qmul(s *FP2, mem *arena.Arena) {
|
||||||
F.a.pmul(s)
|
F.a.pmul(s, mem)
|
||||||
F.b.pmul(s)
|
F.b.pmul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=s where s is FP */
|
/* this*=s where s is FP */
|
||||||
func (F *FP8) tmul(s *FP) {
|
func (F *FP8) tmul(s *FP, mem *arena.Arena) {
|
||||||
F.a.qmul(s)
|
F.a.qmul(s, mem)
|
||||||
F.b.qmul(s)
|
F.b.qmul(s, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=c where c is int */
|
/* this*=c where c is int */
|
||||||
func (F *FP8) imul(c int) {
|
func (F *FP8) imul(c int, mem *arena.Arena) {
|
||||||
F.a.imul(c)
|
F.a.imul(c, mem)
|
||||||
F.b.imul(c)
|
F.b.imul(c, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=this */
|
/* this*=this */
|
||||||
func (F *FP8) Sqr() {
|
func (F *FP8) Sqr(mem *arena.Arena) {
|
||||||
t1 := NewFP4copy(F.a)
|
t1 := NewFP4copy(F.a, mem)
|
||||||
t2 := NewFP4copy(F.b)
|
t2 := NewFP4copy(F.b, mem)
|
||||||
t3 := NewFP4copy(F.a)
|
t3 := NewFP4copy(F.a, mem)
|
||||||
|
|
||||||
t3.Mul(F.b)
|
t3.Mul(F.b, mem)
|
||||||
t1.Add(F.b)
|
t1.Add(F.b, mem)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
|
|
||||||
t2.Add(F.a)
|
t2.Add(F.a, mem)
|
||||||
|
|
||||||
t1.norm()
|
t1.norm()
|
||||||
t2.norm()
|
t2.norm()
|
||||||
|
|
||||||
F.a.copy(t1)
|
F.a.copy(t1)
|
||||||
F.a.Mul(t2)
|
F.a.Mul(t2, mem)
|
||||||
|
|
||||||
t2.copy(t3)
|
t2.copy(t3)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
t2.Add(t3)
|
t2.Add(t3, mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t2.Neg()
|
t2.Neg(mem)
|
||||||
F.a.Add(t2)
|
F.a.Add(t2, mem)
|
||||||
|
|
||||||
F.b.copy(t3)
|
F.b.copy(t3)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=y */
|
/* this*=y */
|
||||||
func (F *FP8) Mul(y *FP8) {
|
func (F *FP8) Mul(y *FP8, mem *arena.Arena) {
|
||||||
t1 := NewFP4copy(F.a)
|
t1 := NewFP4copy(F.a, mem)
|
||||||
t2 := NewFP4copy(F.b)
|
t2 := NewFP4copy(F.b, mem)
|
||||||
t3 := NewFP4()
|
t3 := NewFP4(mem)
|
||||||
t4 := NewFP4copy(F.b)
|
t4 := NewFP4copy(F.b, mem)
|
||||||
|
|
||||||
t1.Mul(y.a)
|
t1.Mul(y.a, mem)
|
||||||
t2.Mul(y.b)
|
t2.Mul(y.b, mem)
|
||||||
t3.copy(y.b)
|
t3.copy(y.b)
|
||||||
t3.Add(y.a)
|
t3.Add(y.a, mem)
|
||||||
t4.Add(F.a)
|
t4.Add(F.a, mem)
|
||||||
|
|
||||||
t3.norm()
|
t3.norm()
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t4.Mul(t3)
|
t4.Mul(t3, mem)
|
||||||
|
|
||||||
t3.copy(t1)
|
t3.copy(t1)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
t4.Add(t3)
|
t4.Add(t3, mem)
|
||||||
t4.norm()
|
t4.norm()
|
||||||
|
|
||||||
t3.copy(t2)
|
t3.copy(t2)
|
||||||
t3.Neg()
|
t3.Neg(mem)
|
||||||
F.b.copy(t4)
|
F.b.copy(t4)
|
||||||
F.b.Add(t3)
|
F.b.Add(t3, mem)
|
||||||
|
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
F.a.copy(t2)
|
F.a.copy(t2)
|
||||||
F.a.Add(t1)
|
F.a.Add(t1, mem)
|
||||||
|
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
@ -363,55 +418,55 @@ func (F *FP8) toString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* this=1/this */
|
/* this=1/this */
|
||||||
func (F *FP8) Invert(h *FP) {
|
func (F *FP8) Invert(h *FP, mem *arena.Arena) {
|
||||||
t1 := NewFP4copy(F.a)
|
t1 := NewFP4copy(F.a, mem)
|
||||||
t2 := NewFP4copy(F.b)
|
t2 := NewFP4copy(F.b, mem)
|
||||||
|
|
||||||
t1.Sqr()
|
t1.Sqr(mem)
|
||||||
t2.Sqr()
|
t2.Sqr(mem)
|
||||||
t2.times_i()
|
t2.times_i(mem)
|
||||||
t2.norm()
|
t2.norm()
|
||||||
t1.Sub(t2)
|
t1.Sub(t2, mem)
|
||||||
t1.norm()
|
t1.norm()
|
||||||
|
|
||||||
t1.Invert(h)
|
t1.Invert(h, mem)
|
||||||
|
|
||||||
F.a.Mul(t1)
|
F.a.Mul(t1, mem)
|
||||||
t1.Neg()
|
t1.Neg(mem)
|
||||||
t1.norm()
|
t1.norm()
|
||||||
F.b.Mul(t1)
|
F.b.Mul(t1, mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
|
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
|
||||||
func (F *FP8) times_i() {
|
func (F *FP8) times_i(mem *arena.Arena) {
|
||||||
s := NewFP4copy(F.b)
|
s := NewFP4copy(F.b, mem)
|
||||||
t := NewFP4copy(F.a)
|
t := NewFP4copy(F.a, mem)
|
||||||
s.times_i()
|
s.times_i(mem)
|
||||||
F.a.copy(s)
|
F.a.copy(s)
|
||||||
F.b.copy(t)
|
F.b.copy(t)
|
||||||
F.norm()
|
F.norm()
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP8) times_i2() {
|
func (F *FP8) times_i2(mem *arena.Arena) {
|
||||||
F.a.times_i()
|
F.a.times_i(mem)
|
||||||
F.b.times_i()
|
F.b.times_i(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^p using Frobenius */
|
/* this=this^p using Frobenius */
|
||||||
func (F *FP8) frob(f *FP2) {
|
func (F *FP8) frob(f *FP2, mem *arena.Arena) {
|
||||||
ff := NewFP2copy(f)
|
ff := NewFP2copy(f, mem)
|
||||||
ff.Sqr()
|
ff.Sqr(mem)
|
||||||
ff.Mul_ip()
|
ff.Mul_ip(mem)
|
||||||
ff.norm()
|
ff.norm()
|
||||||
|
|
||||||
F.a.frob(ff)
|
F.a.frob(ff, mem)
|
||||||
F.b.frob(ff)
|
F.b.frob(ff, mem)
|
||||||
F.b.pmul(f)
|
F.b.pmul(f, mem)
|
||||||
F.b.times_i()
|
F.b.times_i(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* this=this^e
|
/* this=this^e
|
||||||
@ -671,19 +726,19 @@ func (F *FP8) xtr_pow2(ck *FP8, ckml *FP8, ckm2l *FP8, a *BIG, b *BIG) *FP8 {
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
/* this/=2 */
|
/* this/=2 */
|
||||||
func (F *FP8) div2() {
|
func (F *FP8) div2(mem *arena.Arena) {
|
||||||
F.a.div2()
|
F.a.div2(mem)
|
||||||
F.b.div2()
|
F.b.div2(mem)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (F *FP8) div_i() {
|
func (F *FP8) div_i(mem *arena.Arena) {
|
||||||
u := NewFP4copy(F.a)
|
u := NewFP4copy(F.a, mem)
|
||||||
v := NewFP4copy(F.b)
|
v := NewFP4copy(F.b, mem)
|
||||||
u.div_i()
|
u.div_i(mem)
|
||||||
F.a.copy(v)
|
F.a.copy(v)
|
||||||
F.b.copy(u)
|
F.b.copy(u)
|
||||||
if TOWER == POSITOWER {
|
if TOWER == POSITOWER {
|
||||||
F.Neg()
|
F.Neg(mem)
|
||||||
F.norm()
|
F.norm()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -710,70 +765,72 @@ func (F *FP8) pow(b *BIG) {
|
|||||||
/* */
|
/* */
|
||||||
// Test for Quadratic Residue
|
// Test for Quadratic Residue
|
||||||
func (F *FP8) qr(h *FP) int {
|
func (F *FP8) qr(h *FP) int {
|
||||||
c := NewFP8copy(F)
|
mem := arena.NewArena()
|
||||||
c.conj()
|
defer mem.Free()
|
||||||
c.Mul(F)
|
c := NewFP8copy(F, mem)
|
||||||
|
c.conj(mem)
|
||||||
|
c.Mul(F, mem)
|
||||||
return c.a.qr(h)
|
return c.a.qr(h)
|
||||||
}
|
}
|
||||||
|
|
||||||
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
|
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
|
||||||
func (F *FP8) Sqrt(h *FP) {
|
func (F *FP8) Sqrt(h *FP, mem *arena.Arena) {
|
||||||
if F.IsZero() {
|
if F.IsZero(mem) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
a := NewFP4copy(F.a)
|
a := NewFP4copy(F.a, mem)
|
||||||
b := NewFP4()
|
b := NewFP4(mem)
|
||||||
s := NewFP4copy(F.b)
|
s := NewFP4copy(F.b, mem)
|
||||||
t := NewFP4copy(F.a)
|
t := NewFP4copy(F.a, mem)
|
||||||
hint := NewFP()
|
hint := NewFP(mem)
|
||||||
|
|
||||||
s.Sqr()
|
s.Sqr(mem)
|
||||||
a.Sqr()
|
a.Sqr(mem)
|
||||||
s.times_i()
|
s.times_i(mem)
|
||||||
s.norm()
|
s.norm()
|
||||||
a.Sub(s)
|
a.Sub(s, mem)
|
||||||
|
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
s.norm()
|
s.norm()
|
||||||
|
|
||||||
s.Sqrt(h)
|
s.Sqrt(h, mem)
|
||||||
a.copy(t)
|
a.copy(t)
|
||||||
b.copy(t)
|
b.copy(t)
|
||||||
|
|
||||||
a.Add(s)
|
a.Add(s, mem)
|
||||||
a.norm()
|
a.norm()
|
||||||
a.div2()
|
a.div2(mem)
|
||||||
|
|
||||||
b.copy(F.b)
|
b.copy(F.b)
|
||||||
b.div2()
|
b.div2(mem)
|
||||||
qr := a.qr(hint)
|
qr := a.qr(hint)
|
||||||
|
|
||||||
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
|
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
twk := NewFPbig(NewBIGints(TWK))
|
twk := NewFPbig(NewBIGints(TWK, mem), mem)
|
||||||
twk.Mul(hint)
|
twk.Mul(hint, mem)
|
||||||
s.div_i()
|
s.div_i(mem)
|
||||||
s.norm()
|
s.norm()
|
||||||
|
|
||||||
a.cmove(s, 1-qr)
|
a.cmove(s, 1-qr)
|
||||||
hint.cmove(twk, 1-qr)
|
hint.cmove(twk, 1-qr)
|
||||||
|
|
||||||
F.a.copy(a)
|
F.a.copy(a)
|
||||||
F.a.Sqrt(hint)
|
F.a.Sqrt(hint, mem)
|
||||||
s.copy(a)
|
s.copy(a)
|
||||||
s.Invert(hint)
|
s.Invert(hint, mem)
|
||||||
s.Mul(F.a)
|
s.Mul(F.a, mem)
|
||||||
F.b.copy(s)
|
F.b.copy(s)
|
||||||
F.b.Mul(b)
|
F.b.Mul(b, mem)
|
||||||
t.copy(F.a)
|
t.copy(F.a)
|
||||||
|
|
||||||
F.a.cmove(F.b, 1-qr)
|
F.a.cmove(F.b, 1-qr)
|
||||||
F.b.cmove(t, 1-qr)
|
F.b.cmove(t, 1-qr)
|
||||||
|
|
||||||
sgn := F.sign()
|
sgn := F.sign(mem)
|
||||||
nr := NewFP8copy(F)
|
nr := NewFP8copy(F, mem)
|
||||||
nr.Neg()
|
nr.Neg(mem)
|
||||||
nr.norm()
|
nr.norm()
|
||||||
F.cmove(nr, sgn)
|
F.cmove(nr, sgn)
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,328 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
|
||||||
*
|
|
||||||
* This file is part of MIRACL Core
|
|
||||||
* (see https://github.com/miracl/ext..
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Hybrid Public Key Encryption */
|
|
||||||
|
|
||||||
/* Following https://datatracker.ietf.org/doc/draft-irtf-cfrg-hpke/?include_text=1 */
|
|
||||||
|
|
||||||
package bls48581
|
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
|
||||||
|
|
||||||
//import "fmt"
|
|
||||||
|
|
||||||
func reverse(X []byte) {
|
|
||||||
lx := len(X)
|
|
||||||
for i := 0; i < lx/2; i++ {
|
|
||||||
ch := X[i]
|
|
||||||
X[i] = X[lx-i-1]
|
|
||||||
X[lx-i-1] = ch
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func labeledExtract(SALT []byte, SUITE_ID []byte, label string, IKM []byte) []byte {
|
|
||||||
rfc := "HPKE-v1"
|
|
||||||
RFC := []byte(rfc)
|
|
||||||
LABEL := []byte(label)
|
|
||||||
var LIKM []byte
|
|
||||||
for i := 0; i < len(RFC); i++ {
|
|
||||||
LIKM = append(LIKM, RFC[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(SUITE_ID); i++ {
|
|
||||||
LIKM = append(LIKM, SUITE_ID[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(LABEL); i++ {
|
|
||||||
LIKM = append(LIKM, LABEL[i])
|
|
||||||
}
|
|
||||||
if IKM != nil {
|
|
||||||
for i := 0; i < len(IKM); i++ {
|
|
||||||
LIKM = append(LIKM, IKM[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ext.HKDF_Extract(ext.MC_SHA2, HASH_TYPE, SALT, LIKM)
|
|
||||||
}
|
|
||||||
|
|
||||||
func labeledExpand(PRK []byte, SUITE_ID []byte, label string, INFO []byte, L int) []byte {
|
|
||||||
rfc := "HPKE-v1"
|
|
||||||
RFC := []byte(rfc)
|
|
||||||
LABEL := []byte(label)
|
|
||||||
AR := ext.InttoBytes(L, 2)
|
|
||||||
var LINFO []byte
|
|
||||||
for i := 0; i < len(AR); i++ {
|
|
||||||
LINFO = append(LINFO, AR[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(RFC); i++ {
|
|
||||||
LINFO = append(LINFO, RFC[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(SUITE_ID); i++ {
|
|
||||||
LINFO = append(LINFO, SUITE_ID[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(LABEL); i++ {
|
|
||||||
LINFO = append(LINFO, LABEL[i])
|
|
||||||
}
|
|
||||||
if INFO != nil {
|
|
||||||
for i := 0; i < len(INFO); i++ {
|
|
||||||
LINFO = append(LINFO, INFO[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ext.HKDF_Expand(ext.MC_SHA2, HASH_TYPE, L, PRK, LINFO)
|
|
||||||
}
|
|
||||||
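The two deleted helpers above implement RFC 9180's labeled HKDF: the "HPKE-v1" prefix, a suite identifier and the label are prepended to the IKM before Extract, and a two-byte output length plus the same labels are prepended to the info before Expand. A hedged re-sketch on top of x/crypto's HKDF and SHA-256 (the original uses the ext package and this suite's SHA-512 hash); the suite id literal below is only an example:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func labeledExtract(salt, suiteID []byte, label string, ikm []byte) []byte {
	labeledIKM := append([]byte("HPKE-v1"), suiteID...)
	labeledIKM = append(labeledIKM, label...)
	labeledIKM = append(labeledIKM, ikm...)
	return hkdf.Extract(sha256.New, labeledIKM, salt)
}

func labeledExpand(prk, suiteID []byte, label string, info []byte, length int) []byte {
	labeledInfo := []byte{byte(length >> 8), byte(length)} // I2OSP(L, 2)
	labeledInfo = append(labeledInfo, "HPKE-v1"...)
	labeledInfo = append(labeledInfo, suiteID...)
	labeledInfo = append(labeledInfo, label...)
	labeledInfo = append(labeledInfo, info...)
	out := make([]byte, length)
	if _, err := io.ReadFull(hkdf.Expand(sha256.New, prk, labeledInfo), out); err != nil {
		panic(err)
	}
	return out
}

func main() {
	suiteID := []byte("KEM\x00\x20") // "KEM" || I2OSP(kem_id, 2), kem_id 0x0020 as an example
	prk := labeledExtract(nil, suiteID, "eae_prk", []byte("example DH output"))
	fmt.Println(hex.EncodeToString(labeledExpand(prk, suiteID, "shared_secret", []byte("kem context"), 32)))
}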
|
|
||||||
func extractAndExpand(config_id int, DH []byte, context []byte) []byte {
|
|
||||||
kem := config_id & 255
|
|
||||||
txt := "KEM"
|
|
||||||
KEM_ID := ext.InttoBytes(kem, 2)
|
|
||||||
KEM := []byte(txt)
|
|
||||||
var SUITE_ID []byte
|
|
||||||
for i := 0; i < len(KEM); i++ {
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM[i])
|
|
||||||
}
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM_ID[0])
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM_ID[1])
|
|
||||||
|
|
||||||
PRK := labeledExtract(nil, SUITE_ID, "eae_prk", DH)
|
|
||||||
return labeledExpand(PRK, SUITE_ID, "shared_secret", context, HASH_TYPE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func DeriveKeyPair(config_id int, SK []byte, PK []byte, SEED []byte) bool {
|
|
||||||
counter := 0
|
|
||||||
kem := config_id & 255
|
|
||||||
|
|
||||||
txt := "KEM"
|
|
||||||
KEM_ID := ext.InttoBytes(kem, 2)
|
|
||||||
KEM := []byte(txt)
|
|
||||||
var SUITE_ID []byte
|
|
||||||
for i := 0; i < len(KEM); i++ {
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM[i])
|
|
||||||
}
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM_ID[0])
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM_ID[1])
|
|
||||||
|
|
||||||
PRK := labeledExtract(nil, SUITE_ID, "dkp_prk", SEED)
|
|
||||||
var S []byte
|
|
||||||
if kem == 32 || kem == 33 { // RFC7748
|
|
||||||
S = labeledExpand(PRK, SUITE_ID, "sk", nil, EGS)
|
|
||||||
reverse(S)
|
|
||||||
if kem == 32 {
|
|
||||||
S[EGS-1] &= 248
|
|
||||||
S[0] &= 127
|
|
||||||
S[0] |= 64
|
|
||||||
} else {
|
|
||||||
S[EGS-1] &= 252
|
|
||||||
S[0] |= 128
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
bit_mask := 0xff
|
|
||||||
if kem == 18 {
|
|
||||||
bit_mask = 1
|
|
||||||
}
|
|
||||||
for i := 0; i < EGS; i++ {
|
|
||||||
S = append(S, 0)
|
|
||||||
}
|
|
||||||
for !ECDH_IN_RANGE(S) && counter < 256 {
|
|
||||||
var INFO [1]byte
|
|
||||||
INFO[0] = byte(counter)
|
|
||||||
S = labeledExpand(PRK, SUITE_ID, "candidate", INFO[:], EGS)
|
|
||||||
S[0] &= byte(bit_mask)
|
|
||||||
counter++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := 0; i < EGS; i++ {
|
|
||||||
SK[i] = S[i]
|
|
||||||
}
|
|
||||||
ECDH_KEY_PAIR_GENERATE(nil, SK, PK)
|
|
||||||
if kem == 32 || kem == 33 {
|
|
||||||
reverse(PK)
|
|
||||||
}
|
|
||||||
if counter < 256 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func Encap(config_id int, skE []byte, pkE []byte, pkR []byte) []byte {
|
|
||||||
DH := make([]byte, EFS)
|
|
||||||
var kemcontext []byte
|
|
||||||
kem := config_id & 255
|
|
||||||
|
|
||||||
if kem == 32 || kem == 33 {
|
|
||||||
reverse(pkR)
|
|
||||||
ECDH_ECPSVDP_DH(skE, pkR, DH[:], 0)
|
|
||||||
reverse(pkR)
|
|
||||||
reverse(DH[:])
|
|
||||||
} else {
|
|
||||||
ECDH_ECPSVDP_DH(skE, pkR, DH[:], 0)
|
|
||||||
}
|
|
||||||
for i := 0; i < len(pkE); i++ {
|
|
||||||
kemcontext = append(kemcontext, pkE[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(pkR); i++ {
|
|
||||||
kemcontext = append(kemcontext, pkR[i])
|
|
||||||
}
|
|
||||||
return extractAndExpand(config_id, DH[:], kemcontext)
|
|
||||||
}
|
|
||||||
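Encap above is the DHKEM flow: run the ephemeral secret against the recipient's public key, build kem_context = enc || pkR, and feed both through extractAndExpand. A hedged sketch of the same flow over X25519 using x/crypto; deriveSharedSecret is a placeholder for extractAndExpand, and none of the names below come from this package:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func encapX25519(pkR []byte, deriveSharedSecret func(dh, kemContext []byte) []byte) (ss, enc []byte, err error) {
	skE := make([]byte, curve25519.ScalarSize)
	if _, err = rand.Read(skE); err != nil {
		return nil, nil, err
	}
	enc, err = curve25519.X25519(skE, curve25519.Basepoint) // ephemeral public key
	if err != nil {
		return nil, nil, err
	}
	dh, err := curve25519.X25519(skE, pkR) // raw DH value
	if err != nil {
		return nil, nil, err
	}
	kemContext := append(append([]byte{}, enc...), pkR...)
	return deriveSharedSecret(dh, kemContext), enc, nil
}

func main() {
	skR := make([]byte, curve25519.ScalarSize) // recipient key pair, demo only
	rand.Read(skR)
	pkR, _ := curve25519.X25519(skR, curve25519.Basepoint)

	ss, enc, err := encapX25519(pkR, func(dh, ctx []byte) []byte { return dh }) // identity KDF stands in for extractAndExpand
	fmt.Println(err, len(ss), len(enc))
}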
|
|
||||||
func Decap(config_id int, skR []byte, pkE []byte, pkR []byte) []byte {
|
|
||||||
DH := make([]byte, EFS)
|
|
||||||
var kemcontext []byte
|
|
||||||
kem := config_id & 255
|
|
||||||
|
|
||||||
if kem == 32 || kem == 33 {
|
|
||||||
reverse(pkE)
|
|
||||||
ECDH_ECPSVDP_DH(skR, pkE, DH[:], 0)
|
|
||||||
reverse(pkE)
|
|
||||||
reverse(DH[:])
|
|
||||||
} else {
|
|
||||||
ECDH_ECPSVDP_DH(skR, pkE, DH[:], 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(pkE); i++ {
|
|
||||||
kemcontext = append(kemcontext, pkE[i])
|
|
||||||
}
|
|
||||||
for i := 0; i < len(pkR); i++ {
|
|
||||||
kemcontext = append(kemcontext, pkR[i])
|
|
||||||
}
|
|
||||||
return extractAndExpand(config_id, DH[:], kemcontext)
|
|
||||||
}
|
|
||||||
|
|
||||||
func AuthEncap(config_id int, skE []byte, skS []byte, pkE []byte, pkR []byte, pkS []byte) []byte {
|
|
||||||
pklen := len(pkE)
|
|
||||||
DH := make([]byte, EFS)
|
|
||||||
DH1 := make([]byte, EFS)
|
|
||||||
|
|
||||||
kemcontext := make([]byte, 3*pklen)
|
|
||||||
kem := config_id & 255
|
|
||||||
|
|
||||||
if kem == 32 || kem == 33 {
|
|
||||||
reverse(pkR)
|
|
||||||
ECDH_ECPSVDP_DH(skE, pkR, DH[:], 0)
|
|
||||||
ECDH_ECPSVDP_DH(skS, pkR, DH1[:], 0)
|
|
||||||
reverse(pkR)
|
|
||||||
reverse(DH[:])
|
|
||||||
reverse(DH1[:])
|
|
||||||
} else {
|
|
||||||
ECDH_ECPSVDP_DH(skE, pkR, DH[:], 0)
|
|
||||||
ECDH_ECPSVDP_DH(skS, pkR, DH1[:], 0)
|
|
||||||
}
|
|
||||||
ZZ := make([]byte, 2*EFS)
|
|
||||||
for i := 0; i < EFS; i++ {
|
|
||||||
ZZ[i] = DH[i]
|
|
||||||
ZZ[EFS+i] = DH1[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < pklen; i++ {
|
|
||||||
kemcontext[i] = pkE[i]
|
|
||||||
kemcontext[pklen+i] = pkR[i]
|
|
||||||
kemcontext[2*pklen+i] = pkS[i]
|
|
||||||
}
|
|
||||||
return extractAndExpand(config_id, ZZ[:], kemcontext)
|
|
||||||
}
|
|
||||||
|
|
||||||
func AuthDecap(config_id int, skR []byte, pkE []byte, pkR []byte, pkS []byte) []byte {
|
|
||||||
pklen := len(pkE)
|
|
||||||
DH := make([]byte, EFS)
|
|
||||||
DH1 := make([]byte, EFS)
|
|
||||||
kemcontext := make([]byte, 3*pklen)
|
|
||||||
|
|
||||||
kem := config_id & 255
|
|
||||||
|
|
||||||
if kem == 32 || kem == 33 {
|
|
||||||
reverse(pkE)
|
|
||||||
reverse(pkS)
|
|
||||||
ECDH_ECPSVDP_DH(skR[:], pkE, DH[:], 0)
|
|
||||||
ECDH_ECPSVDP_DH(skR[:], pkS, DH1[:], 0)
|
|
||||||
reverse(pkE)
|
|
||||||
reverse(pkS)
|
|
||||||
reverse(DH[:])
|
|
||||||
reverse(DH1[:])
|
|
||||||
} else {
|
|
||||||
ECDH_ECPSVDP_DH(skR[:], pkE, DH[:], 0)
|
|
||||||
ECDH_ECPSVDP_DH(skR[:], pkS, DH1[:], 0)
|
|
||||||
}
|
|
||||||
ZZ := make([]byte, 2*EFS)
|
|
||||||
for i := 0; i < EFS; i++ {
|
|
||||||
ZZ[i] = DH[i]
|
|
||||||
ZZ[EFS+i] = DH1[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < pklen; i++ {
|
|
||||||
kemcontext[i] = pkE[i]
|
|
||||||
kemcontext[pklen+i] = pkR[i]
|
|
||||||
kemcontext[2*pklen+i] = pkS[i]
|
|
||||||
}
|
|
||||||
return extractAndExpand(config_id, ZZ[:], kemcontext)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
func printBinary(array []byte) {
|
|
||||||
for i := 0; i < len(array); i++ {
|
|
||||||
fmt.Printf("%02x", array[i])
|
|
||||||
}
|
|
||||||
fmt.Printf("\n")
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
func KeySchedule(config_id int, mode int, Z []byte, info []byte, psk []byte, pskID []byte) ([]byte, []byte, []byte) {
|
|
||||||
var context []byte
|
|
||||||
|
|
||||||
kem := config_id & 255
|
|
||||||
kdf := (config_id >> 8) & 3
|
|
||||||
aead := (config_id >> 10) & 3
|
|
||||||
|
|
||||||
txt := "HPKE"
|
|
||||||
KEM := []byte(txt)
|
|
||||||
var SUITE_ID []byte
|
|
||||||
for i := 0; i < len(KEM); i++ {
|
|
||||||
SUITE_ID = append(SUITE_ID, KEM[i])
|
|
||||||
}
|
|
||||||
num := ext.InttoBytes(kem, 2)
|
|
||||||
SUITE_ID = append(SUITE_ID, num[0])
|
|
||||||
SUITE_ID = append(SUITE_ID, num[1])
|
|
||||||
num = ext.InttoBytes(kdf, 2)
|
|
||||||
SUITE_ID = append(SUITE_ID, num[0])
|
|
||||||
SUITE_ID = append(SUITE_ID, num[1])
|
|
||||||
num = ext.InttoBytes(aead, 2)
|
|
||||||
SUITE_ID = append(SUITE_ID, num[0])
|
|
||||||
SUITE_ID = append(SUITE_ID, num[1])
|
|
||||||
|
|
||||||
ar := ext.InttoBytes(mode, 1)
|
|
||||||
for i := 0; i < len(ar); i++ {
|
|
||||||
context = append(context, ar[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
H := labeledExtract(nil, SUITE_ID, "psk_id_hash", pskID)
|
|
||||||
for i := 0; i < HASH_TYPE; i++ {
|
|
||||||
context = append(context, H[i])
|
|
||||||
}
|
|
||||||
H = labeledExtract(nil, SUITE_ID, "info_hash", info)
|
|
||||||
for i := 0; i < HASH_TYPE; i++ {
|
|
||||||
context = append(context, H[i])
|
|
||||||
}
|
|
||||||
//H=labeledExtract(nil,SUITE_ID,"psk_hash",psk)
|
|
||||||
//secret:=labeledExtract(H,SUITE_ID,"secret",Z)
|
|
||||||
|
|
||||||
secret := labeledExtract(Z, SUITE_ID, "secret", psk)
|
|
||||||
|
|
||||||
key := labeledExpand(secret, SUITE_ID, "key", context, AESKEY)
|
|
||||||
nonce := labeledExpand(secret, SUITE_ID, "base_nonce", context, 12)
|
|
||||||
exp_secret := labeledExpand(secret, SUITE_ID, "exp", context, HASH_TYPE)
|
|
||||||
|
|
||||||
return key, nonce, exp_secret
|
|
||||||
}
|
|
@ -1,202 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
|
||||||
*
|
|
||||||
* This file is part of MIRACL Core
|
|
||||||
* (see https://github.com/miracl/ext..
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* MPIN 256-bit API Functions */
|
|
||||||
|
|
||||||
package bls48581
|
|
||||||
|
|
||||||
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
|
|
||||||
|
|
||||||
//import "fmt"
|
|
||||||
|
|
||||||
const MFS int = int(MODBYTES)
|
|
||||||
const MGS int = int(MODBYTES)
|
|
||||||
const BAD_PARAMS int = -11
|
|
||||||
const INVALID_POINT int = -14
|
|
||||||
const WRONG_ORDER int = -18
|
|
||||||
const BAD_PIN int = -19
|
|
||||||
|
|
||||||
/* Configure your PIN here */
|
|
||||||
|
|
||||||
const MAXPIN int32 = 10000 /* PIN less than this */
|
|
||||||
const PBLEN int32 = 14 /* Number of bits in PIN */
|
|
||||||
|
|
||||||
func MPIN_HASH_ID(sha int, ID []byte) []byte {
|
|
||||||
return ext.GPhashit(ext.MC_SHA2, sha, int(MODBYTES), 0, nil, -1, ID)
|
|
||||||
//return mhashit(sha, 0, ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func roundup(a int, b int) int {
|
|
||||||
return (((a)-1)/(b) + 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func MPIN_ENCODE_TO_CURVE(DST []byte, ID []byte, HCID []byte) {
|
|
||||||
q := NewBIGints(Modulus)
|
|
||||||
k := q.Nbits()
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
m := r.Nbits()
|
|
||||||
L := roundup(k+roundup(m, 2), 8)
|
|
||||||
var fd = make([]byte, L)
|
|
||||||
OKM := ext.XMD_Expand(ext.MC_SHA2, HASH_TYPE, L, DST, ID)
|
|
||||||
|
|
||||||
for j := 0; j < L; j++ {
|
|
||||||
fd[j] = OKM[j]
|
|
||||||
}
|
|
||||||
dx := DBIG_fromBytes(fd)
|
|
||||||
u := NewFPbig(dx.Mod(q))
|
|
||||||
P := ECP_map2point(u)
|
|
||||||
|
|
||||||
P.Cfp()
|
|
||||||
P.Affine()
|
|
||||||
P.ToBytes(HCID, false)
|
|
||||||
}
|
|
||||||
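MPIN_ENCODE_TO_CURVE above expands the identity to L bytes, interprets them as a big-endian integer and reduces mod the field prime before mapping to the curve; the oversampling keeps the mod-p bias negligible. A hedged sketch of just that reduction step, with a small demo prime and a plain SHA-512 output standing in for ext.XMD_Expand (the original derives L from the curve order's bit length):

package main

import (
	"crypto/sha512"
	"fmt"
	"math/big"
)

// reduceToField reads (assumed) uniform bytes big-endian and reduces mod q.
func reduceToField(uniform []byte, q *big.Int) *big.Int {
	u := new(big.Int).SetBytes(uniform)
	return u.Mod(u, q)
}

func main() {
	q := big.NewInt(2147483647)   // demo prime, not the BLS48-581 modulus
	k := 128                      // extra bits of oversampling
	L := (q.BitLen() + k + 7) / 8 // 20 bytes here
	h := sha512.Sum512([]byte("client identity"))
	fmt.Println(reduceToField(h[:L], q))
}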
|
|
||||||
/* create random secret S */
|
|
||||||
func MPIN_RANDOM_GENERATE(rng *ext.RAND, S []byte) int {
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
s := Randtrunc(r, 16*AESKEY, rng)
|
|
||||||
s.ToBytes(S)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func MPIN_EXTRACT_PIN(CID []byte, pin int, TOKEN []byte) int {
|
|
||||||
P := ECP_fromBytes(TOKEN)
|
|
||||||
if P.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
R := ECP_fromBytes(CID)
|
|
||||||
if R.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
R = R.pinmul(int32(pin)%MAXPIN, PBLEN)
|
|
||||||
P.Sub(R)
|
|
||||||
P.ToBytes(TOKEN, false)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Implement step 2 on client side of MPin protocol */
|
|
||||||
func MPIN_CLIENT_2(X []byte, Y []byte, SEC []byte) int {
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
P := ECP_fromBytes(SEC)
|
|
||||||
if P.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
px := FromBytes(X)
|
|
||||||
py := FromBytes(Y)
|
|
||||||
px.Add(py)
|
|
||||||
px.Mod(r)
|
|
||||||
|
|
||||||
P = G1mul(P, px)
|
|
||||||
P.Neg()
|
|
||||||
P.ToBytes(SEC, false)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func MPIN_GET_CLIENT_SECRET(S []byte, IDHTC []byte, CST []byte) int {
|
|
||||||
s := FromBytes(S)
|
|
||||||
P := ECP_fromBytes(IDHTC)
|
|
||||||
if P.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
G1mul(P, s).ToBytes(CST, false)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Implement step 1 on client side of MPin protocol */
|
|
||||||
func MPIN_CLIENT_1(CID []byte, rng *ext.RAND, X []byte, pin int, TOKEN []byte, SEC []byte, xID []byte) int {
|
|
||||||
r := NewBIGints(CURVE_Order)
|
|
||||||
var x *BIG
|
|
||||||
if rng != nil {
|
|
||||||
x = Randtrunc(r, 16*AESKEY, rng)
|
|
||||||
x.ToBytes(X)
|
|
||||||
} else {
|
|
||||||
x = FromBytes(X)
|
|
||||||
}
|
|
||||||
|
|
||||||
P := ECP_fromBytes(CID)
|
|
||||||
if P.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
T := ECP_fromBytes(TOKEN)
|
|
||||||
if T.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
W := P.pinmul(int32(pin)%MAXPIN, PBLEN)
|
|
||||||
T.Add(W)
|
|
||||||
|
|
||||||
P = G1mul(P, x)
|
|
||||||
P.ToBytes(xID, false)
|
|
||||||
|
|
||||||
T.ToBytes(SEC, false)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Extract Server Secret SST=S*Q where Q is fixed generator in G2 and S is master secret */
|
|
||||||
func MPIN_GET_SERVER_SECRET(S []byte, SST []byte) int {
|
|
||||||
Q := ECP8_generator()
|
|
||||||
s := FromBytes(S)
|
|
||||||
Q = G2mul(Q, s)
|
|
||||||
Q.ToBytes(SST, false)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Implement step 2 of MPin protocol on server side */
|
|
||||||
func MPIN_SERVER(HID []byte, Y []byte, SST []byte, xID []byte, mSEC []byte) int {
|
|
||||||
Q := ECP8_generator()
|
|
||||||
|
|
||||||
sQ := ECP8_fromBytes(SST)
|
|
||||||
if sQ.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
if xID == nil {
|
|
||||||
return BAD_PARAMS
|
|
||||||
}
|
|
||||||
R := ECP_fromBytes(xID)
|
|
||||||
if R.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
y := FromBytes(Y)
|
|
||||||
if HID == nil {
|
|
||||||
return BAD_PARAMS
|
|
||||||
}
|
|
||||||
P := ECP_fromBytes(HID)
|
|
||||||
if P.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
P = G1mul(P, y)
|
|
||||||
P.Add(R)
|
|
||||||
R = ECP_fromBytes(mSEC)
|
|
||||||
if R.Is_infinity() {
|
|
||||||
return INVALID_POINT
|
|
||||||
}
|
|
||||||
|
|
||||||
var g *FP48
|
|
||||||
g = Ate2(Q, R, sQ, P)
|
|
||||||
g = Fexp(g)
|
|
||||||
|
|
||||||
if !g.Isunity() {
|
|
||||||
return BAD_PIN
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
File diff suppressed because it is too large
@ -1,3 +1,5 @@
|
|||||||
|
//go:build js && wasm
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
*
|
*
|
77
nekryptology/pkg/core/curves/native/bls48581/rom_64.go
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
//go:build !js && !wasm
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2012-2020 MIRACL UK Ltd.
|
||||||
|
*
|
||||||
|
* This file is part of MIRACL Core
|
||||||
|
* (see https://github.com/miracl/core).
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Fixed Data in ROM - Field and Curve parameters */
|
||||||
|
|
||||||
|
package bls48581
|
||||||
|
|
||||||
|
// Base Bits= 60
|
||||||
|
var Modulus = [...]Chunk{0xEDC154E6565912B, 0x8FDF721A4A48AC3, 0x7A5513170EE0A57, 0x394F4736DAF6836, 0xAF6E082ACD9CD30, 0xF3975444A48AE43, 0x22131BB3BE6C0F1, 0x12A0056E84F8D1, 0x76F313824E31D47, 0x1280F73FF34}
|
||||||
|
var ROI = [...]Chunk{0xEDC154E6565912A, 0x8FDF721A4A48AC3, 0x7A5513170EE0A57, 0x394F4736DAF6836, 0xAF6E082ACD9CD30, 0xF3975444A48AE43, 0x22131BB3BE6C0F1, 0x12A0056E84F8D1, 0x76F313824E31D47, 0x1280F73FF34}
|
||||||
|
var R2modp = [...]Chunk{0x79868479F1B5833, 0xFB6EBA8FCB82D07, 0x9CC8A7F1FD84C7F, 0x402C51CF5CC3CBB, 0x3F3114F078502C, 0xFC90829BDC8336E, 0xC7BE91DE9CA8EED, 0xD4D273BB17BFADB, 0x6EC7C9A81E792CA, 0x1DC317A6E4}
|
||||||
|
var SQRTm3 = [...]Chunk{0x51EDFC2A1D65A0A, 0xD62DAA292D8CDBF, 0x24112478269D616, 0x6C25D3CABF8AD71, 0xC8E9B16B5D3E4CD, 0xF50A03B738960EE, 0x1A664376FED4343, 0xBFFD8FB8925AE06, 0x600908C6A28DEAA, 0x1280F73F9A7}
|
||||||
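The ROM tables above store each field value as little-endian limbs of Base Bits = 60, i.e. value = sum(chunk[i] << (60*i)). A hedged helper (not part of the package) that recombines such limbs with math/big, shown on the first three limbs of the Modulus table:

package main

import (
	"fmt"
	"math/big"
)

// chunksToBig rebuilds an integer from little-endian fixed-width limbs.
func chunksToBig(chunks []uint64, baseBits uint) *big.Int {
	v := new(big.Int)
	for i := len(chunks) - 1; i >= 0; i-- {
		v.Lsh(v, baseBits)
		v.Or(v, new(big.Int).SetUint64(chunks[i]))
	}
	return v
}

func main() {
	limbs := []uint64{0xEDC154E6565912B, 0x8FDF721A4A48AC3, 0x7A5513170EE0A57}
	fmt.Printf("%x\n", chunksToBig(limbs, 60))
}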
|
|
||||||
|
const MConst Chunk = 0x148B81FC39D5A7D
|
||||||
|
|
||||||
|
var Fra = [...]Chunk{0x62EB6CFE42AEB25, 0xDB41942760AD3F9, 0xA7DF2570715ECE4, 0x90377B51208AC0F, 0x6848493E1C8C418, 0xF496307E298187E, 0x58740E3CAFD6B62, 0xF6067D047983E78, 0x49FA75CD7E73E55, 0xFD30DB501}
|
||||||
|
var Frb = [...]Chunk{0x62EB6CFE42AEB25, 0xDB41942760AD3F9, 0xA7DF2570715ECE4, 0x90377B51208AC0F, 0x6848493E1C8C418, 0xF496307E298187E, 0x58740E3CAFD6B62, 0xF6067D047983E78, 0x49FA75CD7E73E55, 0xFD30DB501}
|
||||||
|
var TWK = [...]Chunk{0x7B433D25F426953, 0xACE45923B9863D, 0xC28BBDFA2D37E16, 0x62FFCC8AFB4BC18, 0x661B4392F002C4F, 0x2ED27E951A14781, 0x670A6683B853246, 0xAEB8C9BA138A075, 0xC10075769CDDD9E, 0x3A65A537B}
|
||||||
|
|
||||||
|
//*** rom curve parameters *****
|
||||||
|
// Base Bits= 60
|
||||||
|
// Ate Bits= 33
|
||||||
|
// G2 Table size= 36
|
||||||
|
|
||||||
|
const CURVE_Cof_I int = 0
|
||||||
|
|
||||||
|
var CURVE_Cof = [...]Chunk{0x140000382, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||||
|
|
||||||
|
const CURVE_B_I int = 1
|
||||||
|
|
||||||
|
var CURVE_B = [...]Chunk{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
|
||||||
|
var CURVE_Order = [...]Chunk{0x8A5FE6FCD671C01, 0xBE599467C24DA11, 0xC7CD0562303C4CC, 0x9D34C4C92016A85, 0xBC972C2E6E74196, 0x3F0B3CBE003FAD6, 0x615C0D6C635387A, 0xE2885E233A9CCC1, 0x2386F8A925, 0x0}
|
||||||
|
var CURVE_Gx = [...]Chunk{0xBCE8732315AF640, 0x74DA5D3A1E6D8C3, 0x57DB368B11786CB, 0x665D859236EBDBC, 0x46A9DF6F9645847, 0xEDFFB9F75445505, 0xE86868CF61ABDBA, 0x93F860DE3F257E0, 0x40F2BAF2B73DF1E, 0x2AF59B7AC3}
|
||||||
|
var CURVE_Gy = [...]Chunk{0xDBB5DE3E2587A70, 0xF37AEF7B926B576, 0xF77C2876D1B2E35, 0x78584C3EF22F487, 0xFFB98AEE53E80F6, 0xD41B720EF7BB7BE, 0xFEB8A52E991279D, 0xB398A488A553C9E, 0x31F91F86B3A2D1F, 0xCEFDA44F65}
|
||||||
|
var CURVE_HTPC = [...]Chunk{0x393F0BE031193EC, 0xC28896440758243, 0xDBE4AA8E70D4620, 0x6B27BD55EFD560E, 0x24A9624BEECD070, 0xE2626AD7C53B361, 0xDD845A98030C755, 0x29389B4E6A62C2D, 0x5AF94F05D8A9FD4, 0x92348CD5DC}
var CURVE_Bnx = [...]Chunk{0x140000381, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
var CRu = [...]Chunk{0x4DE9AC5E1C79B90, 0x5CD8E3F88E5DE82, 0xAB21F74F7421A20, 0x6694B9B60DB5D62, 0x73422B5FB82F431, 0xFF46A846B5FA6AA, 0x83D66C1E5FCBED6, 0x2096384F2AFA565, 0x8B75055DD5D1F4E, 0x2C6}
var CURVE_Pxaaa = [...]Chunk{0x34FD0B4ACE8BFAB, 0xB79766322154DEC, 0x4D80491F510317, 0x3CA0612F4005030, 0xBAAD1A8C42281A6, 0x3A2EF156C46FF79, 0x344DBCCB7DE64DB, 0x2775DEBABBEFC70, 0x71E4A38237FA45A, 0x5D615D9A78}
var CURVE_Pxaab = [...]Chunk{0x669B36676B47C57, 0x5556A01AFA143F1, 0x7630D979630FFD7, 0x6AFFA62504F0C3C, 0xABFEDF16214A7, 0x12307F4E1C3943A, 0xE1623E9526F6DA, 0xBC07E8B22BB6D98, 0x258512069B0E86A, 0x7C4973ECE2}
var CURVE_Pxaba = [...]Chunk{0x488156CA55A3E6A, 0xEF4CDED6B3F0B46, 0xCBDFBB879D5FEA8, 0x66F0D2A6D55F028, 0xC1DBD19242FFAE7, 0xCCBAB5AB6860161, 0xAE237CA7A6D6957, 0xAD83BC73A8A6CA9, 0xF1334E1B2EA1853, 0x1FCCC70198}
var CURVE_Pxabb = [...]Chunk{0x9A7033CBB7FEAFE, 0x10B8CB4E80BC3F0, 0x1C5257C200CA523, 0x43B1B279B9468C3, 0x5F63E1C776E6EC1, 0x393F8BE0CC218A9, 0x62F3E5821B7B92A, 0x54D4BFE8F5985AC, 0xEB6185C78D80129, 0xBE2218C25C}
var CURVE_Pxbaa = [...]Chunk{0x39C3A1C53F8CCE5, 0x5B5F746C9D4CBB7, 0xD55FC1889AA80C6, 0xEF492AE589274FA, 0x9E48199D5AC10B2, 0xC5805386699981F, 0xB1642B5675FF0E7, 0xA9DD63007C675D0, 0x35913A3C598E4CA, 0x38B91C600B}
var CURVE_Pxbab = [...]Chunk{0x2004D914A3C093A, 0x7960910FCE3370F, 0xA9F177612F097FC, 0x40B9C0B15DD7595, 0x3835D28997EB57B, 0x7BB037418181DF6, 0xEF0977A3D1A5867, 0xCDA088F7B8F35DC, 0x738603F1311E4E, 0xC96C7797EB}
var CURVE_Pxbba = [...]Chunk{0x41607E60750E057, 0x4B5B0E205C3354E, 0xCBE4324C22D6333, 0xAA5EFCF3432AAD1, 0xF293B13CED0FD0C, 0xA2C0B7A449CEF11, 0x9D13852B6DB908B, 0x8AEE660DEA41B3, 0x61EE3F0197A4989, 0xB9B7951C60}
var CURVE_Pxbbb = [...]Chunk{0xE19DA00FBC6AE34, 0x6AF2FC9E97C3F84, 0x9BD6AEBF9FC44E5, 0x90B7E2B0D458547, 0xA93F29CFF364A71, 0x719728A7F9F8CFC, 0xFAF47B5211CF741, 0x4AAA2B1E5D7A9DE, 0x2BDEC5282624C4F, 0x827D5C22FB}
var CURVE_Pyaaa = [...]Chunk{0x3EDD3FE4D2D7971, 0x45012AB12C0FF32, 0x9ABF77EEA6D6590, 0x336D8AE5163C159, 0x35AFA27748D90F7, 0xBFC435FAAB09062, 0x59A577E6F3B39E, 0x2F3024B918B4238, 0x75B5DFA49721645, 0xEB53356C3}
var CURVE_Pyaab = [...]Chunk{0x1471DB936CD5665, 0x8B423525FFC7B11, 0x2FA097D760E2E58, 0xD1892AB24E1DD21, 0x6B243B1F192C5C3, 0x64732FCBF3AFB09, 0xA325E6FBA01D729, 0x5FCADC2B75A422B, 0xE0FF144DA653181, 0x284DC75979}
var CURVE_Pyaba = [...]Chunk{0x8332A526A2A8474, 0xBC7C46FC3B8FDE6, 0x1D35D51A652269C, 0x36CA3295E5E2F0C, 0xC99D0E904115155, 0xD370514475F7D5, 0x216D5B119D3A48, 0x67669EF2C2FC503, 0x8523E421EFB703, 0xB36A201DD0}
var CURVE_Pyabb = [...]Chunk{0x6213DA92841589D, 0xB3D8B8A1E533731, 0x7BDA503EE5E578F, 0x817742770BA10D6, 0x224333FA40DCED2, 0x10E122D2742C89B, 0x60DCEE23DD8B0E7, 0x78762B1C2CDED33, 0xEDC0688223FBBD4, 0xAEC25A4621}
var CURVE_Pybaa = [...]Chunk{0x47831F982E50137, 0x857FDDDFCF7A43F, 0x30135945D137B08, 0xCA4E512B64F59F4, 0x7FA238CDCE8A1E2, 0x5F1129857ED85C7, 0xB43DD93B5A95980, 0x88325A2554DC541, 0xA9C46916503FA5A, 0xD209D5A223}
var CURVE_Pybab = [...]Chunk{0x4EEDC58CF90BEE4, 0xA59ED8226CF3A59, 0xFC198CAA72B679D, 0xF47C180D139E3AA, 0xE8C270841F6824, 0x55AB7504FA8342, 0xB16722B589D82E2, 0xD537B90421AD66E, 0x36B7A513D339D5A, 0x7D0D037457}
var CURVE_Pybba = [...]Chunk{0xD41FAEAFEB23986, 0xE884017D9AA62B3, 0x40FA639F53DCCC9, 0xAB8C74B2618B5BB, 0x5AE3A2864F22C1F, 0xE4C819A6DF98F42, 0xC0841B064155F14, 0xD17AF8A006F364F, 0xE65EA25C2D05DFD, 0x896767811B}
var CURVE_Pybbb = [...]Chunk{0x667FFCB732718B6, 0x5AC66E84069C55D, 0xD8C4AB33F748E, 0x333EC7192054173, 0x8E69C31E97E1AD0, 0xEF8ECA9A9533A3F, 0x6BE8E50C87549B6, 0x4F981B5E068F140, 0x9029D393A5C07E8, 0x35E2524FF8}
//var CURVE_W=[2][10]Chunk {{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}}
//var CURVE_SB=[2][2][10]Chunk {{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}},{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}}}
//var CURVE_WB=[4][10]Chunk {{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}}
//var CURVE_BB=[4][4][10]Chunk {{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}},{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}},{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}},{{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0},{0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0}}}
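The Chunk arrays above are limb representations of large field and curve constants. A minimal sketch of how such a value can be reassembled, assuming the usual little-endian base-2^60 limb layout implied by the "Base Bits= 60" comment (the helper and its name are illustrative, not part of the repository):

package main

import (
	"fmt"
	"math/big"
)

// chunksToBig rebuilds a big integer from little-endian 60-bit limbs.
func chunksToBig(chunks []uint64) *big.Int {
	v := new(big.Int)
	for i := len(chunks) - 1; i >= 0; i-- {
		v.Lsh(v, 60)
		v.Or(v, new(big.Int).SetUint64(chunks[i]))
	}
	return v
}

func main() {
	curveCof := []uint64{0x140000382, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	fmt.Printf("%#x\n", chunksToBig(curveCof)) // prints 0x140000382
}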
@ -18,7 +18,7 @@ type VDF struct {
finished bool
}

//size of long integers in quadratic function group
// size of long integers in quadratic function group
const sizeInBits = 2048

// New create a new instance of VDF.
@ -53,12 +53,31 @@ func (vdf *VDF) Execute() {
vdf.finished = true
}

func (vdf *VDF) ExecuteIteration(x_blob []byte) {
vdf.finished = false

yBuf, proofBuf := GenerateVDFIteration(vdf.input[:], x_blob, vdf.difficulty, sizeInBits)

copy(vdf.output[:], yBuf)
copy(vdf.output[258:], proofBuf)

go func() {
vdf.outputChan <- vdf.output
}()

vdf.finished = true
}

// Verify runs the verification of generated proof
// currently on i7-6700K, verification takes about 350 ms
func (vdf *VDF) Verify(proof [516]byte) bool {
return VerifyVDF(vdf.input[:], proof[:], vdf.difficulty, sizeInBits)
}

func (vdf *VDF) VerifyIteration(x_blob [258]byte, proof [516]byte, iterations uint32) bool {
return VerifyVDFIteration(vdf.input[:], x_blob[:], proof[:], vdf.difficulty, sizeInBits)
}

// IsFinished returns whether the vdf execution is finished or not.
func (vdf *VDF) IsFinished() bool {
return vdf.finished
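The 258-byte offset used by ExecuteIteration follows from the serialized size of a class group element at sizeInBits = 2048. A short sketch of that arithmetic; only sizeInBits and the slicing above come from the diff, the constant names here are illustrative and the (a, b) interpretation is an assumption:

// Mirrors the (int_size_bits + 16) >> 4 computation used by the
// generator/verifier in the next file.
const intSize = (2048 + 16) >> 4    // 129 bytes per serialized big integer
const elementSize = 2 * intSize     // 258 bytes per class group element (presumably its (a, b) pair)
const outputSize = 2 * elementSize  // 516 bytes: y at output[0:258], proof at output[258:516]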
@ -16,8 +16,8 @@ import (
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/iqc"
)

//Creates L and k parameters from papers, based on how many iterations need to be
// Creates L and k parameters from papers, based on how many iterations need to be
//performed, and how much memory should be used.
// performed, and how much memory should be used.
func approximateParameters(T uint32) (int, int, int) {
//log_memory = math.log(10000000, 2)
log_memory := math.Log(10000000) / math.Log(2)
@ -86,6 +86,20 @@ func GenerateVDFWithStopChan(seed []byte, iterations, int_size_bits uint32, stop
}
}

func GenerateVDFIteration(seed, x_blob []byte, iterations, int_size_bits uint32) ([]byte, []byte) {
int_size := (int_size_bits + 16) >> 4
D := iqc.CreateDiscriminant(seed, int_size_bits)
x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)

y, proof := calculateVDF(D, x, iterations, int_size_bits, nil)

if (y == nil) || (proof == nil) {
return nil, nil
} else {
return y.Serialize(), proof.Serialize()
}
}

func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool {
int_size := (int_size_bits + 16) >> 4

@ -97,6 +111,16 @@ func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool {
return verifyProof(x, y, proof, iterations)
}

func VerifyVDFIteration(seed, x_blob, proof_blob []byte, iterations, int_size_bits uint32) bool {
int_size := (int_size_bits + 16) >> 4
D := iqc.CreateDiscriminant(seed, int_size_bits)
x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)
y, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D)
proof, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D)

return verifyProof(x, y, proof, iterations)
}

// Creates a random prime based on input x, y, T
// Note – this differs from harmony-one's implementation, as the Fiat-Shamir
// transform requires _all_ public parameters be input, or else there is the
@ -133,7 +157,7 @@ func getBlock(i, k, T int, B *big.Int) *big.Int {
return iqc.FloorDivision(new(big.Int).Mul(p1, p2), B)
}

//Optimized evalutation of h ^ (2^T // B)
// Optimized evalutation of h ^ (2^T // B)
func evalOptimized(identity, h *iqc.ClassGroup, B *big.Int, T uint32, k, l int, C map[int]*iqc.ClassGroup) *iqc.ClassGroup {
//k1 = k//2
var k1 int = k / 2
@ -219,7 +243,7 @@ func evalOptimized(identity, h *iqc.ClassGroup, B *big.Int, T uint32, k, l int,
return x
}

//generate y = x ^ (2 ^T) and pi
// generate y = x ^ (2 ^T) and pi
func generateProof(identity, x, y *iqc.ClassGroup, T uint32, k, l int, powers map[int]*iqc.ClassGroup) *iqc.ClassGroup {
//x_s = x.serialize()
x_s := x.Serialize()
@ -236,10 +260,12 @@ func generateProof(identity, x, y *iqc.ClassGroup, T uint32, k, l int, powers ma

func calculateVDF(discriminant *big.Int, x *iqc.ClassGroup, iterations, int_size_bits uint32, stop <-chan struct{}) (y, proof *iqc.ClassGroup) {
L, k, _ := approximateParameters(iterations)

loopCount := int(math.Ceil(float64(iterations) / float64(k*L)))
// NB: Dusk needs to do the disjoint set arithmetic, marking this spot down
// as the insertion point
powers_to_calculate := make([]int, loopCount+2)

// link into next
for i := 0; i < loopCount+1; i++ {
powers_to_calculate[i] = i * k * L
}
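A minimal round-trip sketch of the iteration API added above. The seed, the difficulty value and the assumption that x_blob holds a previously serialized class group element (for example the y returned by an earlier iteration) are illustrative, not taken from the repository:

seed := []byte("example seed")       // hypothetical seed bytes
prevY := previousOutput[:258]        // 2*int_size bytes when int_size_bits = 2048
y, proof := GenerateVDFIteration(seed, prevY, 10000, 2048)
if y == nil || proof == nil {
	// calculateVDF returned nothing; skip publishing this iteration
	return
}
// proof_blob is y followed by proof, matching the [516]byte layout in vdf.go
ok := VerifyVDFIteration(seed, prevY, append(y, proof...), 10000, 2048)
_ = ok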
@ -25,13 +25,13 @@ func TestZKPOverMultipleCurves(t *testing.T) {
}
for i, curve := range curveInstances {
uniqueSessionId := sha3.New256().Sum([]byte("random seed"))
prover := NewProver(curve, nil, uniqueSessionId)
prover := NewProver(curve, nil, sha3.New256(), uniqueSessionId)

secret := curve.Scalar.Random(rand.Reader)
proof, err := prover.Prove(secret)
require.NoError(t, err, fmt.Sprintf("failed in curve %d", i))

err = Verify(proof, curve, nil, uniqueSessionId)
err = Verify(proof, curve, nil, sha3.New256(), uniqueSessionId)
require.NoError(t, err, fmt.Sprintf("failed in curve %d", i))
}
}
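The updated Schnorr API threads an explicit hash into both the prover and the verifier, and the two sides must agree on it. A sketch patterned after the ED448 usage that appears later in this diff; the empty session id and the nil basepoint are illustrative:

h := sha3.New256()
prover := schnorr.NewProver(curves.ED448(), nil, h, []byte{})
proof, err := prover.Prove(curves.ED448().Scalar.Random(rand.Reader))
if err == nil {
	// Verify must be given the same curve, basepoint, hash and session id.
	err = schnorr.Verify(proof, curves.ED448(), nil, sha3.New256(), []byte{})
}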
node/.vscode/settings.json (vendored, new file)
@ -0,0 +1,5 @@
{
    "go.testEnvVars": {
        "GOEXPERIMENT": "arenas"
    }
}
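This editor setting exists because parts of the tree now import the arena package, which only builds when the toolchain runs with GOEXPERIMENT=arenas, so tests launched outside the editor need the same environment variable. A minimal sketch of the experimental API such code relies on; the struct and package name here are purely illustrative:

//go:build goexperiment.arenas

package example

import "arena"

type scratch struct{ buf [512]byte }

func allocate() {
	a := arena.NewArena()
	defer a.Free() // frees everything allocated from this arena at once
	s := arena.New[scratch](a)
	_ = s
}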
@ -23,6 +23,7 @@ import (
"google.golang.org/grpc/credentials/insecure"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
@ -431,7 +432,7 @@ func (m model) View() string {

list := []string{}
for i, item := range m.filters {
str := item[0:12] + ".." + item[52:]
str := item[0:12] + ".." + item[len(item)-12:]
if m.selectedFilter == item {
list = append(list, selectedListStyle.Render(str))
} else if i == m.cursor {
@ -584,7 +585,7 @@ func (m model) View() string {

for _, active := range app.ActiveParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
active.KeyValue,
active.PublicKeySignatureEd448.PublicKey.KeyValue,
) + "\n"
}

@ -624,7 +625,7 @@ func (m model) View() string {

for _, active := range app.ActiveParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
active.KeyValue,
active.PublicKeySignatureEd448.PublicKey.KeyValue,
) + "\n"
}

@ -656,8 +657,10 @@ func (m model) View() string {
) + "\n"
}
case application.CEREMONY_APPLICATION_STATE_VALIDATING:
explorerContent += fmt.Sprintf(
"G1 Powers: %d\n", len(app.UpdatedTranscript.G1Powers),
)
explorerContent += "Preferred Next Round Participants: \n"

for _, next := range app.NextRoundPreferredParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
next.KeyValue,
@ -727,7 +730,10 @@ func consoleModel(
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}),
hex.EncodeToString(application.CEREMONY_ADDRESS),
hex.EncodeToString(append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
)),
},
cursor: 0,
conn: conn,
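The console now renders the ceremony address as a bloom filter plus an index list rather than the raw address bytes. The implementations of p2p.GetBloomFilter and p2p.GetBloomFilterIndices are not part of this diff; the following is only a generic illustration of deriving k bit positions over an m-bit filter from an input, with m and k matching the (256, 3) call above:

package p2psketch // illustrative only, not the repository's implementation

import (
	"encoding/binary"

	"golang.org/x/crypto/sha3"
)

// bloomBits derives k indices in [0, m) from a hash of the input.
func bloomBits(input []byte, m, k int) []int {
	sum := sha3.Sum256(input)
	idx := make([]int, 0, k)
	for i := 0; i < k && 2*i+2 <= len(sum); i++ {
		idx = append(idx, int(binary.BigEndian.Uint16(sum[2*i:]))%m)
	}
	return idx
}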
@ -8,7 +8,6 @@ import (
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
ceremonyConsensus "source.quilibrium.com/quilibrium/monorepo/node/consensus/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
@ -38,6 +37,7 @@ var keyManagerSet = wire.NewSet(
var storeSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "DB"),
store.NewPebbleDB,
wire.Bind(new(store.KVDB), new(*store.PebbleDB)),
store.NewPebbleClockStore,
store.NewPebbleKeyStore,
store.NewPebbleDataProofStore,
@ -52,16 +52,8 @@ var pubSubSet = wire.NewSet(
wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)),
)

var dataConsensusSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "Engine"),
ceremonyConsensus.NewCeremonyDataClockConsensusEngine,
wire.Bind(
new(consensus.DataConsensusEngine),
new(*ceremonyConsensus.CeremonyDataClockConsensusEngine),
),
)

var engineSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "Engine"),
ceremony.NewCeremonyExecutionEngine,
)

@ -80,7 +72,6 @@ func NewNode(*config.Config) (*Node, error) {
storeSet,
pubSubSet,
engineSet,
dataConsensusSet,
consensusSet,
newNode,
))
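For readers unfamiliar with google/wire: provider sets like storeSet are consumed at generate time, so the KVDB binding added above changes what wire_gen.go emits rather than any runtime behaviour. A reduced sketch of the pattern under illustrative types and constructors (none of these are the node's real ones):

//go:build wireinject

package example

import "github.com/google/wire"

type KVDB interface{ Close() error }

type PebbleDB struct{}

func (p *PebbleDB) Close() error { return nil }
func NewPebbleDB() *PebbleDB     { return &PebbleDB{} }

type ClockStore struct{ db KVDB }

func NewClockStore(db KVDB) *ClockStore { return &ClockStore{db: db} }

var storeSet = wire.NewSet(
	NewPebbleDB,
	wire.Bind(new(KVDB), new(*PebbleDB)), // satisfy KVDB-typed parameters with *PebbleDB
	NewClockStore,
)

func InjectClockStore() *ClockStore {
	panic(wire.Build(storeSet))
}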
@ -11,9 +11,8 @@ import (
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
ceremony2 "source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/store"
@ -24,16 +23,15 @@ import (
func NewNode(configConfig *config.Config) (*Node, error) {
zapLogger := logger()
dbConfig := configConfig.DB
db := store.NewPebbleDB(dbConfig)
pebbleDB := store.NewPebbleDB(dbConfig)
pebbleClockStore := store.NewPebbleClockStore(db, zapLogger)
pebbleClockStore := store.NewPebbleClockStore(pebbleDB, zapLogger)
keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
p2PConfig := configConfig.P2P
blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
engineConfig := configConfig.Engine
pebbleKeyStore := store.NewPebbleKeyStore(db, zapLogger)
pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger)
ceremonyDataClockConsensusEngine := ceremony.NewCeremonyDataClockConsensusEngine(engineConfig, zapLogger, fileKeyManager, pebbleClockStore, pebbleKeyStore, blossomSub)
ceremonyExecutionEngine := ceremony2.NewCeremonyExecutionEngine(zapLogger, ceremonyDataClockConsensusEngine, engineConfig, fileKeyManager, blossomSub, pebbleClockStore, pebbleKeyStore)
ceremonyExecutionEngine := ceremony.NewCeremonyExecutionEngine(zapLogger, engineConfig, fileKeyManager, blossomSub, pebbleClockStore, pebbleKeyStore)
masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub)
node, err := newNode(zapLogger, pebbleClockStore, fileKeyManager, blossomSub, ceremonyExecutionEngine, masterClockConsensusEngine)
if err != nil {
@ -52,9 +50,9 @@ func NewDBConsole(configConfig *config.Config) (*DBConsole, error) {

func NewClockStore(configConfig *config.Config) (store.ClockStore, error) {
dbConfig := configConfig.DB
db := store.NewPebbleDB(dbConfig)
pebbleDB := store.NewPebbleDB(dbConfig)
zapLogger := logger()
pebbleClockStore := store.NewPebbleClockStore(db, zapLogger)
pebbleClockStore := store.NewPebbleClockStore(pebbleDB, zapLogger)
return pebbleClockStore, nil
}

@ -75,17 +73,11 @@ var loggerSet = wire.NewSet(

var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))

var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)))

var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)))

var dataConsensusSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), ceremony.NewCeremonyDataClockConsensusEngine, wire.Bind(
new(consensus.DataConsensusEngine),
new(*ceremony.CeremonyDataClockConsensusEngine),
),
)

var engineSet = wire.NewSet(ceremony2.NewCeremonyExecutionEngine)
var engineSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), ceremony.NewCeremonyExecutionEngine)

var consensusSet = wire.NewSet(master.NewMasterClockConsensusEngine, wire.Bind(
new(consensus.ConsensusEngine),
@ -7,4 +7,8 @@ type EngineConfig struct {
MaxFrames int64 `yaml:"maxFrames"`
PendingCommitWorkers int64 `yaml:"pendingCommitWorkers"`
MinimumPeersRequired int `yaml:"minimumPeersRequired"`

// Values used only for testing – do not override these in production, your
// node will get kicked out
Difficulty uint32 `yaml:"difficulty"`
}
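A sketch of how the new field would be decoded from the node's engine config block. The YAML document and the yaml.v3 import are illustrative; only the struct tags come from the type above:

package configsketch // illustrative only

import "gopkg.in/yaml.v3"

type EngineConfig struct {
	MinimumPeersRequired int    `yaml:"minimumPeersRequired"`
	Difficulty           uint32 `yaml:"difficulty"`
}

func parse() (EngineConfig, error) {
	var cfg EngineConfig
	// difficulty is meant for tests; leaving it out (zero) makes the engine
	// fall back to its default of 10000, as shown later in this diff.
	doc := []byte("minimumPeersRequired: 3\ndifficulty: 1000\n")
	err := yaml.Unmarshal(doc, &cfg)
	return cfg, err
}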
@ -2,8 +2,6 @@ package ceremony

import (
"bytes"
"crypto"
"crypto/rand"
"encoding/binary"
"strings"
"time"
@ -19,7 +17,6 @@ import (
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/zkp/schnorr"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
@ -111,22 +108,6 @@ func (e *CeremonyDataClockConsensusEngine) handleMessage(
); err != nil {
return errors.Wrap(err, "handle message")
}
case protobufs.ProvingKeyRequestType:
if err := e.handleProvingKeyRequest(
message.From,
msg.Address,
any,
); err != nil {
return errors.Wrap(err, "handle message")
}
case protobufs.ProvingKeyAnnouncementType:
if err := e.handleProvingKey(message.From, msg.Address, any); err != nil {
return errors.Wrap(err, "handle message")
}
case protobufs.KeyBundleAnnouncementType:
if err := e.handleKeyBundle(message.From, msg.Address, any); err != nil {
return errors.Wrap(err, "handle message")
}
case protobufs.CeremonyPeerListAnnounceType:
if err := e.handleCeremonyPeerListAnnounce(
message.From,
@ -304,177 +285,6 @@ func (e *CeremonyDataClockConsensusEngine) handleCeremonyLobbyStateTransition(
return nil
}

func (e *CeremonyDataClockConsensusEngine) handleKeyBundle(
peerID []byte,
address []byte,
any *anypb.Any,
) error {
e.logger.Debug("received key bundle")
keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{}
if err := any.UnmarshalTo(keyBundleAnnouncement); err != nil {
return errors.Wrap(err, "handle key bundle")
}

if len(keyBundleAnnouncement.ProvingKeyBytes) == 0 {
return errors.Wrap(errors.New("proving key is nil"), "handle key bundle")
}

k, err := e.keyStore.GetLatestKeyBundle(keyBundleAnnouncement.ProvingKeyBytes)
if err != nil && !errors.Is(err, store.ErrNotFound) {
return errors.Wrap(err, "handle key bundle")
}

if k != nil {
latestAnnouncement := &protobufs.KeyBundleAnnouncement{}
err := proto.Unmarshal(k.Data, latestAnnouncement)
if err != nil {
return errors.Wrap(err, "handle key bundle")
}

if bytes.Equal(
latestAnnouncement.IdentityKey.Challenge,
keyBundleAnnouncement.IdentityKey.Challenge,
) && bytes.Equal(
latestAnnouncement.IdentityKey.Response,
keyBundleAnnouncement.IdentityKey.Response,
) && bytes.Equal(
latestAnnouncement.IdentityKey.Statement,
keyBundleAnnouncement.IdentityKey.Statement,
) && bytes.Equal(
latestAnnouncement.SignedPreKey.Challenge,
keyBundleAnnouncement.SignedPreKey.Challenge,
) && bytes.Equal(
latestAnnouncement.SignedPreKey.Response,
keyBundleAnnouncement.SignedPreKey.Response,
) && bytes.Equal(
latestAnnouncement.SignedPreKey.Statement,
keyBundleAnnouncement.SignedPreKey.Statement,
) {
// This has already been proven, ignore
return nil
}
}

var provingKey *protobufs.ProvingKeyAnnouncement
inclusion, err := e.keyStore.GetProvingKey(
keyBundleAnnouncement.ProvingKeyBytes,
)
if err != nil {
if !errors.Is(err, store.ErrNotFound) {
return errors.Wrap(err, "handle key bundle")
}

provingKey, err = e.keyStore.GetStagedProvingKey(
keyBundleAnnouncement.ProvingKeyBytes,
)
if err != nil && !errors.Is(err, store.ErrNotFound) {
return errors.Wrap(err, "handle key bundle")
}
} else {
err := proto.Unmarshal(inclusion.Data, provingKey)
if err != nil {
return errors.Wrap(err, "handle key bundle")
}
}

// We have a matching proving key, we can set this up to be committed.
if provingKey != nil {
e.logger.Debug("verifying key bundle announcement")
if err := keyBundleAnnouncement.Verify(provingKey); err != nil {
e.logger.Debug(
"could not verify key bundle announcement",
zap.Error(err),
)
return nil
}

go func() {
e.logger.Debug("adding key bundle announcement to pending commits")

e.pendingCommits <- any
}()

return nil
} else {
e.logger.Debug("proving key not found, requesting from peers")

if err = e.publishMessage(e.filter, &protobufs.ProvingKeyRequest{
ProvingKeyBytes: keyBundleAnnouncement.ProvingKeyBytes,
}); err != nil {
return errors.Wrap(err, "handle key bundle")
}

e.dependencyMapMx.Lock()
e.dependencyMap[string(keyBundleAnnouncement.ProvingKeyBytes)] = any
e.dependencyMapMx.Unlock()
}

return nil
}

func (e *CeremonyDataClockConsensusEngine) handleProvingKey(
peerID []byte,
address []byte,
any *anypb.Any,
) error {
e.logger.Debug("received proving key")

provingKeyAnnouncement := &protobufs.ProvingKeyAnnouncement{}
if err := any.UnmarshalTo(provingKeyAnnouncement); err != nil {
return errors.Wrap(err, "handle proving key")
}

if err := provingKeyAnnouncement.Verify(); err != nil {
return errors.Wrap(err, "handle proving key")
}

if err := e.keyStore.StageProvingKey(provingKeyAnnouncement); err != nil {
return errors.Wrap(err, "handle proving key")
}

provingKey := provingKeyAnnouncement.PublicKey()

e.logger.Debug(
"proving key staged",
zap.Binary("proving_key", provingKey),
)

go func() {
e.dependencyMapMx.Lock()
if e.dependencyMap[string(provingKey)] != nil {
keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{}
if err := proto.Unmarshal(
e.dependencyMap[string(provingKey)].Value,
keyBundleAnnouncement,
); err != nil {
e.logger.Error(
"could not unmarshal key bundle announcement",
zap.Error(err),
)
e.dependencyMapMx.Unlock()
return
}
if err := keyBundleAnnouncement.Verify(
provingKeyAnnouncement,
); err != nil {
e.logger.Error(
"could not verify key bundle announcement",
zap.Error(err),
)
e.dependencyMapMx.Unlock()
return
}

e.pendingCommits <- e.dependencyMap[string(provingKey)]

delete(e.dependencyMap, string(provingKey))
}
e.dependencyMapMx.Unlock()
}()

return nil
}

func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
peerID []byte,
address []byte,
@ -694,16 +504,30 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
)
masterFrame, err := e.clockStore.GetMasterClockFrame(
[]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
},
frame.FrameNumber-1,
)
if err != nil {
e.logger.Info("received frame with no known master, needs sync")
return nil
}

parentSelector, selector, distance, err :=
frame.GetParentSelectorAndDistance()
discriminator, err := masterFrame.GetSelector()
if err != nil {
return errors.Wrap(err, "handle clock frame data")
}

parentSelector, distance, selector, err :=
frame.GetParentSelectorAndDistance(discriminator)
if err != nil {
return errors.Wrap(err, "handle clock frame data")
}
e.logger.Debug(
"difference between selector/discriminator",
zap.Binary("difference", distance.Bytes()),
)

if _, err := e.clockStore.GetParentDataClockFrame(
frame.Filter,
@ -713,7 +537,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
// If this is a frame number higher than what we're already caught up to,
// push a request to fill the gap, unless we're syncing or it's in step,
// then just lazily seek.
from := e.frame
from := e.frame.FrameNumber
if from >= frame.FrameNumber-1 {
from = frame.FrameNumber - 1
}
@ -737,9 +561,9 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
}

if err := e.clockStore.PutCandidateDataClockFrame(
parentSelector.Bytes(),
parentSelector.FillBytes(make([]byte, 32)),
distance.Bytes(),
distance.FillBytes(make([]byte, 32)),
selector.Bytes(),
selector.FillBytes(make([]byte, 32)),
frame,
txn,
); err != nil {
|
|||||||
return errors.Wrap(err, "handle clock frame data")
|
return errors.Wrap(err, "handle clock frame data")
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.frame < frame.FrameNumber {
|
if e.frame.FrameNumber < frame.FrameNumber {
|
||||||
e.latestFrameReceived = frame.FrameNumber
|
e.latestFrameReceived = frame.FrameNumber
|
||||||
e.lastFrameReceivedAt = time.Now().UTC()
|
e.lastFrameReceivedAt = time.Now().UTC()
|
||||||
}
|
}
|
||||||
@ -819,12 +643,11 @@ func (e *CeremonyDataClockConsensusEngine) publishMessage(
|
|||||||
return e.pubSub.PublishToBitmask(filter, data)
|
return e.pubSub.PublishToBitmask(filter, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *CeremonyDataClockConsensusEngine) announceKeyBundle() error {
|
func (e *CeremonyDataClockConsensusEngine) createCommunicationKeys() error {
|
||||||
e.logger.Debug("announcing key bundle")
|
_, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
|
||||||
idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, keys.KeyNotFoundErr) {
|
if errors.Is(err, keys.KeyNotFoundErr) {
|
||||||
idk, err = e.keyManager.CreateAgreementKey(
|
_, err = e.keyManager.CreateAgreementKey(
|
||||||
"q-ratchet-idk",
|
"q-ratchet-idk",
|
||||||
keys.KeyTypeX448,
|
keys.KeyTypeX448,
|
||||||
)
|
)
|
||||||
@ -836,10 +659,10 @@ func (e *CeremonyDataClockConsensusEngine) announceKeyBundle() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
spk, err := e.keyManager.GetAgreementKey("q-ratchet-spk")
|
_, err = e.keyManager.GetAgreementKey("q-ratchet-spk")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, keys.KeyNotFoundErr) {
|
if errors.Is(err, keys.KeyNotFoundErr) {
|
||||||
spk, err = e.keyManager.CreateAgreementKey(
|
_, err = e.keyManager.CreateAgreementKey(
|
||||||
"q-ratchet-spk",
|
"q-ratchet-spk",
|
||||||
keys.KeyTypeX448,
|
keys.KeyTypeX448,
|
||||||
)
|
)
|
||||||
@ -851,110 +674,5 @@ func (e *CeremonyDataClockConsensusEngine) announceKeyBundle() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
idkPoint := curves.ED448().NewGeneratorPoint().Mul(idk)
|
return nil
|
||||||
idkProver := schnorr.NewProver(
|
|
||||||
curves.ED448(),
|
|
||||||
curves.ED448().NewGeneratorPoint(),
|
|
||||||
sha3.New256(),
|
|
||||||
[]byte{},
|
|
||||||
)
|
|
||||||
|
|
||||||
spkPoint := curves.ED448().NewGeneratorPoint().Mul(spk)
|
|
||||||
spkProver := schnorr.NewProver(
|
|
||||||
curves.ED448(),
|
|
||||||
curves.ED448().NewGeneratorPoint(),
|
|
||||||
sha3.New256(),
|
|
||||||
[]byte{},
|
|
||||||
)
|
|
||||||
|
|
||||||
idkProof, idkCommitment, err := idkProver.ProveCommit(idk)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
spkProof, spkCommitment, err := spkProver.ProveCommit(spk)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := append(
|
|
||||||
append([]byte{}, idkCommitment...),
|
|
||||||
spkCommitment...,
|
|
||||||
)
|
|
||||||
|
|
||||||
signature, err := e.provingKey.Sign(rand.Reader, msg, crypto.Hash(0))
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
signatureProto := &protobufs.ProvingKeyAnnouncement_ProvingKeySignatureEd448{
|
|
||||||
ProvingKeySignatureEd448: &protobufs.Ed448Signature{
|
|
||||||
PublicKey: &protobufs.Ed448PublicKey{
|
|
||||||
KeyValue: e.provingKeyBytes,
|
|
||||||
},
|
|
||||||
Signature: signature,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
provingKeyAnnouncement := &protobufs.ProvingKeyAnnouncement{
|
|
||||||
IdentityCommitment: idkCommitment,
|
|
||||||
PrekeyCommitment: spkCommitment,
|
|
||||||
ProvingKeySignature: signatureProto,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := e.publishMessage(e.filter, provingKeyAnnouncement); err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
idkSignature, err := e.provingKey.Sign(
|
|
||||||
rand.Reader,
|
|
||||||
idkPoint.ToAffineCompressed(),
|
|
||||||
crypto.Hash(0),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
spkSignature, err := e.provingKey.Sign(
|
|
||||||
rand.Reader,
|
|
||||||
spkPoint.ToAffineCompressed(),
|
|
||||||
crypto.Hash(0),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "announce key bundle")
|
|
||||||
}
|
|
||||||
|
|
||||||
keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{
|
|
||||||
ProvingKeyBytes: e.provingKeyBytes,
|
|
||||||
IdentityKey: &protobufs.IdentityKey{
|
|
||||||
Challenge: idkProof.C.Bytes(),
|
|
||||||
Response: idkProof.S.Bytes(),
|
|
||||||
Statement: idkProof.Statement.ToAffineCompressed(),
|
|
||||||
IdentityKeySignature: &protobufs.IdentityKey_PublicKeySignatureEd448{
|
|
||||||
PublicKeySignatureEd448: &protobufs.Ed448Signature{
|
|
||||||
PublicKey: &protobufs.Ed448PublicKey{
|
|
||||||
KeyValue: idkPoint.ToAffineCompressed(),
|
|
||||||
},
|
|
||||||
Signature: idkSignature,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SignedPreKey: &protobufs.SignedPreKey{
|
|
||||||
Challenge: spkProof.C.Bytes(),
|
|
||||||
Response: spkProof.S.Bytes(),
|
|
||||||
Statement: spkProof.Statement.ToAffineCompressed(),
|
|
||||||
SignedPreKeySignature: &protobufs.SignedPreKey_PublicKeySignatureEd448{
|
|
||||||
PublicKeySignatureEd448: &protobufs.Ed448Signature{
|
|
||||||
PublicKey: &protobufs.Ed448PublicKey{
|
|
||||||
KeyValue: spkPoint.ToAffineCompressed(),
|
|
||||||
},
|
|
||||||
Signature: spkSignature,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.Wrap(
|
|
||||||
e.publishMessage(e.filter, keyBundleAnnouncement),
|
|
||||||
"announce key bundle",
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
@ -53,8 +53,7 @@ type ChannelServer = protobufs.CeremonyService_GetPublicChannelServer

type CeremonyDataClockConsensusEngine struct {
protobufs.UnimplementedCeremonyServiceServer
frame uint64
frame *protobufs.ClockFrame
activeFrame *protobufs.ClockFrame
difficulty uint32
logger *zap.Logger
state consensus.EngineState
@ -113,6 +112,8 @@ func NewCeremonyDataClockConsensusEngine(
clockStore store.ClockStore,
keyStore store.KeyStore,
pubSub p2p.PubSub,
filter []byte,
seed []byte,
) *CeremonyDataClockConsensusEngine {
if logger == nil {
panic(errors.New("logger is nil"))
@ -143,9 +144,14 @@ func NewCeremonyDataClockConsensusEngine(
minimumPeersRequired = 3
}

difficulty := engineConfig.Difficulty
if difficulty == 0 {
difficulty = 10000
}

e := &CeremonyDataClockConsensusEngine{
frame: 0,
frame: nil,
difficulty: 10000,
difficulty: difficulty,
logger: logger,
state: consensus.EngineStateStopped,
clockStore: clockStore,
@ -182,6 +188,8 @@ func NewCeremonyDataClockConsensusEngine(
engineConfig,
)

e.filter = filter
e.input = seed
e.provingKey = signer
e.provingKeyType = keyType
e.provingKeyBytes = bytes
@ -190,16 +198,10 @@ func NewCeremonyDataClockConsensusEngine(
return e
}

func (e *CeremonyDataClockConsensusEngine) Start(
func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
filter []byte,
seed []byte,
) <-chan error {
e.logger.Info("starting ceremony consensus engine")
e.state = consensus.EngineStateStarting
errChan := make(chan error)

e.filter = filter
e.input = seed
e.state = consensus.EngineStateLoading

e.logger.Info("loading last seen state")
@ -214,16 +216,16 @@ func (e *CeremonyDataClockConsensusEngine) Start(
if latestFrame != nil {
e.setFrame(latestFrame)
} else {
latestFrame = e.createGenesisFrame()
latestFrame = e.CreateGenesisFrame(nil)
}

err = e.createCommunicationKeys()
if err != nil {
panic(err)
}

e.logger.Info("subscribing to pubsub messages")
e.pubSub.Subscribe(e.filter, e.handleMessage, true)
e.pubSub.Subscribe(
append(append([]byte{}, e.filter...), e.pubSub.GetPeerID()...),
e.handleSync,
true,
)

go func() {
server := grpc.NewServer(
@ -240,8 +242,6 @@ func (e *CeremonyDataClockConsensusEngine) Start(
}
}()

latestFrame = e.performSanityCheck(latestFrame)

e.state = consensus.EngineStateCollecting

for i := int64(0); i < e.pendingCommitWorkers; i++ {
@ -257,7 +257,7 @@ func (e *CeremonyDataClockConsensusEngine) Start(
}

timestamp := time.Now().UnixMilli()
msg := binary.BigEndian.AppendUint64([]byte{}, e.frame)
msg := binary.BigEndian.AppendUint64([]byte{}, e.frame.FrameNumber)
msg = append(msg, consensus.GetVersion()...)
msg = binary.BigEndian.AppendUint64(msg, uint64(timestamp))
sig, err := e.pubSub.SignMessage(msg)
@ -269,7 +269,7 @@ func (e *CeremonyDataClockConsensusEngine) Start(
e.peerMap[string(e.pubSub.GetPeerID())] = &peerInfo{
peerId: e.pubSub.GetPeerID(),
multiaddr: "",
maxFrame: e.frame,
maxFrame: e.frame.FrameNumber,
version: consensus.GetVersion(),
signature: sig,
publicKey: e.pubSub.GetPublicKey(),
@ -307,38 +307,8 @@ func (e *CeremonyDataClockConsensusEngine) Start(
}()

go func() {
latest := latestFrame
e.logger.Info("waiting for peer list mappings")
for {
time.Sleep(30 * time.Second)
time.Sleep(30 * time.Second)
peerCount := e.pubSub.GetNetworkPeersCount()
if peerCount >= e.minimumPeersRequired {
e.logger.Info("selecting leader")
if e.frame > latest.FrameNumber && e.frame-latest.FrameNumber > 16 &&
e.syncingTarget == nil {
e.logger.Info("rewinding sync head due to large delta")
latest, _, err = e.clockStore.GetDataClockFrame(
e.filter,
0,
)
if err != nil {
panic(err)
}
}
latest, err = e.commitLongestPath(latest)
if err != nil {
e.logger.Error("could not collect longest path", zap.Error(err))
latest, _, err = e.clockStore.GetDataClockFrame(e.filter, 0)
if err != nil {
panic(err)
}
}

latest = e.performSanityCheck(latest)
}
}
}()

go func() {
for e.state < consensus.EngineStateStopping {
peerCount := e.pubSub.GetNetworkPeersCount()
if peerCount < e.minimumPeersRequired {
@ -350,22 +320,23 @@ func (e *CeremonyDataClockConsensusEngine) Start(
} else {
switch e.state {
case consensus.EngineStateCollecting:
currentFrame := latestFrame
if latestFrame, err = e.collect(latestFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
e.state = consensus.EngineStateCollecting
errChan <- err
latestFrame = currentFrame
}
case consensus.EngineStateProving:
currentFrame := latestFrame
if latestFrame, err = e.prove(latestFrame); err != nil {
e.logger.Error("could not prove", zap.Error(err))
e.state = consensus.EngineStateCollecting
errChan <- err
latestFrame = currentFrame
}
case consensus.EngineStatePublishing:
if err = e.publishProof(latestFrame); err != nil {
e.logger.Error("could not publish", zap.Error(err))
e.state = consensus.EngineStateCollecting
errChan <- err
}
}
}
@ -389,7 +360,7 @@ func (e *CeremonyDataClockConsensusEngine) Stop(force bool) <-chan error {
for name := range e.executionEngines {
name := name
go func(name string) {
err := <-e.UnregisterExecutor(name, e.frame, force)
err := <-e.UnregisterExecutor(name, e.frame.FrameNumber, force)
if err != nil {
errChan <- err
}
@ -463,7 +434,7 @@ func (e *CeremonyDataClockConsensusEngine) performSanityCheck(
panic(err)
}

parentSelector, _, _, err := disc.GetParentSelectorAndDistance()
parentSelector, _, _, err := disc.GetParentSelectorAndDistance(nil)
if err != nil {
panic(err)
}
@ -536,7 +507,7 @@ func (e *CeremonyDataClockConsensusEngine) GetDifficulty() uint32 {
return e.difficulty
}

func (e *CeremonyDataClockConsensusEngine) GetFrame() uint64 {
func (e *CeremonyDataClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
return e.frame
}

@ -550,12 +521,6 @@ func (
return e.frameChan
}

func (
e *CeremonyDataClockConsensusEngine,
) GetActiveFrame() *protobufs.ClockFrame {
return e.activeFrame
}

func (
e *CeremonyDataClockConsensusEngine,
) GetPeerInfo() *protobufs.PeerInfoResponse {
@ -3,16 +3,18 @@ package ceremony
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/iden3/go-iden3-crypto/ff"
|
"github.com/iden3/go-iden3-crypto/ff"
|
||||||
"github.com/iden3/go-iden3-crypto/poseidon"
|
"github.com/iden3/go-iden3-crypto/poseidon"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
@ -25,7 +27,6 @@ import (
|
|||||||
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
|
"source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
|
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/store"
|
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/tries"
|
"source.quilibrium.com/quilibrium/monorepo/node/tries"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -322,9 +323,8 @@ func (e *CeremonyDataClockConsensusEngine) setFrame(
 }
 e.logger.Debug("set frame", zap.Uint64("frame_number", frame.FrameNumber))
 e.currentDistance = distance
-e.frame = frame.FrameNumber
+e.frame = frame
 e.parentSelector = parent.Bytes()
-e.activeFrame = frame
 go func() {
 e.frameChan <- frame
 }()
@@ -332,7 +332,7 @@ func (e *CeremonyDataClockConsensusEngine) setFrame(

 func (
 e *CeremonyDataClockConsensusEngine,
-) createGenesisFrame() *protobufs.ClockFrame {
+) CreateGenesisFrame(testProverKeys [][]byte) *protobufs.ClockFrame {
 e.logger.Info("creating genesis frame")
 for _, l := range strings.Split(string(e.input), "\n") {
 e.logger.Info(l)
@@ -376,7 +376,7 @@ func (
 transcript.RunningG2_256Powers = append(
 transcript.RunningG2_256Powers,
 &protobufs.BLS48581G2PublicKey{
-KeyValue: qcrypto.CeremonyPotPubKeys[len(qcrypto.CeremonyPotPubKeys)-1].
+KeyValue: qcrypto.CeremonyBLS48581G2[len(qcrypto.CeremonyBLS48581G2)-1].
 ToAffineCompressed(),
 },
 )
@@ -408,6 +408,44 @@ func (
 rewardTrie.Add(addrBytes, 0, 50)
 }

+// 2024-01-03: 1.2.0
+d, err := os.ReadFile("./retroactive_peers.json")
+if err != nil {
+panic(err)
+}
+
+type peerData struct {
+PeerId string `json:"peer_id"`
+TokenBalance uint64 `json:"token_balance"`
+}
+type rewards struct {
+Rewards []peerData `json:"rewards"`
+}
+
+retroEntries := &rewards{}
+err = json.Unmarshal(d, retroEntries)
+if err != nil {
+panic(err)
+}
+
+e.logger.Info("adding retroactive peer reward info")
+for _, s := range retroEntries.Rewards {
+peerId := s.PeerId
+peerBytes, err := base64.StdEncoding.DecodeString(peerId)
+if err != nil {
+panic(err)
+}
+
+addr, err := poseidon.HashBytes(peerBytes)
+if err != nil {
+panic(err)
+}
+
+addrBytes := addr.Bytes()
+addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
+rewardTrie.Add(addrBytes, 0, s.TokenBalance)
+}
+
 trieBytes, err := rewardTrie.Serialize()
 if err != nil {
 panic(err)
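Aside (not part of the diff): the hunk above reads `./retroactive_peers.json` at genesis and folds each entry into the reward trie. Below is a minimal, self-contained sketch of the file shape those struct tags imply; the peer ID and balance are made-up values, not real ceremony data.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shapes as the structs declared in the hunk above.
type peerData struct {
	PeerId       string `json:"peer_id"`
	TokenBalance uint64 `json:"token_balance"`
}

type rewards struct {
	Rewards []peerData `json:"rewards"`
}

func main() {
	// Hypothetical file contents; real entries come from retroactive_peers.json.
	sample := []byte(`{"rewards":[{"peer_id":"EiDzN7QwExampleOnly","token_balance":50}]}`)

	entries := &rewards{}
	if err := json.Unmarshal(sample, entries); err != nil {
		panic(err)
	}
	for _, r := range entries.Rewards {
		fmt.Println(r.PeerId, r.TokenBalance)
	}
}
```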
@@ -521,25 +559,42 @@ func (
 // first phase:
 e.logger.Info("encoding signatories to prover trie")

-for _, s := range qcrypto.CeremonySignatories {
-pubkey := s.ToAffineCompressed()
-e.logger.Info("0x" + hex.EncodeToString(pubkey))
-
-addr, err := poseidon.HashBytes(pubkey)
-if err != nil {
-panic(err)
-}
-
-addrBytes := addr.Bytes()
-addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
-e.frameProverTrie.Add(addrBytes, 0)
-}
+if len(testProverKeys) != 0 {
+e.logger.Warn(
+"TEST PROVER ENTRIES BEING ADDED, YOUR NODE WILL BE KICKED IF IN" +
+" PRODUCTION",
+)
+for _, s := range testProverKeys {
+addr, err := poseidon.HashBytes(s)
+if err != nil {
+panic(err)
+}
+
+addrBytes := addr.Bytes()
+addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
+e.frameProverTrie.Add(addrBytes, 0)
+}
+} else {
+for _, s := range qcrypto.CeremonySignatories {
+pubkey := s.ToAffineCompressed()
+e.logger.Info("0x" + hex.EncodeToString(pubkey))
+
+addr, err := poseidon.HashBytes(pubkey)
+if err != nil {
+panic(err)
+}
+
+addrBytes := addr.Bytes()
+addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
+e.frameProverTrie.Add(addrBytes, 0)
+}
+}

 e.logger.Info("proving genesis frame")
 input := []byte{}
 input = append(input, e.filter...)
-input = binary.BigEndian.AppendUint64(input, e.frame)
-input = binary.BigEndian.AppendUint64(input, uint64(0))
+input = binary.BigEndian.AppendUint64(input, 0)
+input = binary.BigEndian.AppendUint64(input, 0)
 input = binary.BigEndian.AppendUint32(input, e.difficulty)
 input = append(input, e.input...)

@@ -551,7 +606,7 @@ func (

 frame := &protobufs.ClockFrame{
 Filter: e.filter,
-FrameNumber: e.frame,
+FrameNumber: 0,
 Timestamp: 0,
 Difficulty: e.difficulty,
 Input: inputMessage,
@@ -563,7 +618,7 @@ func (
 PublicKeySignature: nil,
 }

-parent, distance, selector, err := frame.GetParentSelectorAndDistance()
+parent, _, selector, err := frame.GetParentSelectorAndDistance(nil)
 if err != nil {
 panic(err)
 }
@@ -574,9 +629,9 @@ func (
 }

 if err := e.clockStore.PutCandidateDataClockFrame(
-parent.Bytes(),
-distance.Bytes(),
-selector.Bytes(),
+parent.FillBytes(make([]byte, 32)),
+big.NewInt(0).FillBytes(make([]byte, 32)),
+selector.FillBytes(make([]byte, 32)),
 frame,
 txn,
 ); err != nil {
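Aside (not part of the diff): this hunk and the prover-selector hunk below swap manual left-padding of `Bytes()` output for `FillBytes` on a fixed 32-byte buffer. A small sketch showing that the two patterns yield the same fixed-width, big-endian encoding:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	selector := big.NewInt(42)

	// Old pattern from the left side of the diff: left-pad the variable-length
	// Bytes() output to 32 bytes by hand.
	b := selector.Bytes()
	padded := append(make([]byte, 32-len(b)), b...)

	// New pattern from the right side: FillBytes writes the value directly
	// into a fixed 32-byte buffer, zero-padded and big-endian.
	filled := selector.FillBytes(make([]byte, 32))

	fmt.Printf("%x\n%x\n", padded, filled) // identical 32-byte outputs
}
```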
@@ -643,13 +698,23 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath(
 return nil, errors.Wrap(err, "commit longest path")
 }

-selectorBytes := selector.Bytes()
-selectorBytes = append(
-make([]byte, 32-len(selectorBytes)),
-selectorBytes...,
-)
+masterFrame, err := e.clockStore.GetMasterClockFrame([]byte{
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+}, s[currentDepth].GetFrameNumber())
+if err != nil {
+return nil, errors.Wrap(err, "commit longest path")
+}
+
+proverSelector, err := masterFrame.GetSelector()
+if err != nil {
+return nil, errors.Wrap(err, "commit longest path")
+}
+
 nearest := e.frameProverTrie.FindNearest(
-selectorBytes,
+proverSelector.FillBytes(make([]byte, 32)),
 )
 addr, err := value.GetAddress()

@ -786,37 +851,6 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath(
|
|||||||
)
|
)
|
||||||
return nil, errors.Wrap(err, "commit longest path")
|
return nil, errors.Wrap(err, "commit longest path")
|
||||||
}
|
}
|
||||||
case protobufs.KeyBundleAnnouncementType:
|
|
||||||
bundle := &protobufs.KeyBundleAnnouncement{}
|
|
||||||
if err := proto.Unmarshal(c.Data, bundle); err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not commit candidate",
|
|
||||||
zap.Error(err),
|
|
||||||
zap.Uint64("frame_number", s.FrameNumber),
|
|
||||||
zap.Binary("commitment", c.Commitment),
|
|
||||||
)
|
|
||||||
return nil, errors.Wrap(err, "commit longest path")
|
|
||||||
}
|
|
||||||
|
|
||||||
e.logger.Debug(
|
|
||||||
"committing key bundle",
|
|
||||||
zap.Uint64("frame_number", s.FrameNumber),
|
|
||||||
zap.Binary("commitment", c.Commitment),
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := e.keyStore.PutKeyBundle(
|
|
||||||
bundle.ProvingKeyBytes,
|
|
||||||
c,
|
|
||||||
txn,
|
|
||||||
); err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not commit candidate",
|
|
||||||
zap.Error(err),
|
|
||||||
zap.Uint64("frame_number", s.FrameNumber),
|
|
||||||
zap.Binary("output", s.Output),
|
|
||||||
)
|
|
||||||
return nil, errors.Wrap(err, "commit longest path")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -851,6 +885,22 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath(
 }
 }

+if current.FrameNumber != latest.FrameNumber {
+to := current.FrameNumber
+if to-16 > to { // underflow
+to = 1
+} else {
+to = to - 16
+}
+
+if 1 < to {
+err := e.clockStore.DeleteCandidateDataClockFrameRange(e.filter, 1, to)
+if err != nil {
+e.logger.Error("error while purging candidate frames", zap.Error(err))
+}
+}
+}
+
 return current, nil
 }

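Aside (not part of the diff): the purge logic above guards against unsigned underflow with `if to-16 > to`. A sketch of the same check in isolation; `saturatingSub16` is a hypothetical helper name, not code from the repository:

```go
package main

import "fmt"

// saturatingSub16 mirrors the guard added above: subtracting 16 from a uint64
// smaller than 16 wraps around, and the wrapped result is detected because the
// subtraction "grew" the value.
func saturatingSub16(to uint64) uint64 {
	if to-16 > to { // underflow: to < 16
		return 1
	}
	return to - 16
}

func main() {
	fmt.Println(saturatingSub16(5))   // 1
	fmt.Println(saturatingSub16(100)) // 84
}
```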
@@ -860,7 +910,7 @@ func (e *CeremonyDataClockConsensusEngine) GetMostAheadPeer() (
 error,
 ) {
 e.peerMapMx.Lock()
-max := e.frame
+max := e.frame.FrameNumber
 var peer []byte = nil
 for _, v := range e.peerMap {
 if v.maxFrame > max {
@ -882,190 +932,6 @@ func (e *CeremonyDataClockConsensusEngine) GetMostAheadPeer() (
|
|||||||
return peer, max, nil
|
return peer, max, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *CeremonyDataClockConsensusEngine) reverseOptimisticSync(
|
|
||||||
currentLatest *protobufs.ClockFrame,
|
|
||||||
maxFrame uint64,
|
|
||||||
peerId []byte,
|
|
||||||
) (*protobufs.ClockFrame, error) {
|
|
||||||
latest := currentLatest
|
|
||||||
cc, err := e.pubSub.GetDirectChannel(peerId)
|
|
||||||
if err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not establish direct channel",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.peerMapMx.Lock()
|
|
||||||
if _, ok := e.peerMap[string(peerId)]; ok {
|
|
||||||
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
|
||||||
delete(e.peerMap, string(peerId))
|
|
||||||
}
|
|
||||||
e.peerMapMx.Unlock()
|
|
||||||
e.syncingTarget = nil
|
|
||||||
return latest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
|
|
||||||
client := protobufs.NewCeremonyServiceClient(cc)
|
|
||||||
|
|
||||||
from := latest.FrameNumber
|
|
||||||
if from <= 1 {
|
|
||||||
from = 2
|
|
||||||
}
|
|
||||||
|
|
||||||
if maxFrame-from > 32 {
|
|
||||||
// divergence is high, ask them for the latest frame and if they
|
|
||||||
// respond with a valid answer, optimistically continue from this
|
|
||||||
// frame, if we hit a fault we'll mark them as uncooperative and move
|
|
||||||
// on
|
|
||||||
from = 2
|
|
||||||
s, err := client.GetCompressedSyncFrames(
|
|
||||||
context.Background(),
|
|
||||||
&protobufs.ClockFramesRequest{
|
|
||||||
Filter: e.filter,
|
|
||||||
FromFrameNumber: maxFrame - 32,
|
|
||||||
},
|
|
||||||
grpc.MaxCallRecvMsgSize(600*1024*1024),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"received error from peer",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.peerMapMx.Lock()
|
|
||||||
if _, ok := e.peerMap[string(peerId)]; ok {
|
|
||||||
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
|
||||||
delete(e.peerMap, string(peerId))
|
|
||||||
}
|
|
||||||
e.peerMapMx.Unlock()
|
|
||||||
e.syncingTarget = nil
|
|
||||||
return latest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
var syncMsg *protobufs.CeremonyCompressedSync
|
|
||||||
for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
|
|
||||||
e.logger.Info(
|
|
||||||
"received compressed sync frame",
|
|
||||||
zap.Uint64("from", syncMsg.FromFrameNumber),
|
|
||||||
zap.Uint64("to", syncMsg.ToFrameNumber),
|
|
||||||
zap.Int("frames", len(syncMsg.TruncatedClockFrames)),
|
|
||||||
zap.Int("proofs", len(syncMsg.Proofs)),
|
|
||||||
)
|
|
||||||
var next *protobufs.ClockFrame
|
|
||||||
if next, err = e.decompressAndStoreCandidates(
|
|
||||||
peerId,
|
|
||||||
syncMsg,
|
|
||||||
e.logger.Info,
|
|
||||||
); err != nil && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not decompress and store candidate",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.peerMapMx.Lock()
|
|
||||||
if _, ok := e.peerMap[string(peerId)]; ok {
|
|
||||||
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
|
||||||
delete(e.peerMap, string(peerId))
|
|
||||||
}
|
|
||||||
e.peerMapMx.Unlock()
|
|
||||||
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
e.syncingTarget = nil
|
|
||||||
e.syncingStatus = SyncStatusFailed
|
|
||||||
return currentLatest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
if next != nil {
|
|
||||||
latest = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
e.logger.Error("error while receiving sync", zap.Error(err))
|
|
||||||
e.syncingTarget = nil
|
|
||||||
e.syncingStatus = SyncStatusFailed
|
|
||||||
return latest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer func() { e.syncingTarget = nil }()
|
|
||||||
e.logger.Info("continuing sync in background")
|
|
||||||
s, err := client.GetCompressedSyncFrames(
|
|
||||||
context.Background(),
|
|
||||||
&protobufs.ClockFramesRequest{
|
|
||||||
Filter: e.filter,
|
|
||||||
FromFrameNumber: from - 1,
|
|
||||||
ToFrameNumber: maxFrame,
|
|
||||||
},
|
|
||||||
grpc.MaxCallRecvMsgSize(600*1024*1024),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"error while retrieving sync",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.peerMapMx.Lock()
|
|
||||||
if _, ok := e.peerMap[string(peerId)]; ok {
|
|
||||||
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
|
||||||
delete(e.peerMap, string(peerId))
|
|
||||||
}
|
|
||||||
e.peerMapMx.Unlock()
|
|
||||||
e.syncingStatus = SyncStatusFailed
|
|
||||||
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
var syncMsg *protobufs.CeremonyCompressedSync
|
|
||||||
for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
|
|
||||||
e.logger.Debug(
|
|
||||||
"received compressed sync frame",
|
|
||||||
zap.Uint64("from", syncMsg.FromFrameNumber),
|
|
||||||
zap.Uint64("to", syncMsg.ToFrameNumber),
|
|
||||||
zap.Int("frames", len(syncMsg.TruncatedClockFrames)),
|
|
||||||
zap.Int("proofs", len(syncMsg.Proofs)),
|
|
||||||
)
|
|
||||||
if _, err = e.decompressAndStoreCandidates(
|
|
||||||
peerId,
|
|
||||||
syncMsg,
|
|
||||||
e.logger.Debug,
|
|
||||||
); err != nil && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not decompress and store candidate",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.syncingTarget = nil
|
|
||||||
e.syncingStatus = SyncStatusFailed
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
e.syncingTarget = nil
|
|
||||||
e.syncingStatus = SyncStatusFailed
|
|
||||||
e.logger.Error("error while receiving sync", zap.Error(err))
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
e.syncingTarget = nil
|
|
||||||
e.syncingStatus = SyncStatusNotSyncing
|
|
||||||
}()
|
|
||||||
|
|
||||||
return latest, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *CeremonyDataClockConsensusEngine) sync(
|
func (e *CeremonyDataClockConsensusEngine) sync(
|
||||||
currentLatest *protobufs.ClockFrame,
|
currentLatest *protobufs.ClockFrame,
|
||||||
maxFrame uint64,
|
maxFrame uint64,
|
||||||
@ -1095,18 +961,48 @@ func (e *CeremonyDataClockConsensusEngine) sync(
|
|||||||
from = 1
|
from = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
if maxFrame > from {
|
if maxFrame > from && maxFrame > 3 {
|
||||||
s, err := client.GetCompressedSyncFrames(
|
from = maxFrame - 2
|
||||||
context.Background(),
|
}
|
||||||
&protobufs.ClockFramesRequest{
|
|
||||||
Filter: e.filter,
|
s, err := client.GetCompressedSyncFrames(
|
||||||
FromFrameNumber: maxFrame - 16,
|
context.Background(),
|
||||||
},
|
&protobufs.ClockFramesRequest{
|
||||||
grpc.MaxCallRecvMsgSize(600*1024*1024),
|
Filter: e.filter,
|
||||||
|
FromFrameNumber: from,
|
||||||
|
},
|
||||||
|
grpc.MaxCallRecvMsgSize(600*1024*1024),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
e.logger.Error(
|
||||||
|
"received error from peer",
|
||||||
|
zap.Error(err),
|
||||||
)
|
)
|
||||||
if err != nil {
|
e.peerMapMx.Lock()
|
||||||
|
if _, ok := e.peerMap[string(peerId)]; ok {
|
||||||
|
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
||||||
|
delete(e.peerMap, string(peerId))
|
||||||
|
}
|
||||||
|
e.peerMapMx.Unlock()
|
||||||
|
return latest, errors.Wrap(err, "reverse optimistic sync")
|
||||||
|
}
|
||||||
|
var syncMsg *protobufs.CeremonyCompressedSync
|
||||||
|
for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
|
||||||
|
e.logger.Info(
|
||||||
|
"received compressed sync frame",
|
||||||
|
zap.Uint64("from", syncMsg.FromFrameNumber),
|
||||||
|
zap.Uint64("to", syncMsg.ToFrameNumber),
|
||||||
|
zap.Int("frames", len(syncMsg.TruncatedClockFrames)),
|
||||||
|
zap.Int("proofs", len(syncMsg.Proofs)),
|
||||||
|
)
|
||||||
|
var next *protobufs.ClockFrame
|
||||||
|
if next, err = e.decompressAndStoreCandidates(
|
||||||
|
peerId,
|
||||||
|
syncMsg,
|
||||||
|
e.logger.Info,
|
||||||
|
); err != nil && !errors.Is(err, ErrNoNewFrames) {
|
||||||
e.logger.Error(
|
e.logger.Error(
|
||||||
"received error from peer",
|
"could not decompress and store candidate",
|
||||||
zap.Error(err),
|
zap.Error(err),
|
||||||
)
|
)
|
||||||
e.peerMapMx.Lock()
|
e.peerMapMx.Lock()
|
||||||
@ -1115,56 +1011,31 @@ func (e *CeremonyDataClockConsensusEngine) sync(
|
|||||||
delete(e.peerMap, string(peerId))
|
delete(e.peerMap, string(peerId))
|
||||||
}
|
}
|
||||||
e.peerMapMx.Unlock()
|
e.peerMapMx.Unlock()
|
||||||
return latest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
var syncMsg *protobufs.CeremonyCompressedSync
|
|
||||||
for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
|
|
||||||
e.logger.Info(
|
|
||||||
"received compressed sync frame",
|
|
||||||
zap.Uint64("from", syncMsg.FromFrameNumber),
|
|
||||||
zap.Uint64("to", syncMsg.ToFrameNumber),
|
|
||||||
zap.Int("frames", len(syncMsg.TruncatedClockFrames)),
|
|
||||||
zap.Int("proofs", len(syncMsg.Proofs)),
|
|
||||||
)
|
|
||||||
var next *protobufs.ClockFrame
|
|
||||||
if next, err = e.decompressAndStoreCandidates(
|
|
||||||
peerId,
|
|
||||||
syncMsg,
|
|
||||||
e.logger.Info,
|
|
||||||
); err != nil && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not decompress and store candidate",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
e.peerMapMx.Lock()
|
|
||||||
if _, ok := e.peerMap[string(peerId)]; ok {
|
|
||||||
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
|
|
||||||
delete(e.peerMap, string(peerId))
|
|
||||||
}
|
|
||||||
e.peerMapMx.Unlock()
|
|
||||||
|
|
||||||
if err := cc.Close(); err != nil {
|
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
return currentLatest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
if next != nil {
|
|
||||||
latest = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
|
|
||||||
if err := cc.Close(); err != nil {
|
if err := cc.Close(); err != nil {
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
e.logger.Error("error while closing connection", zap.Error(err))
|
||||||
}
|
}
|
||||||
e.logger.Error("error while receiving sync", zap.Error(err))
|
|
||||||
return latest, errors.Wrap(err, "reverse optimistic sync")
|
|
||||||
}
|
|
||||||
|
|
||||||
e.logger.Info("received new leading frame", zap.Uint64("frame_number", latest.FrameNumber))
|
return currentLatest, errors.Wrap(err, "reverse optimistic sync")
|
||||||
|
}
|
||||||
|
if next != nil {
|
||||||
|
latest = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
|
||||||
if err := cc.Close(); err != nil {
|
if err := cc.Close(); err != nil {
|
||||||
e.logger.Error("error while closing connection", zap.Error(err))
|
e.logger.Error("error while closing connection", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
e.logger.Error("error while receiving sync", zap.Error(err))
|
||||||
|
return latest, errors.Wrap(err, "reverse optimistic sync")
|
||||||
|
}
|
||||||
|
|
||||||
|
e.logger.Info(
|
||||||
|
"received new leading frame",
|
||||||
|
zap.Uint64("frame_number", latest.FrameNumber),
|
||||||
|
)
|
||||||
|
if err := cc.Close(); err != nil {
|
||||||
|
e.logger.Error("error while closing connection", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
return latest, nil
|
return latest, nil
|
||||||
@ -1181,43 +1052,31 @@ func (e *CeremonyDataClockConsensusEngine) collect(
|
|||||||
latest = e.previousHead
|
latest = e.previousHead
|
||||||
e.syncingStatus = SyncStatusNotSyncing
|
e.syncingStatus = SyncStatusNotSyncing
|
||||||
}
|
}
|
||||||
maxFrame := uint64(0)
|
|
||||||
var peerId []byte
|
|
||||||
peerId, maxFrame, err := e.GetMostAheadPeer()
|
peerId, maxFrame, err := e.GetMostAheadPeer()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.logger.Warn("no peers available, skipping sync")
|
e.logger.Warn("no peers available, skipping sync")
|
||||||
} else if peerId == nil {
|
} else if peerId == nil {
|
||||||
e.logger.Info("currently up to date, skipping sync")
|
e.logger.Info("currently up to date, skipping sync")
|
||||||
} else if e.syncingTarget == nil {
|
} else if maxFrame-2 > latest.FrameNumber {
|
||||||
e.syncingStatus = SyncStatusAwaitingResponse
|
|
||||||
e.logger.Info(
|
|
||||||
"setting syncing target",
|
|
||||||
zap.String("peer_id", peer.ID(peerId).String()),
|
|
||||||
)
|
|
||||||
|
|
||||||
e.syncingTarget = peerId
|
|
||||||
e.previousHead = latest
|
|
||||||
latest, err = e.reverseOptimisticSync(latest, maxFrame, peerId)
|
|
||||||
} else if maxFrame > latest.FrameNumber {
|
|
||||||
latest, err = e.sync(latest, maxFrame, peerId)
|
latest, err = e.sync(latest, maxFrame, peerId)
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
|
||||||
_, err = e.keyStore.GetProvingKey(e.provingKeyBytes)
|
|
||||||
if errors.Is(err, store.ErrNotFound) &&
|
|
||||||
latest.FrameNumber-e.lastKeyBundleAnnouncementFrame > 6 {
|
|
||||||
if err = e.announceKeyBundle(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
e.lastKeyBundleAnnouncementFrame = latest.FrameNumber
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"returning leader frame",
|
"returning leader frame",
|
||||||
zap.Uint64("frame_number", latest.FrameNumber),
|
zap.Uint64("frame_number", latest.FrameNumber),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
e.logger.Info("selecting leader")
|
||||||
|
|
||||||
|
latest, err = e.commitLongestPath(latest)
|
||||||
|
if err != nil {
|
||||||
|
e.logger.Error("could not collect longest path", zap.Error(err))
|
||||||
|
latest, _, err = e.clockStore.GetDataClockFrame(e.filter, 0)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
e.setFrame(latest)
|
e.setFrame(latest)
|
||||||
e.state = consensus.EngineStateProving
|
e.state = consensus.EngineStateProving
|
||||||
return latest, nil
|
return latest, nil
|
||||||
@@ -17,11 +17,11 @@ func (e *CeremonyDataClockConsensusEngine) RegisterExecutor(
 for {
 logger.Info(
 "awaiting frame",
-zap.Uint64("current_frame", e.frame),
+zap.Uint64("current_frame", e.frame.FrameNumber),
 zap.Uint64("target_frame", frame),
 )

-newFrame := e.frame
+newFrame := e.frame.FrameNumber
 if newFrame >= frame {
 logger.Info(
 "injecting execution engine at frame",
@@ -54,11 +54,11 @@ func (e *CeremonyDataClockConsensusEngine) UnregisterExecutor(
 for {
 logger.Info(
 "awaiting frame",
-zap.Uint64("current_frame", e.frame),
+zap.Uint64("current_frame", e.frame.FrameNumber),
 zap.Uint64("target_frame", frame),
 )

-newFrame := e.frame
+newFrame := e.frame.FrameNumber
 if newFrame >= frame {
 logger.Info(
 "removing execution engine at frame",
@@ -11,7 +11,6 @@ import (
 "google.golang.org/grpc"
 "google.golang.org/protobuf/proto"
 "google.golang.org/protobuf/types/known/anypb"
-"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
 "source.quilibrium.com/quilibrium/monorepo/node/execution/ceremony/application"
 "source.quilibrium.com/quilibrium/monorepo/node/p2p"
 "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -20,52 +19,6 @@ import (
|
|||||||
|
|
||||||
var ErrNoNewFrames = errors.New("peer reported no frames")
|
var ErrNoNewFrames = errors.New("peer reported no frames")
|
||||||
|
|
||||||
func (e *CeremonyDataClockConsensusEngine) handleSync(
|
|
||||||
message *pb.Message,
|
|
||||||
) error {
|
|
||||||
e.logger.Debug(
|
|
||||||
"received message",
|
|
||||||
zap.Binary("data", message.Data),
|
|
||||||
zap.Binary("from", message.From),
|
|
||||||
zap.Binary("signature", message.Signature),
|
|
||||||
)
|
|
||||||
if bytes.Equal(message.From, e.pubSub.GetPeerID()) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := &protobufs.Message{}
|
|
||||||
|
|
||||||
if err := proto.Unmarshal(message.Data, msg); err != nil {
|
|
||||||
return errors.Wrap(err, "handle sync")
|
|
||||||
}
|
|
||||||
|
|
||||||
any := &anypb.Any{}
|
|
||||||
if err := proto.Unmarshal(msg.Payload, any); err != nil {
|
|
||||||
return errors.Wrap(err, "handle sync")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch any.TypeUrl {
|
|
||||||
case protobufs.ProvingKeyAnnouncementType:
|
|
||||||
if err := e.handleProvingKey(
|
|
||||||
message.From,
|
|
||||||
msg.Address,
|
|
||||||
any,
|
|
||||||
); err != nil {
|
|
||||||
return errors.Wrap(err, "handle sync")
|
|
||||||
}
|
|
||||||
case protobufs.KeyBundleAnnouncementType:
|
|
||||||
if err := e.handleKeyBundle(
|
|
||||||
message.From,
|
|
||||||
msg.Address,
|
|
||||||
any,
|
|
||||||
); err != nil {
|
|
||||||
return errors.Wrap(err, "handle sync")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCompressedSyncFrames implements protobufs.CeremonyServiceServer.
|
// GetCompressedSyncFrames implements protobufs.CeremonyServiceServer.
|
||||||
func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
|
func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
|
||||||
request *protobufs.ClockFramesRequest,
|
request *protobufs.ClockFramesRequest,
|
||||||
@@ -153,7 +106,7 @@ func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
 }
 }

-max := e.frame
+max := e.frame.FrameNumber
 to := request.ToFrameNumber

 // We need to slightly rewind, to compensate for unconfirmed frame heads on a
@ -469,93 +422,3 @@ func (e *CeremonyDataClockConsensusEngine) GetPublicChannel(
|
|||||||
) error {
|
) error {
|
||||||
return errors.New("not supported")
|
return errors.New("not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *CeremonyDataClockConsensusEngine) handleProvingKeyRequest(
|
|
||||||
peerID []byte,
|
|
||||||
address []byte,
|
|
||||||
any *anypb.Any,
|
|
||||||
) error {
|
|
||||||
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
request := &protobufs.ProvingKeyRequest{}
|
|
||||||
if err := any.UnmarshalTo(request); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(request.ProvingKeyBytes) == 0 {
|
|
||||||
e.logger.Debug(
|
|
||||||
"received proving key request for empty key",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
e.pubSub.Subscribe(
|
|
||||||
append(append([]byte{}, e.filter...), peerID...),
|
|
||||||
e.handleSync,
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
|
|
||||||
e.logger.Debug(
|
|
||||||
"received proving key request",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
zap.Binary("proving_key", request.ProvingKeyBytes),
|
|
||||||
)
|
|
||||||
|
|
||||||
var provingKey *protobufs.ProvingKeyAnnouncement
|
|
||||||
inclusion, err := e.keyStore.GetProvingKey(request.ProvingKeyBytes)
|
|
||||||
if err != nil {
|
|
||||||
if !errors.Is(err, store.ErrNotFound) {
|
|
||||||
e.logger.Debug(
|
|
||||||
"peer asked for proving key that returned error",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
zap.Binary("proving_key", request.ProvingKeyBytes),
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
provingKey, err = e.keyStore.GetStagedProvingKey(request.ProvingKeyBytes)
|
|
||||||
if !errors.Is(err, store.ErrNotFound) {
|
|
||||||
e.logger.Debug(
|
|
||||||
"peer asked for proving key that returned error",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
zap.Binary("proving_key", request.ProvingKeyBytes),
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
} else if err != nil {
|
|
||||||
e.logger.Debug(
|
|
||||||
"peer asked for unknown proving key",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
zap.Binary("proving_key", request.ProvingKeyBytes),
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err := proto.Unmarshal(inclusion.Data, provingKey)
|
|
||||||
if err != nil {
|
|
||||||
e.logger.Debug(
|
|
||||||
"inclusion commitment could not be deserialized",
|
|
||||||
zap.Binary("peer_id", peerID),
|
|
||||||
zap.Binary("address", address),
|
|
||||||
zap.Binary("proving_key", request.ProvingKeyBytes),
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := e.publishMessage(
|
|
||||||
append(append([]byte{}, e.filter...), peerID...),
|
|
||||||
provingKey,
|
|
||||||
); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@@ -28,22 +28,21 @@ type ConsensusEngine interface {
 Stop(force bool) <-chan error
 RegisterExecutor(exec execution.ExecutionEngine, frame uint64) <-chan error
 UnregisterExecutor(name string, frame uint64, force bool) <-chan error
-GetFrame() uint64
+GetFrame() *protobufs.ClockFrame
 GetDifficulty() uint32
 GetState() EngineState
-GetFrameChannel() <-chan uint64
+GetFrameChannel() <-chan *protobufs.ClockFrame
 }

 type DataConsensusEngine interface {
-Start(filter []byte, seed []byte) <-chan error
+Start() <-chan error
 Stop(force bool) <-chan error
 RegisterExecutor(exec execution.ExecutionEngine, frame uint64) <-chan error
 UnregisterExecutor(name string, frame uint64, force bool) <-chan error
-GetFrame() uint64
+GetFrame() *protobufs.ClockFrame
 GetDifficulty() uint32
 GetState() EngineState
 GetFrameChannel() <-chan *protobufs.ClockFrame
-GetActiveFrame() *protobufs.ClockFrame
 GetProvingKey(
 engineConfig *config.EngineConfig,
 ) (crypto.Signer, keys.KeyType, []byte, []byte)
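Aside (not part of the diff): `GetFrame()` now returns a `*protobufs.ClockFrame` instead of a bare frame number, so call sites read `FrameNumber` off the frame. A minimal sketch with stand-in types (the real interface and frame live in the consensus and protobufs packages):

```go
package main

import "fmt"

// Stand-in for protobufs.ClockFrame; only the field used here is modeled.
type ClockFrame struct {
	FrameNumber uint64
}

// Before this change the interface exposed GetFrame() uint64; now callers
// receive the full frame and read the number from it.
type frameSource interface {
	GetFrame() *ClockFrame
}

type fakeEngine struct{ frame *ClockFrame }

func (e *fakeEngine) GetFrame() *ClockFrame { return e.frame }

func main() {
	var src frameSource = &fakeEngine{frame: &ClockFrame{FrameNumber: 42}}
	// old: current := src.GetFrame()
	current := src.GetFrame().FrameNumber
	fmt.Println(current)
}
```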
@@ -52,13 +51,13 @@ type DataConsensusEngine interface {
 }

 func GetMinimumVersionCutoff() time.Time {
-return time.Date(2023, time.December, 2, 7, 0, 0, 0, time.UTC)
+return time.Date(2024, time.January, 3, 7, 0, 0, 0, time.UTC)
 }

 func GetMinimumVersion() []byte {
-return []byte{0x01, 0x01, 0x08}
+return []byte{0x01, 0x02, 0x00}
 }

 func GetVersion() []byte {
-return []byte{0x01, 0x01, 0x08}
+return []byte{0x01, 0x02, 0x00}
 }
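Aside (not part of the diff): the minimum version moves to 1.2.0 with a cutoff of 2024-01-03 07:00 UTC. The sketch below is a hypothetical illustration of how a three-byte version could be checked against the minimum after the cutoff; `meetsMinimum` is an invented helper, not the repository's actual enforcement code.

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

func GetMinimumVersion() []byte { return []byte{0x01, 0x02, 0x00} }

func GetMinimumVersionCutoff() time.Time {
	return time.Date(2024, time.January, 3, 7, 0, 0, 0, time.UTC)
}

// meetsMinimum: before the cutoff any version is accepted; after it, the
// peer's three version bytes must compare at or above the minimum.
func meetsMinimum(peerVersion []byte, now time.Time) bool {
	if now.Before(GetMinimumVersionCutoff()) {
		return true
	}
	return bytes.Compare(peerVersion, GetMinimumVersion()) >= 0
}

func main() {
	fmt.Println(meetsMinimum([]byte{0x01, 0x01, 0x08}, time.Now())) // false once past the cutoff
	fmt.Println(meetsMinimum([]byte{0x01, 0x02, 0x00}, time.Now())) // true
}
```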
@@ -36,7 +36,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {

 eg := errgroup.Group{}
 eg.SetLimit(len(e.executionEngines))
-
 for name := range e.executionEngines {
 name := name
 eg.Go(func() error {
@@ -52,7 +51,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
 )
 return errors.Wrap(err, "handle message")
 }
-
 for _, m := range messages {
 m := m
 if err := e.publishMessage(m.Address, m); err != nil {
@@ -64,11 +62,9 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
 return errors.Wrap(err, "handle message")
 }
 }
-
 return nil
 })
 }
-
 if err := eg.Wait(); err != nil {
 e.logger.Error("rejecting invalid message", zap.Error(err))
 return errors.Wrap(err, "execution failed")
@@ -96,7 +92,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
 return errors.Wrap(err, "handle clock frame data")
 }

-if e.frame > frame.FrameNumber {
+if e.frame.FrameNumber > frame.FrameNumber {
 e.logger.Debug(
 "received anachronistic frame",
 zap.Binary("sender", peerID),
@@ -131,7 +127,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
 return errors.Wrap(err, "handle clock frame data")
 }

-if e.frame < frame.FrameNumber {
+if e.frame.FrameNumber < frame.FrameNumber {
 if err := e.enqueueSeenFrame(frame); err != nil {
 e.logger.Error("could not enqueue seen clock frame", zap.Error(err))
 return errors.Wrap(err, "handle clock frame data")
@@ -43,8 +43,7 @@ func (e *MasterClockConsensusEngine) setFrame(frame *protobufs.ClockFrame) {
 copy(previousSelectorBytes[:], frame.Output[:516])

 e.logger.Debug("set frame", zap.Uint64("frame_number", frame.FrameNumber))
-e.frame = frame.FrameNumber
-e.latestFrame = frame
+e.frame = frame

 go func() {
 e.frameChan <- e.frame
@@ -53,7 +52,7 @@ func (e *MasterClockConsensusEngine) setFrame(frame *protobufs.ClockFrame) {

 func (
 e *MasterClockConsensusEngine,
-) createGenesisFrame() *protobufs.ClockFrame {
+) CreateGenesisFrame() *protobufs.ClockFrame {
 e.logger.Debug("creating genesis frame")
 b := sha3.Sum256(e.input)
 v := vdf.New(e.difficulty, b)
@@ -65,7 +64,7 @@ func (
 e.logger.Debug("proving genesis frame")
 input := []byte{}
 input = append(input, e.filter...)
-input = binary.BigEndian.AppendUint64(input, e.frame)
+input = binary.BigEndian.AppendUint64(input, 0)
 input = binary.BigEndian.AppendUint32(input, e.difficulty)
 if bytes.Equal(e.input, []byte{0x00}) {
 value := [516]byte{}
@@ -82,7 +81,7 @@ func (

 frame := &protobufs.ClockFrame{
 Filter: e.filter,
-FrameNumber: e.frame,
+FrameNumber: 0,
 Timestamp: 0,
 Difficulty: e.difficulty,
 Input: inputMessage,
@@ -107,13 +106,13 @@ func (e *MasterClockConsensusEngine) collect(
 if e.state == consensus.EngineStateCollecting {
 e.logger.Debug("collecting vdf proofs")

-latest := e.latestFrame
+latest := e.frame

 if e.syncingStatus == SyncStatusNotSyncing {
 peer, err := e.pubSub.GetRandomPeer(e.filter)
 if err != nil {
 if errors.Is(err, p2p.ErrNoPeersAvailable) {
-e.logger.Warn("no peers available, skipping sync")
+e.logger.Debug("no peers available, skipping sync")
 } else {
 e.logger.Error("error while fetching random peer", zap.Error(err))
 }
@@ -200,10 +199,10 @@ func (
 })

 if len(e.seenFrames) == 0 {
-return e.latestFrame, nil
+return e.frame, nil
 }

-prev := e.latestFrame
+prev := e.frame
 committedSet := []*protobufs.ClockFrame{}

 for len(e.seenFrames) > 0 {
@@ -17,7 +17,7 @@ func (e *MasterClockConsensusEngine) RegisterExecutor(
 go func() {
 logger.Info(
 "starting execution engine at frame",
-zap.Uint64("current_frame", e.frame),
+zap.Uint64("current_frame", e.frame.FrameNumber),
 )
 err := <-exec.Start()
 if err != nil {
@@ -29,11 +29,11 @@ func (e *MasterClockConsensusEngine) RegisterExecutor(
 for {
 logger.Info(
 "awaiting frame",
-zap.Uint64("current_frame", e.frame),
+zap.Uint64("current_frame", e.frame.FrameNumber),
 zap.Uint64("target_frame", frame),
 )

-newFrame := e.frame
+newFrame := e.frame.FrameNumber
 if newFrame >= frame {
 logger.Info(
 "injecting execution engine at frame",
@@ -76,11 +76,11 @@ func (e *MasterClockConsensusEngine) UnregisterExecutor(
 for {
 logger.Info(
 "awaiting frame",
-zap.Uint64("current_frame", e.frame),
+zap.Uint64("current_frame", e.frame.FrameNumber),
 zap.Uint64("target_frame", frame),
 )

-newFrame := e.frame
+newFrame := e.frame.FrameNumber
 if newFrame >= frame {
 logger.Info(
 "removing execution engine at frame",
@@ -25,16 +25,15 @@ const (
 )

 type MasterClockConsensusEngine struct {
-frame uint64
+frame *protobufs.ClockFrame
 difficulty uint32
 logger *zap.Logger
 state consensus.EngineState
 pubSub p2p.PubSub
 keyManager keys.KeyManager
 lastFrameReceivedAt time.Time
-latestFrame *protobufs.ClockFrame

-frameChan chan uint64
+frameChan chan *protobufs.ClockFrame
 executionEngines map[string]execution.ExecutionEngine
 filter []byte
 input []byte
@@ -79,20 +78,29 @@ func NewMasterClockConsensusEngine(
 }

 e := &MasterClockConsensusEngine{
-frame: 0,
+frame: nil,
 difficulty: 10000,
 logger: logger,
 state: consensus.EngineStateStopped,
 keyManager: keyManager,
 pubSub: pubSub,
-frameChan: make(chan uint64),
 executionEngines: map[string]execution.ExecutionEngine{},
+frameChan: make(chan *protobufs.ClockFrame),
 input: seed,
 lastFrameReceivedAt: time.Time{},
 syncingStatus: SyncStatusNotSyncing,
 clockStore: clockStore,
 }

+latestFrame, err := e.clockStore.GetLatestMasterClockFrame(e.filter)
+if err != nil && !errors.Is(err, store.ErrNotFound) {
+panic(err)
+}
+
+if latestFrame != nil {
+e.frame = latestFrame
+}
+
 if e.filter, err = hex.DecodeString(engineConfig.Filter); err != nil {
 panic(errors.Wrap(err, "could not parse filter value"))
 }
@@ -103,7 +111,7 @@ func NewMasterClockConsensusEngine(
 }

 func (e *MasterClockConsensusEngine) Start() <-chan error {
-e.logger.Info("starting consensus engine")
+e.logger.Info("starting master consensus engine")
 e.state = consensus.EngineStateStarting
 errChan := make(chan error)

@@ -112,7 +120,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {

 latestFrame, err := e.clockStore.GetLatestMasterClockFrame(e.filter)
 if err != nil && errors.Is(err, store.ErrNotFound) {
-latestFrame = e.createGenesisFrame()
+latestFrame = e.CreateGenesisFrame()
 txn, err := e.clockStore.NewTransaction()
 if err != nil {
 panic(err)
@ -131,11 +139,111 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
|
|||||||
e.setFrame(latestFrame)
|
e.setFrame(latestFrame)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
e.buildHistoricFrameCache(latestFrame)
|
||||||
|
|
||||||
|
e.logger.Info("subscribing to pubsub messages")
|
||||||
|
e.pubSub.Subscribe(e.filter, e.handleMessage, true)
|
||||||
|
e.pubSub.Subscribe(e.pubSub.GetPeerID(), e.handleSync, true)
|
||||||
|
|
||||||
|
e.state = consensus.EngineStateCollecting
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
e.logger.Info(
|
||||||
|
"peers in store",
|
||||||
|
zap.Int("peer_store_count", e.pubSub.GetPeerstoreCount()),
|
||||||
|
zap.Int("network_peer_count", e.pubSub.GetNetworkPeersCount()),
|
||||||
|
)
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for e.state < consensus.EngineStateStopping {
|
||||||
|
var err error
|
||||||
|
switch e.state {
|
||||||
|
case consensus.EngineStateCollecting:
|
||||||
|
currentFrame := latestFrame
|
||||||
|
if latestFrame, err = e.collect(latestFrame); err != nil {
|
||||||
|
e.logger.Error("could not collect", zap.Error(err))
|
||||||
|
latestFrame = currentFrame
|
||||||
|
}
|
||||||
|
case consensus.EngineStateProving:
|
||||||
|
currentFrame := latestFrame
|
||||||
|
if latestFrame, err = e.prove(latestFrame); err != nil {
|
||||||
|
e.logger.Error("could not prove", zap.Error(err))
|
||||||
|
latestFrame = currentFrame
|
||||||
|
}
|
||||||
|
case consensus.EngineStatePublishing:
|
||||||
|
if err = e.publishProof(latestFrame); err != nil {
|
||||||
|
e.logger.Error("could not publish", zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
errChan <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
return errChan
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MasterClockConsensusEngine) Stop(force bool) <-chan error {
|
||||||
|
e.logger.Info("stopping consensus engine")
|
||||||
|
e.state = consensus.EngineStateStopping
|
||||||
|
errChan := make(chan error)
|
||||||
|
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(len(e.executionEngines))
|
||||||
|
for name := range e.executionEngines {
|
||||||
|
name := name
|
||||||
|
go func(name string) {
|
||||||
|
err := <-e.UnregisterExecutor(name, e.frame.FrameNumber, force)
|
||||||
|
if err != nil {
|
||||||
|
errChan <- err
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
e.logger.Info("waiting for execution engines to stop")
|
||||||
|
wg.Wait()
|
||||||
|
e.logger.Info("execution engines stopped")
|
||||||
|
|
||||||
|
e.state = consensus.EngineStateStopped
|
||||||
|
go func() {
|
||||||
|
errChan <- nil
|
||||||
|
}()
|
||||||
|
return errChan
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MasterClockConsensusEngine) GetDifficulty() uint32 {
|
||||||
|
return e.difficulty
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MasterClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
|
||||||
|
return e.frame
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MasterClockConsensusEngine) GetState() consensus.EngineState {
|
||||||
|
return e.state
|
||||||
|
}
|
||||||
|
|
||||||
|
func (
|
||||||
|
e *MasterClockConsensusEngine,
|
||||||
|
) GetFrameChannel() <-chan *protobufs.ClockFrame {
|
||||||
|
return e.frameChan
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MasterClockConsensusEngine) buildHistoricFrameCache(
|
||||||
|
latestFrame *protobufs.ClockFrame,
|
||||||
|
) {
|
||||||
e.historicFrames = []*protobufs.ClockFrame{}
|
e.historicFrames = []*protobufs.ClockFrame{}
|
||||||
|
|
||||||
if latestFrame.FrameNumber != 0 {
|
if latestFrame.FrameNumber != 0 {
|
||||||
min := uint64(0)
|
min := uint64(0)
|
||||||
if latestFrame.FrameNumber-255 > min {
|
if latestFrame.FrameNumber-255 > min && latestFrame.FrameNumber > 255 {
|
||||||
min = latestFrame.FrameNumber - 255
|
min = latestFrame.FrameNumber - 255
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,98 +271,4 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
e.historicFrames = append(e.historicFrames, latestFrame)
|
e.historicFrames = append(e.historicFrames, latestFrame)
|
||||||
|
|
||||||
e.logger.Info("subscribing to pubsub messages")
|
|
||||||
e.pubSub.Subscribe(e.filter, e.handleMessage, true)
|
|
||||||
e.pubSub.Subscribe(e.pubSub.GetPeerID(), e.handleSync, true)
|
|
||||||
|
|
||||||
e.state = consensus.EngineStateCollecting
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
e.logger.Info(
|
|
||||||
"peers in store",
|
|
||||||
zap.Int("peer_store_count", e.pubSub.GetPeerstoreCount()),
|
|
||||||
zap.Int("network_peer_count", e.pubSub.GetNetworkPeersCount()),
|
|
||||||
)
|
|
||||||
time.Sleep(10 * time.Second)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for e.state < consensus.EngineStateStopping {
|
|
||||||
var err error
|
|
||||||
switch e.state {
|
|
||||||
case consensus.EngineStateCollecting:
|
|
||||||
if latestFrame, err = e.collect(latestFrame); err != nil {
|
|
||||||
e.logger.Error("could not collect", zap.Error(err))
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
case consensus.EngineStateProving:
|
|
||||||
if latestFrame, err = e.prove(latestFrame); err != nil {
|
|
||||||
e.logger.Error("could not prove", zap.Error(err))
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
case consensus.EngineStatePublishing:
|
|
||||||
if err = e.publishProof(latestFrame); err != nil {
|
|
||||||
e.logger.Error("could not publish", zap.Error(err))
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
errChan <- nil
|
|
||||||
}()
|
|
||||||
|
|
||||||
return errChan
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *MasterClockConsensusEngine) Stop(force bool) <-chan error {
|
|
||||||
e.logger.Info("stopping consensus engine")
|
|
||||||
e.state = consensus.EngineStateStopping
|
|
||||||
errChan := make(chan error)
|
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
|
||||||
wg.Add(len(e.executionEngines))
|
|
||||||
for name := range e.executionEngines {
|
|
||||||
name := name
|
|
||||||
go func(name string) {
|
|
||||||
err := <-e.UnregisterExecutor(name, e.frame, force)
|
|
||||||
if err != nil {
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
}(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
e.logger.Info("waiting for execution engines to stop")
|
|
||||||
wg.Wait()
|
|
||||||
e.logger.Info("execution engines stopped")
|
|
||||||
|
|
||||||
e.state = consensus.EngineStateStopped
|
|
||||||
|
|
||||||
e.engineMx.Lock()
|
|
||||||
defer e.engineMx.Unlock()
|
|
||||||
go func() {
|
|
||||||
errChan <- nil
|
|
||||||
}()
|
|
||||||
return errChan
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *MasterClockConsensusEngine) GetDifficulty() uint32 {
|
|
||||||
return e.difficulty
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *MasterClockConsensusEngine) GetFrame() uint64 {
|
|
||||||
return e.frame
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *MasterClockConsensusEngine) GetState() consensus.EngineState {
|
|
||||||
return e.state
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *MasterClockConsensusEngine) GetFrameChannel() <-chan uint64 {
|
|
||||||
return e.frameChan
|
|
||||||
}
|
}
|
||||||
@@ -5,7 +5,6 @@ import (

 "github.com/pkg/errors"
 "go.uber.org/zap"
-"golang.org/x/sync/errgroup"
 "google.golang.org/protobuf/proto"
 "google.golang.org/protobuf/types/known/anypb"
 "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -30,46 +29,6 @@ func (e *MasterClockConsensusEngine) handleSync(message *pb.Message) error {
|
|||||||
return errors.Wrap(err, "handle sync")
|
return errors.Wrap(err, "handle sync")
|
||||||
}
|
}
|
||||||
|
|
||||||
eg := errgroup.Group{}
|
|
||||||
eg.SetLimit(len(e.executionEngines))
|
|
||||||
|
|
||||||
for name := range e.executionEngines {
|
|
||||||
name := name
|
|
||||||
eg.Go(func() error {
|
|
||||||
messages, err := e.executionEngines[name].ProcessMessage(
|
|
||||||
msg.Address,
|
|
||||||
msg,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not process message for engine",
|
|
||||||
zap.Error(err),
|
|
||||||
zap.String("engine_name", name),
|
|
||||||
)
|
|
||||||
return errors.Wrap(err, "handle message")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, m := range messages {
|
|
||||||
m := m
|
|
||||||
if err := e.publishMessage(e.filter, m); err != nil {
|
|
||||||
e.logger.Error(
|
|
||||||
"could not publish message for engine",
|
|
||||||
zap.Error(err),
|
|
||||||
zap.String("engine_name", name),
|
|
||||||
)
|
|
||||||
return errors.Wrap(err, "handle message")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
|
||||||
e.logger.Error("rejecting invalid message", zap.Error(err))
|
|
||||||
return errors.Wrap(err, "handle sync")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch any.TypeUrl {
|
switch any.TypeUrl {
|
||||||
case protobufs.ClockFramesResponseType:
|
case protobufs.ClockFramesResponseType:
|
||||||
if err := e.handleClockFramesResponse(
|
if err := e.handleClockFramesResponse(
|
||||||
@@ -149,7 +108,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesResponse(
 zap.Uint64("frame_number", frame.FrameNumber),
 )

-if e.frame < frame.FrameNumber {
+if e.frame.FrameNumber < frame.FrameNumber {
 if err := e.enqueueSeenFrame(frame); err != nil {
 e.logger.Error("could not enqueue seen clock frame", zap.Error(err))
 return errors.Wrap(err, "handle clock frame response")
@ -186,7 +145,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesRequest(
|
|||||||
|
|
||||||
from := request.FromFrameNumber
|
from := request.FromFrameNumber
|
||||||
|
|
||||||
if e.frame < from || len(e.historicFrames) == 0 {
|
if e.frame.FrameNumber < from || len(e.historicFrames) == 0 {
|
||||||
e.logger.Debug(
|
e.logger.Debug(
|
||||||
"peer asked for undiscovered frame",
|
"peer asked for undiscovered frame",
|
||||||
zap.Binary("peer_id", peerID),
|
zap.Binary("peer_id", peerID),
|
||||||
@ -210,8 +169,8 @@ func (e *MasterClockConsensusEngine) handleClockFramesRequest(
|
|||||||
to = request.FromFrameNumber + 127
|
to = request.FromFrameNumber + 127
|
||||||
}
|
}
|
||||||
|
|
||||||
if int(to) > int(e.latestFrame.FrameNumber) {
|
if int(to) > int(e.frame.FrameNumber) {
|
||||||
to = e.latestFrame.FrameNumber
|
to = e.frame.FrameNumber
|
||||||
}
|
}
|
||||||
|
|
||||||
e.logger.Debug(
|
e.logger.Debug(
|
||||||
|
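handleClockFramesRequest above serves at most 128 frames per request and clamps the upper bound to the engine's own head frame. A standalone sketch of that clamping logic, with hypothetical names standing in for the engine's request and state fields:

package main

import "fmt"

// clampFrameRange mirrors the bounds logic above: reject requests beyond the
// latest discovered frame, cap the window at 128 frames, and never promise
// frames the engine has not produced yet.
func clampFrameRange(from, requestedTo, latest uint64) (uint64, uint64, bool) {
	if latest < from {
		return 0, 0, false // peer asked for an undiscovered frame
	}
	to := requestedTo
	if to == 0 || to > from+127 {
		to = from + 127
	}
	if to > latest {
		to = latest
	}
	return from, to, true
}

func main() {
	from, to, ok := clampFrameRange(100, 0, 150)
	fmt.Println(from, to, ok) // 100 150 true
}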
@ -75,6 +75,221 @@ var CeremonyPotPubKeys []curves.PairingPoint
|
|||||||
var CeremonySignatories []curves.Point
|
var CeremonySignatories []curves.Point
|
||||||
var FFTBLS48581 map[uint64][]curves.PairingPoint = make(map[uint64][]curves.PairingPoint)
|
var FFTBLS48581 map[uint64][]curves.PairingPoint = make(map[uint64][]curves.PairingPoint)
|
||||||
|
|
||||||
|
func TestInit(file string) {
|
||||||
|
// start with phase 1 ceremony:
|
||||||
|
csBytes, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bls48581.Init()
|
||||||
|
|
||||||
|
cs := &CeremonyState{}
|
||||||
|
if err := json.Unmarshal(csBytes, cs); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
g1s := make([]curves.PairingPoint, 1024)
|
||||||
|
g2s := make([]curves.PairingPoint, 257)
|
||||||
|
g1ffts := make([]curves.PairingPoint, 1024)
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(1024)
|
||||||
|
|
||||||
|
for i := 0; i < 1024; i++ {
|
||||||
|
i := i
|
||||||
|
go func() {
|
||||||
|
b, err := hex.DecodeString(cs.PowersOfTau.G1Affines[i][2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g1s[i] = g1.(curves.PairingPoint)
|
||||||
|
|
||||||
|
f, err := hex.DecodeString(cs.PowersOfTau.G1FFT[i][2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g1fft, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(f)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g1ffts[i] = g1fft.(curves.PairingPoint)
|
||||||
|
|
||||||
|
if i < 257 {
|
||||||
|
b, err := hex.DecodeString(cs.PowersOfTau.G2Affines[i][2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(
|
||||||
|
b,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g2s[i] = g2.(curves.PairingPoint)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
wg.Add(len(cs.Witness.RunningProducts))
|
||||||
|
CeremonyRunningProducts = make([]curves.PairingPoint, len(cs.Witness.RunningProducts))
|
||||||
|
for i, s := range cs.Witness.RunningProducts {
|
||||||
|
i, s := i, s
|
||||||
|
go func() {
|
||||||
|
b, err := hex.DecodeString(s[2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
CeremonyRunningProducts[i] = g1.(curves.PairingPoint)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
wg.Add(len(cs.Witness.PotPubKeys))
|
||||||
|
CeremonyPotPubKeys = make([]curves.PairingPoint, len(cs.Witness.PotPubKeys))
|
||||||
|
for i, s := range cs.Witness.PotPubKeys {
|
||||||
|
i, s := i, s
|
||||||
|
go func() {
|
||||||
|
b, err := hex.DecodeString(s[2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(b)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
CeremonyPotPubKeys[i] = g2.(curves.PairingPoint)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
wg.Add(len(cs.VoucherPubKeys))
|
||||||
|
CeremonySignatories = make([]curves.Point, len(cs.VoucherPubKeys))
|
||||||
|
for i, s := range cs.VoucherPubKeys {
|
||||||
|
i, s := i, s
|
||||||
|
go func() {
|
||||||
|
b, err := hex.DecodeString(s[2:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
CeremonySignatories[i], err = curves.ED448().Point.FromAffineCompressed(b)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
CeremonyBLS48581G1 = g1s
|
||||||
|
CeremonyBLS48581G2 = g2s
|
||||||
|
|
||||||
|
// Post-ceremony, precompute everything and put it in the finalized ceremony
|
||||||
|
// state
|
||||||
|
modulus := make([]byte, 73)
|
||||||
|
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
|
||||||
|
q := new(big.Int).SetBytes(modulus)
|
||||||
|
sizes := []int64{16, 128, 1024}
|
||||||
|
|
||||||
|
wg.Add(len(sizes))
|
||||||
|
root := make([]curves.PairingScalar, 3)
|
||||||
|
roots := make([][]curves.PairingScalar, 3)
|
||||||
|
reverseRoots := make([][]curves.PairingScalar, 3)
|
||||||
|
ffts := make([][]curves.PairingPoint, 3)
|
||||||
|
|
||||||
|
for idx, i := range sizes {
|
||||||
|
i := i
|
||||||
|
idx := idx
|
||||||
|
go func() {
|
||||||
|
exp := new(big.Int).Quo(
|
||||||
|
new(big.Int).Sub(q, big.NewInt(1)),
|
||||||
|
big.NewInt(i),
|
||||||
|
)
|
||||||
|
rootOfUnity := new(big.Int).Exp(big.NewInt(int64(37)), exp, q)
|
||||||
|
roots[idx] = make([]curves.PairingScalar, i+1)
|
||||||
|
reverseRoots[idx] = make([]curves.PairingScalar, i+1)
|
||||||
|
wg2 := sync.WaitGroup{}
|
||||||
|
wg2.Add(int(i))
|
||||||
|
for j := int64(0); j < i; j++ {
|
||||||
|
j := j
|
||||||
|
go func() {
|
||||||
|
rev := big.NewInt(int64(j))
|
||||||
|
r := new(big.Int).Exp(
|
||||||
|
rootOfUnity,
|
||||||
|
rev,
|
||||||
|
q,
|
||||||
|
)
|
||||||
|
scalar, _ := (&curves.ScalarBls48581{}).SetBigInt(r)
|
||||||
|
|
||||||
|
if rev.Cmp(big.NewInt(1)) == 0 {
|
||||||
|
root[idx] = scalar.(curves.PairingScalar)
|
||||||
|
}
|
||||||
|
|
||||||
|
roots[idx][j] = scalar.(curves.PairingScalar)
|
||||||
|
reverseRoots[idx][i-j] = roots[idx][j]
|
||||||
|
wg2.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg2.Wait()
|
||||||
|
roots[idx][i] = roots[idx][0]
|
||||||
|
reverseRoots[idx][0] = reverseRoots[idx][i]
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
wg.Add(len(sizes))
|
||||||
|
for i := range root {
|
||||||
|
i := i
|
||||||
|
RootOfUnityBLS48581[uint64(sizes[i])] = root[i]
|
||||||
|
RootsOfUnityBLS48581[uint64(sizes[i])] = roots[i]
|
||||||
|
ReverseRootsOfUnityBLS48581[uint64(sizes[i])] = reverseRoots[i]
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// We precomputed 65536, others are cheap and will be fully precomputed
|
||||||
|
// post-ceremony
|
||||||
|
if sizes[i] < 65536 {
|
||||||
|
fftG1, err := FFTG1(
|
||||||
|
CeremonyBLS48581G1[:sizes[i]],
|
||||||
|
*curves.BLS48581(
|
||||||
|
curves.BLS48581G1().NewGeneratorPoint(),
|
||||||
|
),
|
||||||
|
uint64(sizes[i]),
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ffts[i] = fftG1
|
||||||
|
} else {
|
||||||
|
ffts[i] = g1ffts
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
for i := range root {
|
||||||
|
FFTBLS48581[uint64(sizes[i])] = ffts[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func Init() {
// start with phase 1 ceremony:
csBytes, err := os.ReadFile("./ceremony.json")
@@ -202,7 +417,7 @@ func Init() {
// Post-ceremony, precompute everything and put it in the finalized ceremony
// state
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16, 128, 1024, 65536}

@@ -310,7 +525,7 @@ func NewKZGProver(

func DefaultKZGProver() *KZGProver {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
return NewKZGProver(
curves.BLS48581(curves.BLS48581G1().Point),
@@ -426,7 +641,7 @@ func (p *KZGProver) EvaluateLagrangeForm(

xBI := x.BigInt()
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
xBI.Exp(xBI, width.BigInt(), q)
xBI.Sub(xBI, big.NewInt(1))

@@ -81,7 +81,7 @@ func TestMain(m *testing.M) {
// Post-ceremony, precompute everything and put it in the finalized ceremony
// state
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16}

@@ -173,7 +173,7 @@ func TestMain(m *testing.M) {

func TestKzgBytesToPoly(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := crypto.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)

@@ -215,7 +215,7 @@ func TestKzgBytesToPoly(t *testing.T) {

func TestPolynomialCommitment(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := crypto.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)

@@ -263,7 +263,7 @@ func TestPolynomialCommitment(t *testing.T) {

func TestKZGProof(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order).ToBytes(modulus)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := crypto.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)
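The same modulus derivation — serialize the BLS48-581 curve order into a 73-byte buffer, then parse it back as a big.Int — now appears in Init, DefaultKZGProver, EvaluateLagrangeForm, and each of the tests above. A hedged sketch of just that round-trip, with a stand-in for the bls48581 call so it runs on its own:

package main

import (
	"fmt"
	"math/big"
)

// curveOrderModulus mimics the repeated pattern above: the curve order is
// written into a fixed 73-byte big-endian buffer and re-read as a *big.Int.
// orderBytes stands in for bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes.
func curveOrderModulus(orderBytes func([]byte)) *big.Int {
	modulus := make([]byte, 73)
	orderBytes(modulus)
	return new(big.Int).SetBytes(modulus)
}

func main() {
	// Hypothetical stand-in order value; the real code fills the buffer from
	// the bls48581 package.
	fake := new(big.Int).Lsh(big.NewInt(1), 500)
	q := curveOrderModulus(func(buf []byte) { fake.FillBytes(buf) })
	fmt.Println(q.BitLen()) // 501
}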
@@ -290,27 +290,51 @@ func TestKZGProof(t *testing.T) {
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
false,
true,
)
require.NoError(t, err)

commit, err := p.Commit(evalPoly)
commit, err := p.Commit(poly)
require.NoError(t, err)

z, err := (&curves.ScalarBls48581{}).SetBigInt(big.NewInt(2))
z := crypto.RootsOfUnityBLS48581[16][2]
require.NoError(t, err)

checky := poly[len(poly)-1]
checky := evalPoly[len(poly)-1]
for i := len(poly) - 2; i >= 0; i-- {
for i := len(evalPoly) - 2; i >= 0; i-- {
checky = checky.Mul(z).Add(poly[i]).(curves.PairingScalar)
checky = checky.Mul(z).Add(evalPoly[i]).(curves.PairingScalar)
}
y, err := p.EvaluateLagrangeForm(evalPoly, z.(curves.PairingScalar), 16, 0)
fmt.Printf("%+x\n", checky.Bytes())
require.NoError(t, err)
require.Equal(t, y.Cmp(checky), 0)

proof, err := p.Prove(evalPoly, commit, z.(curves.PairingScalar))
divisors := make([]curves.PairingScalar, 2)
divisors[0] = (&curves.ScalarBls48581{}).Zero().Sub(z).(*curves.ScalarBls48581)
divisors[1] = (&curves.ScalarBls48581{}).One().(*curves.ScalarBls48581)

a := make([]curves.PairingScalar, len(evalPoly))
for i := 0; i < len(a); i++ {
a[i] = evalPoly[i].Clone().(*curves.ScalarBls48581)
}

// Adapted from Feist's amortized proofs:
aPos := len(a) - 1
bPos := len(divisors) - 1
diff := aPos - bPos
out := make([]curves.PairingScalar, diff+1, diff+1)
for diff >= 0 {
out[diff] = a[aPos].Div(divisors[bPos]).(*curves.ScalarBls48581)
for i := bPos; i >= 0; i-- {
a[diff+i] = a[diff+i].Sub(
out[diff].Mul(divisors[i]),
).(*curves.ScalarBls48581)
}
aPos -= 1
diff -= 1
}

proof, err := p.PointLinearCombination(crypto.CeremonyBLS48581G1[:15], out)
// proof, err := p.Prove(evalPoly, commit, z.(curves.PairingScalar))
require.NoError(t, err)
require.True(t, p.Verify(commit, z.(curves.PairingScalar), y, proof))
require.True(t, p.Verify(commit, z, checky, proof))

commitments, err := p.CommitAggregate(
[][]curves.PairingScalar{evalPoly},
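The updated TestKZGProof builds its opening proof by dividing the evaluation-form polynomial by (x − z) using the divisor pair {−z, 1}, the synthetic-division step borrowed from Feist's amortized proofs. A self-contained arithmetic sketch of that division over a toy prime, using math/big in place of the curve scalar type (illustrative names, not the prover's API):

package main

import (
	"fmt"
	"math/big"
)

// divideByLinear returns the quotient of poly (coefficients, lowest degree
// first) divided by (x - z) modulo q, mirroring the synthetic-division loop
// in the test where divisors = {-z, 1}.
func divideByLinear(poly []*big.Int, z, q *big.Int) []*big.Int {
	out := make([]*big.Int, len(poly)-1)
	carry := new(big.Int)
	// Work from the highest coefficient down; carry folds z times the last
	// quotient coefficient into the next one.
	for i := len(poly) - 1; i >= 1; i-- {
		c := new(big.Int).Add(poly[i], carry)
		c.Mod(c, q)
		out[i-1] = c
		carry = new(big.Int).Mul(c, z)
		carry.Mod(carry, q)
	}
	return out
}

// eval is plain Horner evaluation mod q, the same shape as the checky loop.
func eval(poly []*big.Int, x, q *big.Int) *big.Int {
	acc := new(big.Int)
	for i := len(poly) - 1; i >= 0; i-- {
		acc.Mul(acc, x).Add(acc, poly[i]).Mod(acc, q)
	}
	return acc
}

func main() {
	q := big.NewInt(7919) // toy prime standing in for the BLS48-581 group order
	poly := []*big.Int{big.NewInt(5), big.NewInt(11), big.NewInt(3), big.NewInt(2)}
	z := big.NewInt(17)
	quot := divideByLinear(poly, z, q)
	// Sanity check: P(x) - P(z) == Q(x) * (x - z) at another point.
	x := big.NewInt(123)
	lhs := new(big.Int).Sub(eval(poly, x, q), eval(poly, z, q))
	lhs.Mod(lhs, q)
	rhs := new(big.Int).Mul(eval(quot, x, q), new(big.Int).Sub(x, z))
	rhs.Mod(rhs, q)
	fmt.Println(lhs.Cmp(rhs) == 0) // true
}

The quotient coefficients play the role of out in the test, which then combines them against the ceremony G1 powers via PointLinearCombination to form the proof.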
@@ -14,8 +14,6 @@ var ErrInvalidStateTransition = errors.New("invalid state transition")

type CeremonyApplicationState int

const V118_CUTOFF = uint64(45000)

var CEREMONY_ADDRESS = []byte{
// SHA3-256("q_kzg_ceremony")
0x34, 0x00, 0x1b, 0xe7, 0x43, 0x2c, 0x2e, 0x66,
@@ -50,7 +48,7 @@ type CeremonyApplication struct {
StateCount uint64
RoundCount uint64
LobbyState CeremonyApplicationState
ActiveParticipants []*protobufs.Ed448PublicKey
ActiveParticipants []*protobufs.CeremonyLobbyJoin
NextRoundPreferredParticipants []*protobufs.Ed448PublicKey
LatestSeenProverAttestations []*protobufs.CeremonySeenProverAttestation
DroppedParticipantAttestations []*protobufs.CeremonyDroppedProverAttestation
@@ -82,8 +80,22 @@ func (a *CeremonyApplication) Equals(b *CeremonyApplication) bool {

for i := range a.ActiveParticipants {
if !bytes.Equal(
a.ActiveParticipants[i].KeyValue,
a.ActiveParticipants[i].PublicKeySignatureEd448.PublicKey.KeyValue,
b.ActiveParticipants[i].KeyValue,
b.ActiveParticipants[i].PublicKeySignatureEd448.PublicKey.KeyValue,
) {
return false
}

if !bytes.Equal(
a.ActiveParticipants[i].IdentityKey.KeyValue,
b.ActiveParticipants[i].IdentityKey.KeyValue,
) {
return false
}

if !bytes.Equal(
a.ActiveParticipants[i].SignedPreKey.KeyValue,
b.ActiveParticipants[i].SignedPreKey.KeyValue,
) {
return false
}
@ -856,7 +868,7 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if currentFrameNumber > V118_CUTOFF && a.StateCount > 100 {
|
if a.StateCount > 10 {
|
||||||
shouldReset = true
|
shouldReset = true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -866,17 +878,19 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
a.RoundCount = 0
|
a.RoundCount = 0
|
||||||
for _, p := range a.ActiveParticipants {
|
for _, p := range a.ActiveParticipants {
|
||||||
p := p
|
p := p
|
||||||
if _, ok := droppedProversMap[string(p.KeyValue)]; !ok {
|
if _, ok := droppedProversMap[string(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)]; !ok {
|
||||||
a.NextRoundPreferredParticipants = append(
|
a.NextRoundPreferredParticipants = append(
|
||||||
append(
|
append(
|
||||||
[]*protobufs.Ed448PublicKey{},
|
[]*protobufs.Ed448PublicKey{},
|
||||||
p,
|
p.PublicKeySignatureEd448.PublicKey,
|
||||||
),
|
),
|
||||||
a.NextRoundPreferredParticipants...,
|
a.NextRoundPreferredParticipants...,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.ActiveParticipants = []*protobufs.Ed448PublicKey{}
|
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
|
||||||
a.DroppedParticipantAttestations =
|
a.DroppedParticipantAttestations =
|
||||||
[]*protobufs.CeremonyDroppedProverAttestation{}
|
[]*protobufs.CeremonyDroppedProverAttestation{}
|
||||||
a.LatestSeenProverAttestations =
|
a.LatestSeenProverAttestations =
|
||||||
@ -958,7 +972,7 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
}
|
}
|
||||||
|
|
||||||
a.LobbyState = CEREMONY_APPLICATION_STATE_VALIDATING
|
a.LobbyState = CEREMONY_APPLICATION_STATE_VALIDATING
|
||||||
a.ActiveParticipants = []*protobufs.Ed448PublicKey{}
|
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
|
||||||
a.DroppedParticipantAttestations =
|
a.DroppedParticipantAttestations =
|
||||||
[]*protobufs.CeremonyDroppedProverAttestation{}
|
[]*protobufs.CeremonyDroppedProverAttestation{}
|
||||||
a.LatestSeenProverAttestations =
|
a.LatestSeenProverAttestations =
|
||||||
@ -984,7 +998,7 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if currentFrameNumber > V118_CUTOFF && a.StateCount > 100 {
|
if a.StateCount > 10 {
|
||||||
shouldReset = true
|
shouldReset = true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -994,17 +1008,19 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
a.RoundCount = 0
|
a.RoundCount = 0
|
||||||
for _, p := range a.ActiveParticipants {
|
for _, p := range a.ActiveParticipants {
|
||||||
p := p
|
p := p
|
||||||
if _, ok := droppedProversMap[string(p.KeyValue)]; !ok {
|
if _, ok := droppedProversMap[string(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)]; !ok {
|
||||||
a.NextRoundPreferredParticipants = append(
|
a.NextRoundPreferredParticipants = append(
|
||||||
append(
|
append(
|
||||||
[]*protobufs.Ed448PublicKey{},
|
[]*protobufs.Ed448PublicKey{},
|
||||||
p,
|
p.PublicKeySignatureEd448.PublicKey,
|
||||||
),
|
),
|
||||||
a.NextRoundPreferredParticipants...,
|
a.NextRoundPreferredParticipants...,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.ActiveParticipants = []*protobufs.Ed448PublicKey{}
|
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
|
||||||
a.DroppedParticipantAttestations =
|
a.DroppedParticipantAttestations =
|
||||||
[]*protobufs.CeremonyDroppedProverAttestation{}
|
[]*protobufs.CeremonyDroppedProverAttestation{}
|
||||||
a.LatestSeenProverAttestations =
|
a.LatestSeenProverAttestations =
|
||||||
@ -1036,7 +1052,25 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.UpdatedTranscript == nil {
|
shouldReset := false
|
||||||
|
if a.StateCount > 100 {
|
||||||
|
shouldReset = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if shouldReset {
|
||||||
|
a.LobbyState = CEREMONY_APPLICATION_STATE_OPEN
|
||||||
|
a.StateCount = 0
|
||||||
|
a.RoundCount = 0
|
||||||
|
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
|
||||||
|
a.DroppedParticipantAttestations =
|
||||||
|
[]*protobufs.CeremonyDroppedProverAttestation{}
|
||||||
|
a.LatestSeenProverAttestations =
|
||||||
|
[]*protobufs.CeremonySeenProverAttestation{}
|
||||||
|
a.TranscriptRoundAdvanceCommits =
|
||||||
|
[]*protobufs.CeremonyAdvanceRound{}
|
||||||
|
a.TranscriptShares =
|
||||||
|
[]*protobufs.CeremonyTranscriptShare{}
|
||||||
|
} else if a.UpdatedTranscript == nil {
|
||||||
rewardMultiplier := uint64(1)
|
rewardMultiplier := uint64(1)
|
||||||
for i := 0; i < len(a.FinalCommits)-1; i++ {
|
for i := 0; i < len(a.FinalCommits)-1; i++ {
|
||||||
rewardMultiplier = rewardMultiplier << 1
|
rewardMultiplier = rewardMultiplier << 1
|
||||||
@ -1064,7 +1098,7 @@ func (a *CeremonyApplication) ApplyTransition(
|
|||||||
a.LobbyState = CEREMONY_APPLICATION_STATE_OPEN
|
a.LobbyState = CEREMONY_APPLICATION_STATE_OPEN
|
||||||
a.StateCount = 0
|
a.StateCount = 0
|
||||||
a.RoundCount = 0
|
a.RoundCount = 0
|
||||||
a.ActiveParticipants = []*protobufs.Ed448PublicKey{}
|
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
|
||||||
a.DroppedParticipantAttestations =
|
a.DroppedParticipantAttestations =
|
||||||
[]*protobufs.CeremonyDroppedProverAttestation{}
|
[]*protobufs.CeremonyDroppedProverAttestation{}
|
||||||
a.LatestSeenProverAttestations =
|
a.LatestSeenProverAttestations =
|
||||||
|
@@ -22,7 +22,10 @@ func (a *CeremonyApplication) applySeenProverAttestation(

inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(p.KeyValue, seenProverAttestation.SeenProverKey.KeyValue) {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
seenProverAttestation.SeenProverKey.KeyValue,
) {
inParticipantList = true
break
}
@@ -93,7 +96,7 @@ func (a *CeremonyApplication) applyDroppedProverAttestation(
inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(
p.KeyValue,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
droppedProverAttestation.DroppedProverKey.KeyValue,
) {
inParticipantList = true
@@ -189,7 +192,7 @@ func (a *CeremonyApplication) applyTranscriptCommit(
inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(
p.KeyValue,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
transcriptCommit.ProverSignature.PublicKey.KeyValue,
) {
inParticipantList = true
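The three hunks above repeat the same membership test, now keyed on the lobby join's Ed448 prover key. A hypothetical helper, not part of the diff, that captures the check once:

package main

import (
	"bytes"
	"fmt"
)

// lobbyJoin is a stand-in for protobufs.CeremonyLobbyJoin; only the prover
// public key matters for the membership checks above.
type lobbyJoin struct {
	proverKey []byte
}

// inParticipantList reports whether key matches any active participant's
// prover key, the loop repeated in applySeenProverAttestation,
// applyDroppedProverAttestation and applyTranscriptCommit.
func inParticipantList(participants []lobbyJoin, key []byte) bool {
	for _, p := range participants {
		if bytes.Equal(p.proverKey, key) {
			return true
		}
	}
	return false
}

func main() {
	active := []lobbyJoin{{proverKey: []byte{0x01, 0x02}}}
	fmt.Println(inParticipantList(active, []byte{0x01, 0x02})) // true
	fmt.Println(inParticipantList(active, []byte{0x03}))       // false
}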
@@ -89,11 +89,11 @@ func (a *CeremonyApplication) finalizeParticipantSet() error {
power = power >> 1
}

a.ActiveParticipants = []*protobufs.Ed448PublicKey{}
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
for i := 0; i < int(power); i++ {
a.ActiveParticipants = append(
a.ActiveParticipants,
a.LobbyJoins[i].PublicKeySignatureEd448.PublicKey,
a.LobbyJoins[i],
)
}

@@ -122,7 +122,10 @@ func TestCeremonyTransitions(t *testing.T) {
})
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_IN_PROGRESS)
require.True(t, bytes.Equal(a.ActiveParticipants[0].KeyValue, proverPubKey))
require.True(t, bytes.Equal(
a.ActiveParticipants[0].PublicKeySignatureEd448.PublicKey.KeyValue,
proverPubKey,
))

tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
tau2 := tau.Mul(tau)
|
@ -2,9 +2,9 @@ package application
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
|
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
||||||
)
|
)
|
||||||
@ -37,59 +37,47 @@ func (a *CeremonyApplication) applyTranscript(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
g1s := make([]*curves.PointBls48581G1, len(a.UpdatedTranscript.G1Powers))
|
g1s := make([]curves.Point, len(a.UpdatedTranscript.G1Powers))
|
||||||
eg := errgroup.Group{}
|
|
||||||
eg.SetLimit(100)
|
|
||||||
|
|
||||||
for i := range a.UpdatedTranscript.G1Powers {
|
for i := range a.UpdatedTranscript.G1Powers {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
if !bytes.Equal(
|
||||||
if !bytes.Equal(
|
a.UpdatedTranscript.G1Powers[i].KeyValue,
|
||||||
a.UpdatedTranscript.G1Powers[i].KeyValue,
|
transcript.G1Powers[i].KeyValue,
|
||||||
transcript.G1Powers[i].KeyValue,
|
) {
|
||||||
) {
|
return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
|
||||||
return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
|
}
|
||||||
}
|
|
||||||
|
|
||||||
g1 := &curves.PointBls48581G1{}
|
g1 := &curves.PointBls48581G1{}
|
||||||
x, err := g1.FromAffineCompressed(a.UpdatedTranscript.G1Powers[i].KeyValue)
|
x, err := g1.FromAffineCompressed(
|
||||||
if err != nil {
|
a.UpdatedTranscript.G1Powers[i].KeyValue,
|
||||||
return errors.Wrap(err, "apply transcript")
|
)
|
||||||
}
|
if err != nil {
|
||||||
g1, _ = x.(*curves.PointBls48581G1)
|
return errors.Wrap(err, "apply transcript")
|
||||||
|
}
|
||||||
|
|
||||||
g1s[i] = g1
|
g1s[i] = x
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
g2s := make([]*curves.PointBls48581G2, len(a.UpdatedTranscript.G2Powers))
|
g2s := make([]curves.Point, len(a.UpdatedTranscript.G2Powers))
|
||||||
for i := range a.UpdatedTranscript.G2Powers {
|
for i := range a.UpdatedTranscript.G2Powers {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
if !bytes.Equal(
|
||||||
if !bytes.Equal(
|
a.UpdatedTranscript.G2Powers[i].KeyValue,
|
||||||
a.UpdatedTranscript.G2Powers[i].KeyValue,
|
transcript.G2Powers[i].KeyValue,
|
||||||
transcript.G2Powers[i].KeyValue,
|
) {
|
||||||
) {
|
return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
|
||||||
return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
|
}
|
||||||
}
|
|
||||||
|
|
||||||
g2 := &curves.PointBls48581G2{}
|
g2 := &curves.PointBls48581G2{}
|
||||||
x, err := g2.FromAffineCompressed(a.UpdatedTranscript.G2Powers[i].KeyValue)
|
x, err := g2.FromAffineCompressed(
|
||||||
if err != nil {
|
a.UpdatedTranscript.G2Powers[i].KeyValue,
|
||||||
return errors.Wrap(err, "apply transcript")
|
)
|
||||||
}
|
if err != nil {
|
||||||
g2, _ = x.(*curves.PointBls48581G2)
|
return errors.Wrap(err, "apply transcript")
|
||||||
|
}
|
||||||
|
|
||||||
g2s[i] = g2
|
g2s[i] = x
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
g1Witnesses := []*curves.PointBls48581G1{}
|
g1Witnesses := []*curves.PointBls48581G1{}
|
||||||
@ -168,52 +156,70 @@ func (a *CeremonyApplication) applyTranscript(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mp := []curves.PairingPoint{}
|
|
||||||
mpg2 := curves.BLS48581G2().Point.Generator().(curves.PairingPoint)
|
mpg2 := curves.BLS48581G2().Point.Generator().(curves.PairingPoint)
|
||||||
mpg2n := g2s[1].Neg().(curves.PairingPoint)
|
mpg2n := g2s[1].Neg().(curves.PairingPoint)
|
||||||
|
|
||||||
for i := 0; i < len(g1s)-1; i++ {
|
|
||||||
mp = append(mp, g1s[i])
|
|
||||||
mp = append(mp, mpg2n)
|
|
||||||
mp = append(mp, g1s[i+1])
|
|
||||||
mp = append(mp, mpg2)
|
|
||||||
}
|
|
||||||
|
|
||||||
mp2 := []curves.PairingPoint{}
|
|
||||||
mpg1 := curves.BLS48581G1().Point.Generator().(curves.PairingPoint)
|
mpg1 := curves.BLS48581G1().Point.Generator().(curves.PairingPoint)
|
||||||
mpg1n := g1s[1].Neg().(curves.PairingPoint)
|
mpg1n := g1s[1].Neg().(curves.PairingPoint)
|
||||||
for i := 0; i < len(g2s)-1; i++ {
|
|
||||||
mp2 = append(mp2, mpg1n)
|
randoms := []curves.Scalar{}
|
||||||
mp2 = append(mp2, g2s[i])
|
sum := curves.BLS48581G1().Scalar.Zero()
|
||||||
mp2 = append(mp2, mpg1)
|
|
||||||
mp2 = append(mp2, g2s[i+1])
|
for i := 0; i < len(g1s)-1; i++ {
|
||||||
|
randoms = append(randoms, curves.BLS48581G1().Scalar.Random(rand.Reader))
|
||||||
|
sum = sum.Add(randoms[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
l := g1s[0].MultiPairing(mp...)
|
g1CheckR := g1s[0].SumOfProducts(g1s[1:], randoms)
|
||||||
if !l.IsOne() {
|
g1CheckL := g1s[0].SumOfProducts(g1s[:len(g1s)-1], randoms)
|
||||||
|
|
||||||
|
if !mpg2.MultiPairing(
|
||||||
|
g1CheckL.(curves.PairingPoint),
|
||||||
|
mpg2n.Mul(sum).(curves.PairingPoint),
|
||||||
|
g1CheckR.(curves.PairingPoint),
|
||||||
|
mpg2.Mul(sum).(curves.PairingPoint),
|
||||||
|
).IsOne() {
|
||||||
return errors.Wrap(
|
return errors.Wrap(
|
||||||
errors.New("pairing check failed for g1s"),
|
errors.New("pairing check failed for g1s"),
|
||||||
"apply transcript",
|
"apply transcript",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
l = g1s[0].MultiPairing(mp2...)
|
var g2CheckL, g2CheckR curves.Point
|
||||||
if !l.IsOne() {
|
g2Sum := curves.BLS48581G1().Scalar.Zero()
|
||||||
|
for i := 0; i < len(g2s)-1; i++ {
|
||||||
|
g2Sum = g2Sum.Add(randoms[i])
|
||||||
|
if g2CheckL == nil {
|
||||||
|
g2CheckL = g2s[0].Mul(randoms[0])
|
||||||
|
g2CheckR = g2s[1].Mul(randoms[0])
|
||||||
|
} else {
|
||||||
|
g2CheckL = g2CheckL.Add(g2s[i].Mul(randoms[i]))
|
||||||
|
g2CheckR = g2CheckR.Add(g2s[i+1].Mul(randoms[i]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !mpg2.MultiPairing(
|
||||||
|
mpg1n.Mul(g2Sum).(curves.PairingPoint),
|
||||||
|
g2CheckL.(curves.PairingPoint),
|
||||||
|
mpg1.Mul(g2Sum).(curves.PairingPoint),
|
||||||
|
g2CheckR.(curves.PairingPoint),
|
||||||
|
).IsOne() {
|
||||||
return errors.Wrap(
|
return errors.Wrap(
|
||||||
errors.New("pairing check failed for g2s"),
|
errors.New("pairing check failed for g2s"),
|
||||||
"apply transcript",
|
"apply transcript",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
mp3 := []curves.PairingPoint{}
|
mp3 := make([]curves.PairingPoint, (len(g2Powers)-1)*4)
|
||||||
for i := 0; i < len(g2Powers)-1; i++ {
|
for i := 0; i < len(g2Powers)-1; i++ {
|
||||||
mp3 = append(mp3, g1Witnesses[i+1].Neg().(curves.PairingPoint))
|
i := i
|
||||||
mp3 = append(mp3, g2Powers[i])
|
mp3[i*4+0] = g1Witnesses[i+1].Neg().(curves.PairingPoint)
|
||||||
mp3 = append(mp3, mpg1)
|
mp3[i*4+1] = g2Powers[i]
|
||||||
mp3 = append(mp3, g2Powers[i+1])
|
mp3[i*4+2] = mpg1
|
||||||
|
mp3[i*4+3] = g2Powers[i+1]
|
||||||
}
|
}
|
||||||
|
|
||||||
l = g1s[0].MultiPairing(mp3...)
|
l := mp3[0].MultiPairing(mp3...)
|
||||||
if !l.IsOne() {
|
if !l.IsOne() {
|
||||||
return errors.Wrap(
|
return errors.Wrap(
|
||||||
errors.New("pairing check failed for witnesses"),
|
errors.New("pairing check failed for witnesses"),
|
||||||
|
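The rewritten applyTranscript above drops the per-index multi-pairing products in favour of a randomized batch: one random scalar per consecutive pair of powers, two sums of products, and a single multi-pairing per group. The scalar analogue below shows why that batching is sound, using math/big over a toy prime instead of pairings (hypothetical names; in the real check the multiplication by τ happens through the pairing against the contributor's G2 power, so the verifier never needs τ itself):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// batchedPowerCheck verifies that powers[i+1] == tau*powers[i] (mod q) for
// every i with a single random linear combination, the same blinding idea the
// rewritten pairing check relies on: a cheating sequence only passes with
// probability about 1/q.
func batchedPowerCheck(powers []*big.Int, tau, q *big.Int) bool {
	left := new(big.Int)  // sum r_i * powers[i+1]
	right := new(big.Int) // sum r_i * powers[i]
	for i := 0; i+1 < len(powers); i++ {
		r, _ := rand.Int(rand.Reader, q)
		left.Add(left, new(big.Int).Mul(r, powers[i+1]))
		right.Add(right, new(big.Int).Mul(r, powers[i]))
	}
	left.Mod(left, q)
	right.Mul(right, tau).Mod(right, q)
	return left.Cmp(right) == 0
}

func main() {
	q := big.NewInt(7919) // toy prime; the real check works in the pairing target group
	tau := big.NewInt(37)
	powers := []*big.Int{big.NewInt(1)}
	for i := 0; i < 8; i++ {
		next := new(big.Int).Mul(powers[i], tau)
		powers = append(powers, next.Mod(next, q))
	}
	fmt.Println(batchedPowerCheck(powers, tau, q)) // true

	powers[3] = big.NewInt(5) // tamper with one power
	fmt.Println(batchedPowerCheck(powers, tau, q)) // false with overwhelming probability
}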
@ -3,7 +3,9 @@ package application
|
|||||||
import (
|
import (
|
||||||
"crypto"
|
"crypto"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/cloudflare/circl/sign/ed448"
|
"github.com/cloudflare/circl/sign/ed448"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
@ -12,6 +14,166 @@ import (
|
|||||||
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// This does a full test of the 65536 powers, run this if you want to wait a
|
||||||
|
// long time
|
||||||
|
func TestApplyTranscript_Slow(t *testing.T) {
|
||||||
|
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
|
||||||
|
olds := []*curves.ScalarBls48581{
|
||||||
|
curves.BLS48581G1().Scalar.One().(*curves.ScalarBls48581),
|
||||||
|
}
|
||||||
|
tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
|
||||||
|
taus := []*curves.ScalarBls48581{
|
||||||
|
curves.BLS48581G1().Scalar.One().(*curves.ScalarBls48581),
|
||||||
|
}
|
||||||
|
fmt.Println(time.Now().Unix())
|
||||||
|
fmt.Println("generate taus")
|
||||||
|
for i := 0; i < 65536; i++ {
|
||||||
|
olds = append(olds, olds[i].Mul(old).(*curves.ScalarBls48581))
|
||||||
|
taus = append(taus, taus[i].Mul(tau).(*curves.ScalarBls48581))
|
||||||
|
}
|
||||||
|
tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
|
||||||
|
|
||||||
|
fmt.Println(time.Now().Unix())
|
||||||
|
fmt.Println("taus generated")
|
||||||
|
proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
|
||||||
|
require.NoError(t, err)
|
||||||
|
proverSig, err := proverKey.Sign(
|
||||||
|
rand.Reader,
|
||||||
|
tauPubG2.ToAffineCompressed(),
|
||||||
|
crypto.Hash(0),
|
||||||
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fmt.Println(time.Now().Unix())
|
||||||
|
fmt.Println("prover signature generated")
|
||||||
|
blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
|
||||||
|
key := tau.Bytes()
|
||||||
|
|
||||||
|
for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
key[i], key[j] = key[j], key[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
|
||||||
|
require.Fail(t, "could not sign")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(time.Now().Unix())
|
||||||
|
fmt.Println("bls signature generated")
|
||||||
|
|
||||||
|
blsSig := blsSignature[:]
|
||||||
|
oldTranscript := &protobufs.CeremonyTranscript{
|
||||||
|
G1Powers: []*protobufs.BLS48581G1PublicKey{},
|
||||||
|
G2Powers: []*protobufs.BLS48581G2PublicKey{},
|
||||||
|
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
|
||||||
|
olds[256],
|
||||||
|
).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
updatedTranscript := &protobufs.CeremonyTranscript{
|
||||||
|
G1Powers: []*protobufs.BLS48581G1PublicKey{},
|
||||||
|
G2Powers: []*protobufs.BLS48581G2PublicKey{},
|
||||||
|
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
|
||||||
|
taus[256],
|
||||||
|
).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
|
||||||
|
olds[256],
|
||||||
|
).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
|
||||||
|
olds[256],
|
||||||
|
).Mul(taus[256]).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, o := range olds {
|
||||||
|
oldTranscript.G1Powers = append(
|
||||||
|
oldTranscript.G1Powers,
|
||||||
|
&protobufs.BLS48581G1PublicKey{
|
||||||
|
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
|
||||||
|
o,
|
||||||
|
).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
updatedTranscript.G1Powers = append(
|
||||||
|
updatedTranscript.G1Powers,
|
||||||
|
&protobufs.BLS48581G1PublicKey{
|
||||||
|
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
|
||||||
|
o,
|
||||||
|
).Mul(taus[i]).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
if i < 257 {
|
||||||
|
oldTranscript.G2Powers = append(
|
||||||
|
oldTranscript.G2Powers,
|
||||||
|
&protobufs.BLS48581G2PublicKey{
|
||||||
|
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
|
||||||
|
o,
|
||||||
|
).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
updatedTranscript.G2Powers = append(
|
||||||
|
updatedTranscript.G2Powers,
|
||||||
|
&protobufs.BLS48581G2PublicKey{
|
||||||
|
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
|
||||||
|
o,
|
||||||
|
).Mul(taus[i]).ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(time.Now().Unix())
|
||||||
|
fmt.Println("transcripts generated")
|
||||||
|
a := &CeremonyApplication{
|
||||||
|
StateCount: 0,
|
||||||
|
RoundCount: 0,
|
||||||
|
LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
|
||||||
|
FinalCommits: []*protobufs.CeremonyTranscriptCommit{
|
||||||
|
{
|
||||||
|
ProverSignature: &protobufs.Ed448Signature{
|
||||||
|
Signature: proverSig,
|
||||||
|
PublicKey: &protobufs.Ed448PublicKey{
|
||||||
|
KeyValue: proverPubKey,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ContributionSignature: &protobufs.BLS48581Signature{
|
||||||
|
Signature: blsSig,
|
||||||
|
PublicKey: &protobufs.BLS48581G2PublicKey{
|
||||||
|
KeyValue: tauPubG2.ToAffineCompressed(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
LatestTranscript: oldTranscript,
|
||||||
|
UpdatedTranscript: updatedTranscript,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = a.applyTranscript(updatedTranscript)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestApplyTranscript(t *testing.T) {
|
func TestApplyTranscript(t *testing.T) {
|
||||||
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
|
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
|
||||||
old2 := old.Mul(old)
|
old2 := old.Mul(old)
|
||||||
@ -322,5 +484,5 @@ func TestApplyRewritingTranscriptFails(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = a.applyTranscript(updatedTranscript)
|
err = a.applyTranscript(updatedTranscript)
|
||||||
require.NoError(t, err)
|
require.Error(t, err)
|
||||||
}
|
}
|
||||||
|
@ -37,6 +37,7 @@ type CeremonyExecutionEngine struct {
|
|||||||
keyManager keys.KeyManager
|
keyManager keys.KeyManager
|
||||||
engineConfig *config.EngineConfig
|
engineConfig *config.EngineConfig
|
||||||
pubSub p2p.PubSub
|
pubSub p2p.PubSub
|
||||||
|
peerIdHash []byte
|
||||||
provingKey crypto.Signer
|
provingKey crypto.Signer
|
||||||
proverPublicKey []byte
|
proverPublicKey []byte
|
||||||
provingKeyAddress []byte
|
provingKeyAddress []byte
|
||||||
@ -48,11 +49,11 @@ type CeremonyExecutionEngine struct {
|
|||||||
alreadyPublishedTranscript bool
|
alreadyPublishedTranscript bool
|
||||||
seenMessageMap map[string]bool
|
seenMessageMap map[string]bool
|
||||||
seenMessageMx sync.Mutex
|
seenMessageMx sync.Mutex
|
||||||
|
intrinsicFilter []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCeremonyExecutionEngine(
|
func NewCeremonyExecutionEngine(
|
||||||
logger *zap.Logger,
|
logger *zap.Logger,
|
||||||
clock *ceremony.CeremonyDataClockConsensusEngine,
|
|
||||||
engineConfig *config.EngineConfig,
|
engineConfig *config.EngineConfig,
|
||||||
keyManager keys.KeyManager,
|
keyManager keys.KeyManager,
|
||||||
pubSub p2p.PubSub,
|
pubSub p2p.PubSub,
|
||||||
@ -63,6 +64,27 @@ func NewCeremonyExecutionEngine(
|
|||||||
panic(errors.New("logger is nil"))
|
panic(errors.New("logger is nil"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
intrinsicFilter := append(
|
||||||
|
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
|
||||||
|
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
|
||||||
|
)
|
||||||
|
|
||||||
|
clock := ceremony.NewCeremonyDataClockConsensusEngine(
|
||||||
|
engineConfig,
|
||||||
|
logger,
|
||||||
|
keyManager,
|
||||||
|
clockStore,
|
||||||
|
keyStore,
|
||||||
|
pubSub,
|
||||||
|
intrinsicFilter,
|
||||||
|
seed,
|
||||||
|
)
|
||||||
|
|
||||||
e := &CeremonyExecutionEngine{
|
e := &CeremonyExecutionEngine{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
clock: clock,
|
clock: clock,
|
||||||
@ -76,8 +98,18 @@ func NewCeremonyExecutionEngine(
|
|||||||
alreadyPublishedShare: false,
|
alreadyPublishedShare: false,
|
||||||
seenMessageMx: sync.Mutex{},
|
seenMessageMx: sync.Mutex{},
|
||||||
seenMessageMap: map[string]bool{},
|
seenMessageMap: map[string]bool{},
|
||||||
|
intrinsicFilter: intrinsicFilter,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
peerId := e.pubSub.GetPeerID()
|
||||||
|
addr, err := poseidon.HashBytes(peerId)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
addrBytes := addr.Bytes()
|
||||||
|
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
|
||||||
|
e.peerIdHash = addrBytes
|
||||||
provingKey, _, publicKeyBytes, provingKeyAddress := e.clock.GetProvingKey(
|
provingKey, _, publicKeyBytes, provingKeyAddress := e.clock.GetProvingKey(
|
||||||
engineConfig,
|
engineConfig,
|
||||||
)
|
)
|
||||||
@ -117,15 +149,7 @@ func (e *CeremonyExecutionEngine) Start() <-chan error {
|
|||||||
))
|
))
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
seed, err := hex.DecodeString(e.engineConfig.GenesisSeed)
|
err := <-e.clock.Start()
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = <-e.clock.Start(
|
|
||||||
application.CEREMONY_ADDRESS,
|
|
||||||
seed,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@ -175,7 +199,7 @@ func (e *CeremonyExecutionEngine) ProcessMessage(
|
|||||||
return nil, errors.Wrap(err, "process message")
|
return nil, errors.Wrap(err, "process message")
|
||||||
}
|
}
|
||||||
|
|
||||||
if frame.FrameNumber < e.clock.GetFrame() {
|
if frame.FrameNumber < e.clock.GetFrame().FrameNumber {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,7 +294,7 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
frameChan := e.clock.GetFrameChannel()
|
frameChan := e.clock.GetFrameChannel()
|
||||||
for {
|
for {
|
||||||
frameFromBuffer := <-frameChan
|
frameFromBuffer := <-frameChan
|
||||||
frame := e.clock.GetActiveFrame()
|
frame := e.clock.GetFrame()
|
||||||
e.activeClockFrame = frame
|
e.activeClockFrame = frame
|
||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"evaluating next frame",
|
"evaluating next frame",
|
||||||
@ -289,9 +313,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, _, reward := app.RewardTrie.Get(e.provingKeyAddress)
|
_, _, reward := app.RewardTrie.Get(e.provingKeyAddress)
|
||||||
|
_, _, retro := app.RewardTrie.Get(e.peerIdHash)
|
||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"current application state",
|
"current application state",
|
||||||
zap.Uint64("my_balance", reward),
|
zap.Uint64("my_balance", reward+retro),
|
||||||
zap.String("lobby_state", app.LobbyState.String()),
|
zap.String("lobby_state", app.LobbyState.String()),
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -313,7 +338,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"lobby open for joins",
|
"lobby open for joins",
|
||||||
zap.Int("joined_participants", len(app.LobbyJoins)),
|
zap.Int("joined_participants", len(app.LobbyJoins)),
|
||||||
zap.Int("preferred_participants", len(app.NextRoundPreferredParticipants)),
|
zap.Int(
|
||||||
|
"preferred_participants",
|
||||||
|
len(app.NextRoundPreferredParticipants),
|
||||||
|
),
|
||||||
zap.Bool("in_lobby", alreadyJoined),
|
zap.Bool("in_lobby", alreadyJoined),
|
||||||
zap.Uint64("state_count", app.StateCount),
|
zap.Uint64("state_count", app.StateCount),
|
||||||
)
|
)
|
||||||
@ -337,7 +365,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
case application.CEREMONY_APPLICATION_STATE_IN_PROGRESS:
|
case application.CEREMONY_APPLICATION_STATE_IN_PROGRESS:
|
||||||
inRound := false
|
inRound := false
|
||||||
for _, p := range app.ActiveParticipants {
|
for _, p := range app.ActiveParticipants {
|
||||||
if bytes.Equal(p.KeyValue, e.proverPublicKey) {
|
if bytes.Equal(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
e.proverPublicKey,
|
||||||
|
) {
|
||||||
inRound = true
|
inRound = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -353,7 +384,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"round in progress",
|
"round in progress",
|
||||||
zap.Any("participants", app.ActiveParticipants),
|
zap.Any("participants", app.ActiveParticipants),
|
||||||
zap.Any("current_seen_attestations", len(app.LatestSeenProverAttestations)),
|
zap.Any(
|
||||||
|
"current_seen_attestations",
|
||||||
|
len(app.LatestSeenProverAttestations),
|
||||||
|
),
|
||||||
zap.Any(
|
zap.Any(
|
||||||
"current_dropped_attestations",
|
"current_dropped_attestations",
|
||||||
len(app.DroppedParticipantAttestations),
|
len(app.DroppedParticipantAttestations),
|
||||||
@ -371,7 +405,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
if len(e.peerChannels) == 0 && app.RoundCount == 1 &&
|
if len(e.peerChannels) == 0 && app.RoundCount == 1 &&
|
||||||
len(app.ActiveParticipants) > 1 {
|
len(app.ActiveParticipants) > 1 {
|
||||||
for i, p := range app.ActiveParticipants {
|
for i, p := range app.ActiveParticipants {
|
||||||
if bytes.Equal(p.KeyValue, e.proverPublicKey) {
|
if bytes.Equal(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
e.proverPublicKey,
|
||||||
|
) {
|
||||||
shouldConnect = true
|
shouldConnect = true
|
||||||
position = i
|
position = i
|
||||||
break
|
break
|
||||||
@ -418,7 +455,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if len(app.ActiveParticipants) == 1 &&
|
} else if len(app.ActiveParticipants) == 1 &&
|
||||||
bytes.Equal(app.ActiveParticipants[0].KeyValue, e.proverPublicKey) {
|
bytes.Equal(
|
||||||
|
app.ActiveParticipants[0].PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
e.proverPublicKey,
|
||||||
|
) {
|
||||||
if err = e.commitRound(e.activeSecrets); err != nil {
|
if err = e.commitRound(e.activeSecrets); err != nil {
|
||||||
e.logger.Error("error while participating in round", zap.Error(err))
|
e.logger.Error("error while participating in round", zap.Error(err))
|
||||||
}
|
}
|
||||||
@ -427,7 +467,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
e.logger.Info(
|
e.logger.Info(
|
||||||
"round contribution finalizing",
|
"round contribution finalizing",
|
||||||
zap.Any("participants", len(app.ActiveParticipants)),
|
zap.Any("participants", len(app.ActiveParticipants)),
|
||||||
zap.Any("current_seen_attestations", len(app.LatestSeenProverAttestations)),
|
zap.Any(
|
||||||
|
"current_seen_attestations",
|
||||||
|
len(app.LatestSeenProverAttestations),
|
||||||
|
),
|
||||||
zap.Any(
|
zap.Any(
|
||||||
"current_dropped_attestations",
|
"current_dropped_attestations",
|
||||||
len(app.DroppedParticipantAttestations),
|
len(app.DroppedParticipantAttestations),
|
||||||
@ -450,7 +493,10 @@ func (e *CeremonyExecutionEngine) RunWorker() {
|
|||||||
|
|
||||||
shouldPublish := false
|
shouldPublish := false
|
||||||
for _, p := range app.ActiveParticipants {
|
for _, p := range app.ActiveParticipants {
|
||||||
if bytes.Equal(p.KeyValue, e.proverPublicKey) {
|
if bytes.Equal(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
e.proverPublicKey,
|
||||||
|
) {
|
||||||
shouldPublish = true
|
shouldPublish = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -587,7 +633,7 @@ func (e *CeremonyExecutionEngine) announceJoin(
|
|||||||
|
|
||||||
return errors.Wrap(
|
return errors.Wrap(
|
||||||
e.publishMessage(
|
e.publishMessage(
|
||||||
application.CEREMONY_ADDRESS,
|
e.intrinsicFilter,
|
||||||
join,
|
join,
|
||||||
),
|
),
|
||||||
"announce join",
|
"announce join",
|
||||||
@ -607,34 +653,20 @@ func (e *CeremonyExecutionEngine) connectToActivePeers(
|
|||||||
return errors.Wrap(err, "connect to active peers")
|
return errors.Wrap(err, "connect to active peers")
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, p := range app.ActiveParticipants {
|
for i, p := range app.LobbyJoins {
|
||||||
if !bytes.Equal(p.KeyValue, e.proverPublicKey) {
|
if !bytes.Equal(
|
||||||
ic, err := e.keyStore.GetLatestKeyBundle(p.KeyValue)
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
if err != nil {
|
e.proverPublicKey,
|
||||||
return errors.Wrap(err, "connect to active peers")
|
) {
|
||||||
}
|
|
||||||
|
|
||||||
var kba *protobufs.KeyBundleAnnouncement
|
|
||||||
switch ic.TypeUrl {
|
|
||||||
case protobufs.KeyBundleAnnouncementType:
|
|
||||||
kba = &protobufs.KeyBundleAnnouncement{}
|
|
||||||
if err := proto.Unmarshal(
|
|
||||||
ic.Data,
|
|
||||||
kba,
|
|
||||||
); err != nil {
|
|
||||||
return errors.Wrap(err, "connect to active peers")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
receiverIdk, err := curves.ED448().Point.FromAffineCompressed(
|
receiverIdk, err := curves.ED448().Point.FromAffineCompressed(
|
||||||
kba.IdentityKey.GetPublicKeySignatureEd448().PublicKey.KeyValue,
|
p.IdentityKey.KeyValue,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "connect to active peers")
|
return errors.Wrap(err, "connect to active peers")
|
||||||
}
|
}
|
||||||
|
|
||||||
receiverSpk, err := curves.ED448().Point.FromAffineCompressed(
|
receiverSpk, err := curves.ED448().Point.FromAffineCompressed(
|
||||||
kba.SignedPreKey.GetPublicKeySignatureEd448().PublicKey.KeyValue,
|
p.SignedPreKey.KeyValue,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "connect to active peers")
|
return errors.Wrap(err, "connect to active peers")
|
||||||
@ -642,19 +674,24 @@ func (e *CeremonyExecutionEngine) connectToActivePeers(
|
|||||||
|
|
||||||
client, err := e.clock.GetPublicChannelForProvingKey(
|
client, err := e.clock.GetPublicChannelForProvingKey(
|
||||||
i > position,
|
i > position,
|
||||||
p.KeyValue,
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.logger.Error(
|
e.logger.Error(
|
||||||
"peer does not support direct public channels",
|
"peer does not support direct public channels",
|
||||||
zap.Binary("proving_key", p.KeyValue),
|
zap.Binary(
|
||||||
|
"proving_key",
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
),
|
||||||
zap.Error(err),
|
zap.Error(err),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
e.peerChannels[string(p.KeyValue)], err = p2p.NewPublicP2PChannel(
|
e.peerChannels[string(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)], err = p2p.NewPublicP2PChannel(
|
||||||
client,
|
client,
|
||||||
e.proverPublicKey,
|
e.proverPublicKey,
|
||||||
p.KeyValue,
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
i > position,
|
i > position,
|
||||||
idk,
|
idk,
|
||||||
spk,
|
spk,
|
||||||
@ -690,8 +727,13 @@ func (e *CeremonyExecutionEngine) participateRound(
|
|||||||
idks := []curves.Point{}
|
idks := []curves.Point{}
|
||||||
initiator := false
|
initiator := false
|
||||||
for _, p := range app.ActiveParticipants {
|
for _, p := range app.ActiveParticipants {
|
||||||
if !bytes.Equal(p.KeyValue, e.proverPublicKey) {
|
if !bytes.Equal(
|
||||||
ic, err := e.keyStore.GetLatestKeyBundle(p.KeyValue)
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
e.proverPublicKey,
|
||||||
|
) {
|
||||||
|
ic, err := e.keyStore.GetLatestKeyBundle(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "participate round")
|
return errors.Wrap(err, "participate round")
|
||||||
}
|
}
|
||||||
@ -722,22 +764,29 @@ func (e *CeremonyExecutionEngine) participateRound(
|
|||||||
return errors.Wrap(err, "participate round")
|
return errors.Wrap(err, "participate round")
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := e.peerChannels[string(p.KeyValue)]; !ok {
|
if _, ok := e.peerChannels[string(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)]; !ok {
|
||||||
client, err := e.clock.GetPublicChannelForProvingKey(
|
client, err := e.clock.GetPublicChannelForProvingKey(
|
||||||
initiator,
|
initiator,
|
||||||
p.KeyValue,
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.logger.Error(
|
e.logger.Error(
|
||||||
"peer does not support direct public channels",
|
"peer does not support direct public channels",
|
||||||
zap.Binary("proving_key", p.KeyValue),
|
zap.Binary(
|
||||||
|
"proving_key",
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
),
|
||||||
zap.Error(err),
|
zap.Error(err),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
e.peerChannels[string(p.KeyValue)], err = p2p.NewPublicP2PChannel(
|
e.peerChannels[string(
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)], err = p2p.NewPublicP2PChannel(
|
||||||
client,
|
client,
|
||||||
e.proverPublicKey,
|
e.proverPublicKey,
|
||||||
p.KeyValue,
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
initiator,
|
initiator,
|
||||||
idk,
|
idk,
|
||||||
spk,
|
spk,
|
||||||
@ -761,7 +810,10 @@ func (e *CeremonyExecutionEngine) participateRound(
|
|||||||
|
|
||||||
pubKeys := [][]byte{}
|
pubKeys := [][]byte{}
|
||||||
for _, p := range app.ActiveParticipants {
|
for _, p := range app.ActiveParticipants {
|
||||||
pubKeys = append(pubKeys, p.KeyValue)
|
pubKeys = append(
|
||||||
|
pubKeys,
|
||||||
|
p.PublicKeySignatureEd448.PublicKey.KeyValue,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
newSecrets, err := application.ProcessRound(
|
newSecrets, err := application.ProcessRound(
|
||||||
@ -834,7 +886,7 @@ func (e *CeremonyExecutionEngine) commitRound(secrets []curves.Scalar) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := e.publishMessage(
|
if err := e.publishMessage(
|
||||||
application.CEREMONY_ADDRESS,
|
e.intrinsicFilter,
|
||||||
advance,
|
advance,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return errors.Wrap(err, "commit round")
|
return errors.Wrap(err, "commit round")
|
||||||
@@ -849,7 +901,7 @@ func (e *CeremonyExecutionEngine) commitRound(secrets []curves.Scalar) error {
 func (e *CeremonyExecutionEngine) publishDroppedParticipant(
 	participant []byte,
 ) {
-	frameNumber := e.clock.GetFrame()
+	frameNumber := e.clock.GetFrame().FrameNumber
 
 	b := binary.BigEndian.AppendUint64([]byte("dropped"), frameNumber)
 	b = append(b, participant...)
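`e.clock.GetFrame()` evidently now returns a full clock frame rather than a bare number, so callers read `.FrameNumber` from it. The key written for a dropped participant is the ASCII prefix "dropped", the frame number in big-endian, then the participant's key bytes; a small sketch of that construction, with the frame number passed in directly:

```go
package ceremony

import "encoding/binary"

// droppedKey reproduces the key layout used in publishDroppedParticipant:
// "dropped" || big-endian frame number || participant key. In the engine the
// frame number comes from e.clock.GetFrame().FrameNumber.
func droppedKey(frameNumber uint64, participant []byte) []byte {
	b := binary.BigEndian.AppendUint64([]byte("dropped"), frameNumber)
	return append(b, participant...)
}
```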
@@ -876,7 +928,7 @@ func (e *CeremonyExecutionEngine) publishDroppedParticipant(
 	}
 
 	err = e.publishMessage(
-		application.CEREMONY_ADDRESS,
+		e.intrinsicFilter,
 		dropped,
 	)
 	if err != nil {
@@ -893,7 +945,7 @@ func (e *CeremonyExecutionEngine) publishDroppedParticipant(
 func (e *CeremonyExecutionEngine) publishLastSeenParticipant(
 	participant []byte,
 ) {
-	frameNumber := e.clock.GetFrame()
+	frameNumber := e.clock.GetFrame().FrameNumber
 
 	b := binary.BigEndian.AppendUint64([]byte("lastseen"), frameNumber)
 	b = append(b, participant...)
@@ -919,7 +971,7 @@ func (e *CeremonyExecutionEngine) publishLastSeenParticipant(
 		},
 	}
 	err = e.publishMessage(
-		application.CEREMONY_ADDRESS,
+		e.intrinsicFilter,
 		seen,
 	)
 	if err != nil {
@@ -1019,7 +1071,7 @@ func (e *CeremonyExecutionEngine) publishTranscriptShare(
 
 	err = errors.Wrap(
 		e.publishMessage(
-			application.CEREMONY_ADDRESS,
+			e.intrinsicFilter,
 			transcriptShare,
 		),
 		"publish transcript share",
@@ -1035,7 +1087,7 @@ func (e *CeremonyExecutionEngine) publishTranscriptShare(
 func (e *CeremonyExecutionEngine) VerifyExecution(
 	frame *protobufs.ClockFrame,
 ) error {
-	if e.clock.GetFrame() != frame.FrameNumber-1 {
+	if e.clock.GetFrame().FrameNumber != frame.FrameNumber-1 {
 		return nil
 	}
 
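The same `GetFrame().FrameNumber` accessor appears in the verification guard: execution is only verified when the incoming frame is the direct successor of the frame the engine's clock has most recently seen; otherwise the method returns early. A toy sketch of that guard under hypothetical minimal types:

```go
package ceremony

// clockFrame and clock are hypothetical stand-ins for the protobuf ClockFrame
// and the engine's clock consensus interface.
type clockFrame struct{ FrameNumber uint64 }

type clock struct{ latest *clockFrame }

func (c *clock) GetFrame() *clockFrame { return c.latest }

// shouldVerify mirrors the guard in VerifyExecution: only the immediate
// successor of the latest local frame is actually checked.
func shouldVerify(c *clock, incoming *clockFrame) bool {
	return c.GetFrame().FrameNumber == incoming.FrameNumber-1
}
```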
@@ -1102,7 +1154,7 @@ func (e *CeremonyExecutionEngine) publishTranscript(
 	e.alreadyPublishedTranscript = true
 	err := errors.Wrap(
 		e.publishMessage(
-			application.CEREMONY_ADDRESS,
+			e.intrinsicFilter,
 			app.UpdatedTranscript,
 		),
 		"publish transcript share",
node/go.mod (10 changes)
@@ -11,9 +11,11 @@ replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream
 
 replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub
 
+replace github.com/cockroachdb/pebble => ../pebble
+
 require (
 	filippo.io/edwards25519 v1.0.0-rc.1
-	github.com/cockroachdb/pebble v0.0.0-20231025190044-422dce910055
+	github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
 	github.com/libp2p/go-libp2p v0.31.0
 	github.com/libp2p/go-libp2p-gostream v0.6.0
 	github.com/libp2p/go-libp2p-kad-dht v0.23.0
@@ -57,11 +59,9 @@ require (
 	github.com/quic-go/qtls-go1-19 v0.3.3 // indirect
 	github.com/quic-go/qtls-go1-20 v0.2.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
-	golang.org/x/term v0.14.0 // indirect
-	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
+	golang.org/x/term v0.14.0
 	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
-	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
@@ -126,7 +126,7 @@ require (
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
-	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/mr-tron/base58 v1.2.0
 	github.com/multiformats/go-base32 v0.1.0 // indirect
 	github.com/multiformats/go-base36 v0.2.0 // indirect
 	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
node/go.sum (173 changes: module checksum entries added and removed to match the dependency changes in node/go.mod, including the updated cockroachdb/pebble, cockroachdb/metamorphic, and go-errors entries and the removal of entries for modules that are no longer required)
|
|
||||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||||
@ -539,22 +438,14 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
|
|||||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
|
||||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
|
||||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
|
||||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
|
||||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
@ -569,29 +460,14 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
|||||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
|
||||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
|
||||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
|
||||||
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
|
||||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
|
||||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
|
||||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
|
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
|
||||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
||||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
|
||||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
|
||||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
|
||||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
|
||||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
|
||||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
|
||||||
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
@ -624,20 +500,16 @@ go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1
|
|||||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
|
||||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
|
||||||
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
|
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
|
||||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
@ -662,25 +534,18 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
|
|||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
|
||||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
|
||||||
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
|
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
|
||||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
@ -702,20 +567,11 @@ golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -727,23 +583,14 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
|
|
||||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
|
||||||
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
|
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
|
||||||
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
|
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
|
||||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
|
||||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
@ -752,11 +599,9 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm
|
|||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
@ -784,7 +629,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
|
|||||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
@ -793,14 +637,10 @@ google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRn
|
|||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||||
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
|
||||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
@ -808,12 +648,9 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
|
|||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
|
||||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||||
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
|
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
|
||||||
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
|
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
@ -829,21 +666,15 @@ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs
|
|||||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
|
||||||
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
197 node/keys/inmem.go Normal file
@ -0,0 +1,197 @@
package keys

import (
	"crypto"
	"crypto/rand"

	"github.com/cloudflare/circl/sign/ed448"
	"github.com/pkg/errors"
	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)

type InMemoryKeyManager struct {
	key   ByteString
	store map[string]Key
}

func NewInMemoryKeyManager() *InMemoryKeyManager {
	store := make(map[string]Key)

	return &InMemoryKeyManager{
		store: store,
	}
}

// CreateSigningKey implements KeyManager
func (f *InMemoryKeyManager) CreateSigningKey(
	id string,
	keyType KeyType,
) (crypto.Signer, error) {
	switch keyType {
	case KeyTypeEd448:
		pubkey, privkey, err := ed448.GenerateKey(rand.Reader)
		if err != nil {
			return nil, errors.Wrap(err, "could not generate key")
		}

		if err = f.save(
			id,
			Key{
				Id:         id,
				Type:       keyType,
				PublicKey:  ByteString(pubkey),
				PrivateKey: ByteString(privkey),
			},
		); err != nil {
			return nil, errors.Wrap(err, "could not save")
		}

		return privkey, nil
		// case KeyTypePCAS:
		// 	_, privkey, err := addressing.GenerateKey(rand.Reader)
		// 	if err != nil {
		// 		return nil, errors.Wrap(err, "could not generate key")
		// 	}

		// 	if err = f.save(id, privkey); err != nil {
		// 		return nil, errors.Wrap(err, "could not save")
		// 	}

		// 	return privkey, nil
	}

	return nil, UnsupportedKeyTypeErr
}

// CreateAgreementKey implements KeyManager
func (f *InMemoryKeyManager) CreateAgreementKey(
	id string,
	keyType KeyType,
) (curves.Scalar, error) {
	switch keyType {
	case KeyTypeX448:
		privkey := curves.ED448().Scalar.Random(rand.Reader)
		pubkey := curves.ED448().NewGeneratorPoint().Mul(privkey)

		if err := f.save(
			id,
			Key{
				Id:         id,
				Type:       KeyTypeX448,
				PublicKey:  pubkey.ToAffineCompressed(),
				PrivateKey: privkey.Bytes(),
			},
		); err != nil {
			return nil, errors.Wrap(err, "could not save")
		}

		return privkey, nil
	}

	return nil, UnsupportedKeyTypeErr
}

// GetAgreementKey implements KeyManager
func (f *InMemoryKeyManager) GetAgreementKey(id string) (curves.Scalar, error) {
	key, err := f.read(id)
	if err != nil {
		return nil, err
	}

	switch key.Type {
	case KeyTypeX448:
		privkey, err := curves.ED448().NewScalar().SetBytes(key.PrivateKey)
		return privkey, err
	}

	return nil, UnsupportedKeyTypeErr
}

// GetRawKey implements KeyManager
func (f *InMemoryKeyManager) GetRawKey(id string) (*Key, error) {
	key, err := f.read(id)
	return &key, err
}

// GetSigningKey implements KeyManager
func (f *InMemoryKeyManager) GetSigningKey(id string) (crypto.Signer, error) {
	key, err := f.read(id)
	if err != nil {
		return nil, err
	}

	switch key.Type {
	case KeyTypeEd448:
		privkey := (ed448.PrivateKey)(key.PrivateKey)
		return privkey, err
		// case KeyTypePCAS:
		// 	privkey := (addressing.PCAS)(key.PrivateKey)
		// 	return privkey, err
	}

	return nil, UnsupportedKeyTypeErr
}

// PutRawKey implements KeyManager
func (f *InMemoryKeyManager) PutRawKey(key *Key) error {
	return f.save(key.Id, *key)
}

// DeleteKey implements KeyManager
func (f *InMemoryKeyManager) DeleteKey(id string) error {
	delete(f.store, id)

	return nil
}

// GetKey implements KeyManager
func (f *InMemoryKeyManager) GetKey(id string) (key *Key, err error) {
	storeKey, err := f.read(id)
	if err != nil {
		return nil, err
	}

	return &storeKey, nil
}

// ListKeys implements KeyManager
func (f *InMemoryKeyManager) ListKeys() ([]*Key, error) {
	keys := []*Key{}

	for k := range f.store {
		storeKey, err := f.read(k)
		if err != nil {
			return nil, err
		}
		keys = append(keys, &storeKey)
	}

	return keys, nil
}

var _ KeyManager = (*InMemoryKeyManager)(nil)

func (f *InMemoryKeyManager) save(id string, key Key) error {
	f.store[id] = Key{
		Id:         key.Id,
		Type:       key.Type,
		PublicKey:  key.PublicKey,
		PrivateKey: key.PrivateKey,
	}

	return nil
}

func (f *InMemoryKeyManager) read(id string) (Key, error) {
	k, ok := f.store[id]
	if !ok {
		return Key{}, KeyNotFoundErr
	}

	return Key{
		Id:         k.Id,
		Type:       k.Type,
		PublicKey:  k.PublicKey,
		PrivateKey: k.PrivateKey,
	}, nil
}
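A minimal usage sketch for the new in-memory key manager follows. It is illustrative only and not part of this commit; it assumes the KeyType constants, the Key and ByteString types, the KeyManager interface, and the error values referenced above are defined elsewhere in the keys package, and relies on circl's ed448.PrivateKey implementing crypto.Signer (passing crypto.Hash(0) selects pure Ed448 with an empty context).

// inmem_example.go — illustrative sketch, not part of this commit.
package keys

import (
	"crypto"
	"crypto/rand"
	"fmt"
)

// demoInMemoryKeyManager round-trips an Ed448 signing key through the
// in-memory store: create, fetch back as a crypto.Signer, sign, and list.
func demoInMemoryKeyManager() error {
	km := NewInMemoryKeyManager()

	// Generates a fresh Ed448 key pair and stores it under the id "example".
	if _, err := km.CreateSigningKey("example", KeyTypeEd448); err != nil {
		return err
	}

	// Reads the stored key back as a crypto.Signer.
	signer, err := km.GetSigningKey("example")
	if err != nil {
		return err
	}

	// crypto.Hash(0) requests pure Ed448 signing with an empty context.
	sig, err := signer.Sign(rand.Reader, []byte("hello"), crypto.Hash(0))
	if err != nil {
		return err
	}

	stored, err := km.ListKeys()
	if err != nil {
		return err
	}

	fmt.Printf("stored %d key(s); signature is %d bytes\n", len(stored), len(sig))
	return nil
}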
@ -25,7 +25,7 @@ import (
 var (
 	configDirectory = flag.String(
 		"config",
-		"./.config/",
+		filepath.Join(".", ".config"),
 		"the configuration directory",
 	)
 	importPrivKey = flag.String(
@ -233,5 +233,5 @@ func printLogo() {

 func printVersion() {
 	fmt.Println(" ")
-	fmt.Println(" Quilibrium Node - v1.1.8 – Dawn")
+	fmt.Println(" Quilibrium Node - v1.2.0 – Dawn")
 }
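For reference, a tiny sketch of what the new flag default evaluates to. filepath.Join cleans the joined path and uses the platform's separator, so the default becomes ".config" relative to the working directory on every OS. Illustrative only, not part of the commit.

// path_default_sketch.go — illustrative only.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Join(".", ".config") cleans the leading "./" and prints ".config".
	fmt.Println(filepath.Join(".", ".config"))
}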
@ -3,6 +3,7 @@ package p2p
 import (
 	"fmt"
 	"math/big"
+	"sort"

 	"golang.org/x/crypto/sha3"
 )
@ -64,10 +65,10 @@ func generateBitSlices(
 	return nil
 }

-// getBloomFilterIndices returns a bloom filter index based on the data, however
+// GetBloomFilter returns a bloom filter based on the data, however
 // it assumes bitLength is a multiple of 32. If the filter size is not
 // conformant, this will generate biased indices.
-func getBloomFilterIndices(data []byte, bitLength int, k int) []byte {
+func GetBloomFilter(data []byte, bitLength int, k int) []byte {
 	size := big.NewInt(int64(bitLength)).BitLen() - 1
 	digest := sha3.Sum256(data)
 	output := make([]byte, bitLength/8)
@ -75,7 +76,7 @@ func getBloomFilterIndices(data []byte, bitLength int, k int) []byte {
 	digestBI := new(big.Int).SetBytes(digest[:])
 	for i := 0; i < k; i++ {
 		position := uint(0)
-		for j := size*(i+1) - 1; j >= size*i; j-- {
+		for j := size * i; j < size*(i+1); j++ {
 			position = position<<1 | (digestBI.Bit(j))
 		}
 		if outputBI.Bit(int(position)) != 1 {
@ -96,3 +97,51 @@ func getBloomFilterIndices(data []byte, bitLength int, k int) []byte {
 	outputBI.FillBytes(output)
 	return output
 }
+
+// GetBloomFilterIndices returns the indices of a bloom filter, in increasing
+// order, assuming bitLength is a multiple of 32 as in GetBloomFilter.
+func GetBloomFilterIndices(data []byte, bitLength int, k int) []byte {
+	size := big.NewInt(int64(bitLength)).BitLen() - 1
+	h := sha3.NewShake256()
+	_, err := h.Write(data)
+	if err != nil {
+		panic(err)
+	}
+
+	digest := make([]byte, size*k/8)
+	_, err = h.Read(digest)
+	if err != nil {
+		panic(err)
+	}
+
+	indices := []string{}
+	for i := 0; i < k; i++ {
+		position := make([]byte, size/8)
+		for j := (size / 8) * i; j < (size/8)*(i+1); j++ {
+			position[j%(size/8)] = digest[j]
+		}
+		found := false
+		for _, ext := range indices {
+			if ext == string(position) {
+				k++
+				found = true
+				break
+			}
+		}
+		if !found {
+			p := sort.SearchStrings(indices, string(position))
+			if len(indices) > p {
+				indices = append(indices[:p+1], indices[p:]...)
+				indices[p] = string(position)
+			} else {
+				indices = append(indices, string(position))
+			}
+		}
+	}
+
+	output := ""
+	for _, idx := range indices {
+		output += idx
+	}
+	return []byte(output)
+}
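A short sketch of how the two helpers behave with the parameters Publish uses further below; illustrative only, not part of the commit. GetBloomFilter(data, 256, 3) yields a 32-byte (256-bit) filter with up to 3 bits set, while GetBloomFilterIndices(data, 65536, 24) draws 24 two-byte slice positions from a SHAKE256 digest and returns them sorted and concatenated, which is 48 bytes whenever no two positions collide.

// bloom_sketch.go — illustrative only.
package p2p

import (
	"bytes"
	"fmt"
)

func sketchBloomHelpers(data []byte) {
	filter := GetBloomFilter(data, 256, 3)            // 256-bit filter -> 32 bytes
	indices := GetBloomFilterIndices(data, 65536, 24) // 24 sorted 2-byte indices -> typically 48 bytes

	// Both derivations are deterministic: the same input always yields the same output.
	again := GetBloomFilterIndices(data, 65536, 24)
	fmt.Println(len(filter), len(indices), bytes.Equal(indices, again))
}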
91 node/p2p/bloom_utils_test.go Normal file
File diff suppressed because one or more lines are too long
@ -17,6 +17,7 @@ import (
 	libp2pconfig "github.com/libp2p/go-libp2p/config"
 	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-libp2p/p2p/discovery/routing"
@ -47,8 +48,6 @@ type BlossomSub struct {
 var _ PubSub = (*BlossomSub)(nil)
 var ErrNoPeersAvailable = errors.New("no peers available")

-// Crucial note, bitmask lengths should always be a power of two so as to reduce
-// index bias with hash functions
 var BITMASK_ALL = []byte{
 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@ -180,7 +179,8 @@ func (b *BlossomSub) PublishToBitmask(bitmask []byte, data []byte) error {
 }

 func (b *BlossomSub) Publish(data []byte) error {
-	bitmask := getBloomFilterIndices(data, 256, 3)
+	bitmask := GetBloomFilter(data, 256, 3)
+	bitmask = append(bitmask, GetBloomFilterIndices(data, 65536, 24)...)
 	return b.PublishToBitmask(bitmask, data)
 }

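With this change the published bitmask is the 32-byte bloom filter followed by the 48 bytes of sorted indices. The sketch below shows the usual bloom-filter containment test over the filter portion; it is not taken from this diff, and whether BlossomSub matches subscriptions exactly this way is not shown here — it only illustrates the property that makes the filter useful for routing.

// bitmask_cover_sketch.go — illustrative only.
package p2p

import "math/big"

// coveredBy reports whether every bit set in needle is also set in haystack.
// Both slices are equal-length big-endian bit sets such as those produced by
// GetBloomFilter; haystack "covers" needle exactly when needle AND haystack == needle.
func coveredBy(needle, haystack []byte) bool {
	n := new(big.Int).SetBytes(needle)
	h := new(big.Int).SetBytes(haystack)
	return new(big.Int).And(n, h).Cmp(n) == 0
}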
@ -509,7 +509,8 @@ func discoverPeers(

 	for peer := range peerChan {
 		peer := peer
-		if peer.ID == h.ID() {
+		if peer.ID == h.ID() ||
+			h.Network().Connectedness(peer.ID) == network.Connected {
 			continue
 		}

@ -535,10 +536,7 @@ func discoverPeers(
 	go func() {
 		for {
 			time.Sleep(30 * time.Second)
-			if len(h.Network().Peers()) == 0 {
-				logger.Info("reinitiating discovery")
-				discover()
-			}
+			discover()
 		}
 	}()

41 node/poor_mans_cd.sh Executable file
@ -0,0 +1,41 @@
#!/bin/bash

start_process() {
	go run ./... &
	process_pid=$!
	child_process_pid=$(pgrep -P $process_pid)
}

is_process_running() {
	ps -p $process_pid > /dev/null 2>&1
	return $?
}

kill_process() {
	kill $process_pid
	kill $child_process_pid
}

start_process

while true; do
	if ! is_process_running; then
		echo "Process crashed or stopped. Restarting..."
		start_process
	fi

	git fetch

	local_head=$(git rev-parse HEAD)
	remote_head=$(git rev-parse @{u})

	if [ "$local_head" != "$remote_head" ]; then
		kill_process

		git pull

		start_process
	fi

	sleep 60
done
@@ -748,7 +748,7 @@ type CeremonyInProgressState struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields

-	ActiveParticipants             []*Ed448PublicKey                   `protobuf:"bytes,1,rep,name=active_participants,json=activeParticipants,proto3" json:"active_participants,omitempty"`
+	ActiveParticipants             []*CeremonyLobbyJoin                `protobuf:"bytes,1,rep,name=active_participants,json=activeParticipants,proto3" json:"active_participants,omitempty"`
 	LatestSeenProverAttestations   []*CeremonySeenProverAttestation    `protobuf:"bytes,2,rep,name=latest_seen_prover_attestations,json=latestSeenProverAttestations,proto3" json:"latest_seen_prover_attestations,omitempty"`
 	DroppedParticipantAttestations []*CeremonyDroppedProverAttestation `protobuf:"bytes,3,rep,name=dropped_participant_attestations,json=droppedParticipantAttestations,proto3" json:"dropped_participant_attestations,omitempty"`
 	TranscriptRoundAdvanceCommits  []*CeremonyAdvanceRound             `protobuf:"bytes,4,rep,name=transcript_round_advance_commits,json=transcriptRoundAdvanceCommits,proto3" json:"transcript_round_advance_commits,omitempty"`
@@ -787,7 +787,7 @@ func (*CeremonyInProgressState) Descriptor() ([]byte, []int) {
 	return file_ceremony_proto_rawDescGZIP(), []int{10}
 }

-func (x *CeremonyInProgressState) GetActiveParticipants() []*Ed448PublicKey {
+func (x *CeremonyInProgressState) GetActiveParticipants() []*CeremonyLobbyJoin {
 	if x != nil {
 		return x.ActiveParticipants
 	}
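With active_participants now carrying CeremonyLobbyJoin messages rather than bare Ed448 public keys, callers that only need the keys have to unwrap them from the join. The sketch below assumes CeremonyLobbyJoin exposes its Ed448 signature as PublicKeySignatureEd448 (the relationship visible in the depIdxs table further down) and that the signature embeds the signer's public key, as elsewhere in this diff; treat the field names as assumptions rather than confirmed API.

package example

import "source.quilibrium.com/quilibrium/monorepo/node/protobufs"

// activeParticipantKeys pulls the raw Ed448 public keys back out of the
// lobby-join messages. Field names here are assumptions, not confirmed by
// the lines shown in this diff.
func activeParticipantKeys(
	state *protobufs.CeremonyInProgressState,
) [][]byte {
	keys := make([][]byte, 0, len(state.ActiveParticipants))
	for _, join := range state.ActiveParticipants {
		sig := join.PublicKeySignatureEd448
		if sig != nil && sig.PublicKey != nil {
			keys = append(keys, sig.PublicKey.KeyValue)
		}
	}
	return keys
}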
@@ -827,7 +827,7 @@ type CeremonyFinalizingState struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields

-	ActiveParticipants             []*Ed448PublicKey                   `protobuf:"bytes,1,rep,name=active_participants,json=activeParticipants,proto3" json:"active_participants,omitempty"`
+	ActiveParticipants             []*CeremonyLobbyJoin                `protobuf:"bytes,1,rep,name=active_participants,json=activeParticipants,proto3" json:"active_participants,omitempty"`
 	LatestSeenProverAttestations   []*CeremonySeenProverAttestation    `protobuf:"bytes,2,rep,name=latest_seen_prover_attestations,json=latestSeenProverAttestations,proto3" json:"latest_seen_prover_attestations,omitempty"`
 	DroppedParticipantAttestations []*CeremonyDroppedProverAttestation `protobuf:"bytes,3,rep,name=dropped_participant_attestations,json=droppedParticipantAttestations,proto3" json:"dropped_participant_attestations,omitempty"`
 	Commits                        []*CeremonyTranscriptCommit         `protobuf:"bytes,4,rep,name=commits,proto3" json:"commits,omitempty"`
@@ -867,7 +867,7 @@ func (*CeremonyFinalizingState) Descriptor() ([]byte, []int) {
 	return file_ceremony_proto_rawDescGZIP(), []int{11}
 }

-func (x *CeremonyFinalizingState) GetActiveParticipants() []*Ed448PublicKey {
+func (x *CeremonyFinalizingState) GetActiveParticipants() []*CeremonyLobbyJoin {
 	if x != nil {
 		return x.ActiveParticipants
 	}
@@ -1567,189 +1567,190 @@ var file_ceremony_proto_rawDesc = []byte{
 	[... regenerated serialized descriptor bytes: protoc regenerates this generated byte array to match the ceremony.proto changes in this commit; the byte-level diff is elided here ...]
 }

 var (
@@ -1822,12 +1823,12 @@ var file_ceremony_proto_depIdxs = []int32{
 	22, // 23: quilibrium.node.ceremony.pb.CeremonyLobbyJoin.public_key_signature_ed448:type_name -> quilibrium.node.keys.pb.Ed448Signature
 	7,  // 24: quilibrium.node.ceremony.pb.CeremonyOpenState.joined_participants:type_name -> quilibrium.node.ceremony.pb.CeremonyLobbyJoin
 	21, // 25: quilibrium.node.ceremony.pb.CeremonyOpenState.preferred_participants:type_name -> quilibrium.node.keys.pb.Ed448PublicKey
-	21, // 26: quilibrium.node.ceremony.pb.CeremonyInProgressState.active_participants:type_name -> quilibrium.node.keys.pb.Ed448PublicKey
+	7,  // 26: quilibrium.node.ceremony.pb.CeremonyInProgressState.active_participants:type_name -> quilibrium.node.ceremony.pb.CeremonyLobbyJoin
 	2,  // 27: quilibrium.node.ceremony.pb.CeremonyInProgressState.latest_seen_prover_attestations:type_name -> quilibrium.node.ceremony.pb.CeremonySeenProverAttestation
 	3,  // 28: quilibrium.node.ceremony.pb.CeremonyInProgressState.dropped_participant_attestations:type_name -> quilibrium.node.ceremony.pb.CeremonyDroppedProverAttestation
 	6,  // 29: quilibrium.node.ceremony.pb.CeremonyInProgressState.transcript_round_advance_commits:type_name -> quilibrium.node.ceremony.pb.CeremonyAdvanceRound
 	21, // 30: quilibrium.node.ceremony.pb.CeremonyInProgressState.next_round_participants:type_name -> quilibrium.node.keys.pb.Ed448PublicKey
-	21, // 31: quilibrium.node.ceremony.pb.CeremonyFinalizingState.active_participants:type_name -> quilibrium.node.keys.pb.Ed448PublicKey
+	7,  // 31: quilibrium.node.ceremony.pb.CeremonyFinalizingState.active_participants:type_name -> quilibrium.node.ceremony.pb.CeremonyLobbyJoin
 	2,  // 32: quilibrium.node.ceremony.pb.CeremonyFinalizingState.latest_seen_prover_attestations:type_name -> quilibrium.node.ceremony.pb.CeremonySeenProverAttestation
 	3,  // 33: quilibrium.node.ceremony.pb.CeremonyFinalizingState.dropped_participant_attestations:type_name -> quilibrium.node.ceremony.pb.CeremonyDroppedProverAttestation
 	5,  // 34: quilibrium.node.ceremony.pb.CeremonyFinalizingState.commits:type_name -> quilibrium.node.ceremony.pb.CeremonyTranscriptCommit
@@ -105,7 +105,7 @@ message CeremonyOpenState {
 }

 message CeremonyInProgressState {
-  repeated quilibrium.node.keys.pb.Ed448PublicKey active_participants = 1;
+  repeated CeremonyLobbyJoin active_participants = 1;
   repeated CeremonySeenProverAttestation latest_seen_prover_attestations = 2;
   repeated CeremonyDroppedProverAttestation dropped_participant_attestations = 3;
   repeated CeremonyAdvanceRound transcript_round_advance_commits = 4;
@@ -113,7 +113,7 @@ message CeremonyInProgressState {
 }

 message CeremonyFinalizingState {
-  repeated quilibrium.node.keys.pb.Ed448PublicKey active_participants = 1;
+  repeated CeremonyLobbyJoin active_participants = 1;
   repeated CeremonySeenProverAttestation latest_seen_prover_attestations = 2;
   repeated CeremonyDroppedProverAttestation dropped_participant_attestations = 3;
   repeated CeremonyTranscriptCommit commits = 4;
@@ -121,7 +121,9 @@ func (frame *ClockFrame) VerifyMasterClockFrame() error {
 	return nil
 }

-func (frame *ClockFrame) GetParentSelectorAndDistance() (
+func (frame *ClockFrame) GetParentSelectorAndDistance(
+	discriminator *big.Int,
+) (
 	*big.Int,
 	*big.Int,
 	*big.Int,
@@ -141,27 +143,20 @@ func (frame *ClockFrame) GetParentSelectorAndDistance() (

 	parentSelector := new(big.Int).SetBytes(frame.ParentSelector)

-	var pubkey []byte
-	ed448PublicKey := frame.GetPublicKeySignatureEd448()
-	if ed448PublicKey != nil {
-		pubkey = ed448PublicKey.PublicKey.KeyValue
-	} else {
-		return nil, nil, nil, errors.Wrap(
-			errors.New("no valid signature provided"),
-			"get parent selector and distance",
-		)
-	}
-
-	discriminator, err := poseidon.HashBytes(pubkey)
-	if err != nil {
-		return nil, nil, nil, errors.Wrap(err, "get parent selector and distance")
-	}
-
-	l := new(big.Int).Mod(new(big.Int).Sub(selector, discriminator), ff.Modulus())
-	r := new(big.Int).Mod(new(big.Int).Sub(discriminator, selector), ff.Modulus())
-	distance := r
-	if l.Cmp(r) == -1 {
-		distance = l
-	}
+	var distance *big.Int
+	if discriminator != nil {
+		l := new(big.Int).Mod(
+			new(big.Int).Sub(selector, discriminator),
+			ff.Modulus(),
+		)
+		r := new(big.Int).Mod(
+			new(big.Int).Sub(discriminator, selector),
+			ff.Modulus(),
+		)
+		distance = r
+		if l.Cmp(r) == 1 {
+			distance = l
+		}
+	}

 	return parentSelector, distance, selector, nil
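GetParentSelectorAndDistance now takes the discriminator as a parameter instead of deriving it from the frame's Ed448 public key via poseidon.HashBytes, and it only computes a distance when a discriminator is supplied (otherwise a nil distance is returned). The distance is one of the two directed differences between selector and discriminator modulo ff.Modulus(); note the comparison also changes from l.Cmp(r) == -1 to l.Cmp(r) == 1, so the larger residue is now kept. A minimal sketch of the modular computation, with ff.Modulus() replaced by an explicit parameter:

package example

import "math/big"

// directedDistances returns the two directed differences between a and b on
// the ring of integers modulo m, matching the l and r values computed above.
func directedDistances(a, b, m *big.Int) (l, r *big.Int) {
	l = new(big.Int).Mod(new(big.Int).Sub(a, b), m)
	r = new(big.Int).Mod(new(big.Int).Sub(b, a), m)
	return l, r
}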
1052	node/retroactive_peers.json	(new file; file diff suppressed because it is too large)
@@ -97,29 +97,29 @@ type ClockStore interface {
 }

 type PebbleClockStore struct {
-	db     *pebble.DB
+	db     KVDB
 	logger *zap.Logger
 }

 var _ ClockStore = (*PebbleClockStore)(nil)

 type PebbleMasterClockIterator struct {
-	i *pebble.Iterator
+	i Iterator
 }

 type PebbleClockIterator struct {
-	i  *pebble.Iterator
+	i  Iterator
 	db *PebbleClockStore
 }

 type PebbleCandidateClockIterator struct {
-	i  *pebble.Iterator
+	i  Iterator
 	db *PebbleClockStore
 }

-var _ Iterator[*protobufs.ClockFrame] = (*PebbleMasterClockIterator)(nil)
-var _ Iterator[*protobufs.ClockFrame] = (*PebbleClockIterator)(nil)
-var _ Iterator[*protobufs.ClockFrame] = (*PebbleCandidateClockIterator)(nil)
+var _ TypedIterator[*protobufs.ClockFrame] = (*PebbleMasterClockIterator)(nil)
+var _ TypedIterator[*protobufs.ClockFrame] = (*PebbleClockIterator)(nil)
+var _ TypedIterator[*protobufs.ClockFrame] = (*PebbleCandidateClockIterator)(nil)

 func (p *PebbleMasterClockIterator) First() bool {
 	return p.i.First()
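PebbleClockStore now depends on KVDB, Iterator, and TypedIterator abstractions rather than on *pebble.DB and *pebble.Iterator directly. Those interfaces are defined elsewhere in this change and are not visible in this excerpt; a minimal shape consistent with the calls made in this file might look like the sketch below (an assumption, not the actual definitions).

package example

// Iterator covers the iterator operations used by the clock store in this
// file; the real interface may carry more methods.
type Iterator interface {
	First() bool
	Next() bool
	Key() []byte
	Value() []byte
	Close() error
}

// Transaction is whatever KVDB.NewBatch returns; its method set is not
// visible in this excerpt, so it is left open here.
type Transaction interface{}

// KVDB is the key/value abstraction the store is rewritten against.
type KVDB interface {
	Set(key, value []byte) error
	NewBatch() Transaction
	NewIter(lowerBound, upperBound []byte) (Iterator, error)
}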
@@ -173,7 +173,7 @@ func (p *PebbleMasterClockIterator) Value() (*protobufs.ClockFrame, error) {
 		return nil, errors.Wrap(err, "get master clock frame iterator value")
 	}

-	frame.ParentSelector = parent.Bytes()
+	frame.ParentSelector = parent.FillBytes(make([]byte, 32))

 	return frame, nil
 }
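Bytes() on a big.Int drops leading zero bytes, so a small parent selector would serialize to fewer than 32 bytes; FillBytes(make([]byte, 32)) always produces a fixed-width, zero-padded encoding. A quick illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(1)
	fmt.Println(len(x.Bytes()))                     // 1
	fmt.Println(len(x.FillBytes(make([]byte, 32)))) // 32
}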
@@ -306,7 +306,7 @@ func (p *PebbleCandidateClockIterator) Close() error {
 	return errors.Wrap(p.i.Close(), "closing candidate clock frame iterator")
 }

-func NewPebbleClockStore(db *pebble.DB, logger *zap.Logger) *PebbleClockStore {
+func NewPebbleClockStore(db KVDB, logger *zap.Logger) *PebbleClockStore {
 	return &PebbleClockStore{
 		db,
 		logger,
@@ -446,9 +446,7 @@ func clockProverTrieKey(filter []byte, frameNumber uint64) []byte {
 }

 func (p *PebbleClockStore) NewTransaction() (Transaction, error) {
-	return &PebbleTransaction{
-		b: p.db.NewBatch(),
-	}, nil
+	return p.db.NewBatch(), nil
 }

 // GetEarliestMasterClockFrame implements ClockStore.
@@ -530,7 +528,7 @@ func (p *PebbleClockStore) GetMasterClockFrame(
 		return nil, errors.Wrap(err, "get master clock frame")
 	}

-	frame.ParentSelector = parent.Bytes()
+	frame.ParentSelector = parent.FillBytes(make([]byte, 32))

 	return frame, nil
 }
@@ -547,10 +545,10 @@ func (p *PebbleClockStore) RangeMasterClockFrames(
 		startFrameNumber = temp
 	}

-	iter, err := p.db.NewIter(&pebble.IterOptions{
-		LowerBound: clockMasterFrameKey(filter, startFrameNumber),
-		UpperBound: clockMasterFrameKey(filter, endFrameNumber),
-	})
+	iter, err := p.db.NewIter(
+		clockMasterFrameKey(filter, startFrameNumber),
+		clockMasterFrameKey(filter, endFrameNumber),
+	)
 	if err != nil {
 		return nil, errors.Wrap(err, "range master clock frames")
 	}
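Every iterator in this file now goes through KVDB.NewIter(lowerBound, upperBound) instead of building a pebble.IterOptions literal at each call site. A pebble-backed implementation of that method might look like the sketch below; the adapter name is hypothetical and the real one in this change is not shown here.

package example

import "github.com/cockroachdb/pebble"

// pebbleKVDB is a hypothetical adapter owning a *pebble.DB. The returned
// *pebble.Iterator is assumed to satisfy the store's Iterator abstraction.
type pebbleKVDB struct {
	db *pebble.DB
}

// NewIter translates the bound pair back into pebble.IterOptions, matching
// how the pre-change code constructed its iterators.
func (p *pebbleKVDB) NewIter(
	lowerBound []byte,
	upperBound []byte,
) (*pebble.Iterator, error) {
	return p.db.NewIter(&pebble.IterOptions{
		LowerBound: lowerBound,
		UpperBound: upperBound,
	})
}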
@@ -863,7 +861,7 @@ func (p *PebbleClockStore) PutCandidateDataClockFrame(
 	frame *protobufs.ClockFrame,
 	txn Transaction,
 ) error {
-	if err := p.saveAggregateProofs(nil, frame); err != nil {
+	if err := p.saveAggregateProofs(txn, frame); err != nil {
 		return errors.Wrap(
 			errors.Wrap(err, ErrInvalidData.Error()),
 			"put candidate data clock frame",
@@ -920,7 +918,7 @@ func (p *PebbleClockStore) PutDataClockFrame(
 	backfill bool,
 ) error {
 	if frame.FrameNumber != 0 {
-		if err := p.saveAggregateProofs(nil, frame); err != nil {
+		if err := p.saveAggregateProofs(txn, frame); err != nil {
 			return errors.Wrap(
 				errors.Wrap(err, ErrInvalidData.Error()),
 				"put candidate data clock frame",
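Both PutCandidateDataClockFrame and PutDataClockFrame previously passed nil into saveAggregateProofs, so the aggregate proofs were not written through the caller's transaction handle; passing txn routes them through the same batch as the frame so the writes land (or fail) together. A sketch of that pattern, with hypothetical names:

package example

import "github.com/pkg/errors"

// Transaction is the minimal write surface assumed by this sketch.
type Transaction interface {
	Set(key, value []byte) error
}

// putFrameAtomically shows the shape of the fix: every write tied to the
// frame goes through the same transaction handle.
func putFrameAtomically(
	txn Transaction,
	frameKey, frameValue, proofKey, proofValue []byte,
) error {
	if err := txn.Set(proofKey, proofValue); err != nil {
		return errors.Wrap(err, "put data clock frame")
	}
	if err := txn.Set(frameKey, frameValue); err != nil {
		return errors.Wrap(err, "put data clock frame")
	}
	return nil
}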
@@ -1004,8 +1002,8 @@ func (p *PebbleClockStore) GetCandidateDataClockFrames(
 	filter []byte,
 	frameNumber uint64,
 ) ([]*protobufs.ClockFrame, error) {
-	iter, err := p.db.NewIter(&pebble.IterOptions{
-		LowerBound: clockDataCandidateFrameKey(
+	iter, err := p.db.NewIter(
+		clockDataCandidateFrameKey(
 			filter,
 			frameNumber,
 			[]byte{
@@ -1021,7 +1019,7 @@ func (p *PebbleClockStore) GetCandidateDataClockFrames(
 				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 			},
 		),
-		UpperBound: clockDataCandidateFrameKey(
+		clockDataCandidateFrameKey(
 			filter,
 			frameNumber,
 			[]byte{
@@ -1037,7 +1035,7 @@ func (p *PebbleClockStore) GetCandidateDataClockFrames(
 				0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 			},
 		),
-	})
+	)
 	if err != nil {
 		return nil, errors.Wrap(err, "get candidate data clock frames")
 	}
@@ -1084,8 +1082,8 @@ func (p *PebbleClockStore) RangeCandidateDataClockFrames(
 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 		}
 	}
-	iter, err := p.db.NewIter(&pebble.IterOptions{
-		LowerBound: clockDataCandidateFrameKey(
+	iter, err := p.db.NewIter(
+		clockDataCandidateFrameKey(
 			filter,
 			frameNumber,
 			fromParent,
@@ -1096,7 +1094,7 @@ func (p *PebbleClockStore) RangeCandidateDataClockFrames(
 				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 			},
 		),
-		UpperBound: clockDataCandidateFrameKey(
+		clockDataCandidateFrameKey(
 			filter,
 			frameNumber,
 			toParent,
@ -1107,7 +1105,7 @@ func (p *PebbleClockStore) RangeCandidateDataClockFrames(
|
|||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "range candidate data clock frames")
|
return nil, errors.Wrap(err, "range candidate data clock frames")
|
||||||
}
|
}
|
||||||
@ -1127,10 +1125,10 @@ func (p *PebbleClockStore) RangeDataClockFrames(
|
|||||||
startFrameNumber = temp
|
startFrameNumber = temp
|
||||||
}
|
}
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(
|
||||||
LowerBound: clockDataFrameKey(filter, startFrameNumber),
|
clockDataFrameKey(filter, startFrameNumber),
|
||||||
UpperBound: clockDataFrameKey(filter, endFrameNumber),
|
clockDataFrameKey(filter, endFrameNumber),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get data clock frames")
|
return nil, errors.Wrap(err, "get data clock frames")
|
||||||
}
|
}
|
||||||
@ -1161,10 +1159,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
|
||||||
UpperBound: to,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "deduplicate")
|
return errors.Wrap(err, "deduplicate")
|
||||||
}
|
}
|
||||||
@ -1187,7 +1182,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = p.db.Set(iter.Key(), newValue, &pebble.WriteOptions{Sync: true})
|
err = p.db.Set(iter.Key(), newValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1205,10 +1200,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
from = clockDataFrameKey(filter, 1)
|
from = clockDataFrameKey(filter, 1)
|
||||||
to = clockDataFrameKey(filter, 20000)
|
to = clockDataFrameKey(filter, 20000)
|
||||||
|
|
||||||
iter, err = p.db.NewIter(&pebble.IterOptions{
|
iter, err = p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
|
||||||
UpperBound: to,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "deduplicate")
|
return errors.Wrap(err, "deduplicate")
|
||||||
}
|
}
|
||||||
@ -1231,7 +1223,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = p.db.Set(iter.Key(), newValue, &pebble.WriteOptions{Sync: true})
|
err = p.db.Set(iter.Key(), newValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1279,10 +1271,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
iter, err = p.db.NewIter(&pebble.IterOptions{
|
iter, err = p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
|
||||||
UpperBound: to,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "deduplicate")
|
return errors.Wrap(err, "deduplicate")
|
||||||
}
|
}
|
||||||
@ -1305,7 +1294,7 @@ func (p *PebbleClockStore) Deduplicate(filter []byte) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = p.db.Set(iter.Key(), newValue, &pebble.WriteOptions{Sync: true})
|
err = p.db.Set(iter.Key(), newValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1334,10 +1323,7 @@ func (p *PebbleClockStore) GetCompressedDataClockFrames(
|
|||||||
from := clockDataFrameKey(filter, fromFrameNumber)
|
from := clockDataFrameKey(filter, fromFrameNumber)
|
||||||
to := clockDataFrameKey(filter, toFrameNumber+1)
|
to := clockDataFrameKey(filter, toFrameNumber+1)
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
|
||||||
UpperBound: to,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
@ -1418,10 +1404,7 @@ func (p *PebbleClockStore) GetCompressedDataClockFrames(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
|
||||||
UpperBound: to,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
@ -1458,7 +1441,7 @@ func (p *PebbleClockStore) GetCompressedDataClockFrames(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
parentSelector, _, _, err := frame.GetParentSelectorAndDistance()
|
parentSelector, _, _, err := frame.GetParentSelectorAndDistance(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
@ -1480,8 +1463,28 @@ func (p *PebbleClockStore) GetCompressedDataClockFrames(
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
score := new(big.Int)
|
score := new(big.Int)
|
||||||
for _, p := range paths[i] {
|
for _, path := range paths[i] {
|
||||||
_, distance, _, err := p.GetParentSelectorAndDistance()
|
master, err := p.GetMasterClockFrame(
|
||||||
|
[]byte{
|
||||||
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
|
},
|
||||||
|
path.FrameNumber,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
|
}
|
||||||
|
|
||||||
|
discriminator, err := master.GetSelector()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, distance, _, err := path.GetParentSelectorAndDistance(
|
||||||
|
discriminator,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
@ -1535,10 +1538,13 @@ func (p *PebbleClockStore) GetCompressedDataClockFrames(
|
|||||||
return nil, errors.Wrap(err, "get compressed data clock frames")
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
}
|
}
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(
|
||||||
LowerBound: dataProofInclusionKey(filter, []byte(k), 0),
|
dataProofInclusionKey(filter, []byte(k), 0),
|
||||||
UpperBound: dataProofInclusionKey(filter, []byte(k), limit+1),
|
dataProofInclusionKey(filter, []byte(k), limit+1),
|
||||||
})
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "get compressed data clock frames")
|
||||||
|
}
|
||||||
|
|
||||||
for iter.First(); iter.Valid(); iter.Next() {
|
for iter.First(); iter.Valid(); iter.Next() {
|
||||||
incCommit := iter.Value()
|
incCommit := iter.Value()
|
||||||
@ -1632,9 +1638,6 @@ func (p *PebbleClockStore) SetLatestDataClockFrameNumber(
|
|||||||
err := p.db.Set(
|
err := p.db.Set(
|
||||||
clockDataLatestIndex(filter),
|
clockDataLatestIndex(filter),
|
||||||
binary.BigEndian.AppendUint64(nil, frameNumber),
|
binary.BigEndian.AppendUint64(nil, frameNumber),
|
||||||
&pebble.WriteOptions{
|
|
||||||
Sync: true,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
return errors.Wrap(err, "set latest data clock frame number")
|
return errors.Wrap(err, "set latest data clock frame number")
|
||||||
@ -1678,9 +1681,6 @@ func (p *PebbleClockStore) DeleteCandidateDataClockFrameRange(
|
|||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
&pebble.WriteOptions{
|
|
||||||
Sync: true,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
return errors.Wrap(err, "delete candidate data clock frame range")
|
return errors.Wrap(err, "delete candidate data clock frame range")
|
||||||
}
|
}
|
||||||
@ -1727,10 +1727,13 @@ func (p *PebbleClockStore) GetHighestCandidateDataClockFrame(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(from, to)
|
||||||
LowerBound: from,
|
if err != nil {
|
||||||
UpperBound: to,
|
return nil, errors.Wrap(
|
||||||
})
|
errors.Wrap(err, ErrInvalidData.Error()),
|
||||||
|
"get highest candidate data clock frame",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
found := iter.SeekLT(to)
|
found := iter.SeekLT(to)
|
||||||
if found {
|
if found {
|
||||||
|
@ -30,12 +30,12 @@ type DataProofStore interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type PebbleDataProofStore struct {
|
type PebbleDataProofStore struct {
|
||||||
db *pebble.DB
|
db KVDB
|
||||||
logger *zap.Logger
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPebbleDataProofStore(
|
func NewPebbleDataProofStore(
|
||||||
db *pebble.DB,
|
db KVDB,
|
||||||
logger *zap.Logger,
|
logger *zap.Logger,
|
||||||
) *PebbleDataProofStore {
|
) *PebbleDataProofStore {
|
||||||
return &PebbleDataProofStore{
|
return &PebbleDataProofStore{
|
||||||
@ -81,13 +81,11 @@ func dataProofSegmentKey(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *PebbleDataProofStore) NewTransaction() (Transaction, error) {
|
func (p *PebbleDataProofStore) NewTransaction() (Transaction, error) {
|
||||||
return &PebbleTransaction{
|
return p.db.NewBatch(), nil
|
||||||
b: p.db.NewBatch(),
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func internalGetAggregateProof(
|
func internalGetAggregateProof(
|
||||||
db *pebble.DB,
|
db KVDB,
|
||||||
filter []byte,
|
filter []byte,
|
||||||
commitment []byte,
|
commitment []byte,
|
||||||
frameNumber uint64,
|
frameNumber uint64,
|
||||||
@ -114,10 +112,10 @@ func internalGetAggregateProof(
|
|||||||
Proof: copied,
|
Proof: copied,
|
||||||
}
|
}
|
||||||
|
|
||||||
iter, err := db.NewIter(&pebble.IterOptions{
|
iter, err := db.NewIter(
|
||||||
LowerBound: dataProofInclusionKey(filter, commitment, 0),
|
dataProofInclusionKey(filter, commitment, 0),
|
||||||
UpperBound: dataProofInclusionKey(filter, commitment, limit+1),
|
dataProofInclusionKey(filter, commitment, limit+1),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "get aggregate proof")
|
return nil, errors.Wrap(err, "get aggregate proof")
|
||||||
}
|
}
|
||||||
@ -206,7 +204,7 @@ func (p *PebbleDataProofStore) GetAggregateProof(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func internalPutAggregateProof(
|
func internalPutAggregateProof(
|
||||||
db *pebble.DB,
|
db KVDB,
|
||||||
txn Transaction,
|
txn Transaction,
|
||||||
aggregateProof *protobufs.InclusionAggregateProof,
|
aggregateProof *protobufs.InclusionAggregateProof,
|
||||||
commitment []byte,
|
commitment []byte,
|
||||||
|
349
node/store/inmem.go
Normal file
349
node/store/inmem.go
Normal file
@ -0,0 +1,349 @@
|
|||||||
|
package store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/cockroachdb/pebble"
|
||||||
|
)
|
||||||
|
|
||||||
|
type InMemKVDB struct {
|
||||||
|
open bool
|
||||||
|
sortedKeys []string
|
||||||
|
store map[string][]byte
|
||||||
|
storeMx sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type Operation int
|
||||||
|
|
||||||
|
const (
|
||||||
|
SetOperation Operation = iota
|
||||||
|
DeleteOperation
|
||||||
|
)
|
||||||
|
|
||||||
|
type InMemKVDBOperation struct {
|
||||||
|
op Operation
|
||||||
|
key []byte
|
||||||
|
value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type InMemKVDBTransaction struct {
|
||||||
|
id int
|
||||||
|
changes []InMemKVDBOperation
|
||||||
|
db *InMemKVDB
|
||||||
|
}
|
||||||
|
|
||||||
|
type InMemKVDBIterator struct {
|
||||||
|
db *InMemKVDB
|
||||||
|
start []byte
|
||||||
|
end []byte
|
||||||
|
pos int
|
||||||
|
open bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Key() []byte {
|
||||||
|
if !i.open {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
if _, ok := i.db.store[i.db.sortedKeys[i.pos]]; !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return []byte(i.db.sortedKeys[i.pos])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) First() bool {
|
||||||
|
if !i.open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
found := false
|
||||||
|
idx := sort.SearchStrings(i.db.sortedKeys, string(i.start))
|
||||||
|
final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
|
||||||
|
if idx < final {
|
||||||
|
i.pos = idx
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return found
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Next() bool {
|
||||||
|
if !i.open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
found := false
|
||||||
|
if _, ok := i.db.store[i.db.sortedKeys[i.pos]]; ok {
|
||||||
|
final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
|
||||||
|
if i.pos < final {
|
||||||
|
i.pos = i.pos + 1
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return found
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Prev() bool {
|
||||||
|
if !i.open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
found := false
|
||||||
|
if _, ok := i.db.store[i.db.sortedKeys[i.pos]]; ok {
|
||||||
|
start := sort.SearchStrings(i.db.sortedKeys, string(i.start))
|
||||||
|
if i.pos-1 > start {
|
||||||
|
i.pos = i.pos - 1
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return found
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Valid() bool {
|
||||||
|
if !i.open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
start := sort.SearchStrings(i.db.sortedKeys, string(i.start))
|
||||||
|
final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return i.pos < final && i.pos >= start
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Value() []byte {
|
||||||
|
if !i.open {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
value := i.db.store[i.db.sortedKeys[i.pos]]
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) Close() error {
|
||||||
|
if !i.open {
|
||||||
|
return errors.New("already closed iterator")
|
||||||
|
}
|
||||||
|
|
||||||
|
i.open = false
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InMemKVDBIterator) SeekLT(lt []byte) bool {
|
||||||
|
if !i.open {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i.db.storeMx.Lock()
|
||||||
|
found := false
|
||||||
|
if _, ok := i.db.store[i.db.sortedKeys[i.pos]]; ok {
|
||||||
|
idx := sort.SearchStrings(i.db.sortedKeys, string(lt))
|
||||||
|
start := sort.SearchStrings(i.db.sortedKeys, string(i.start))
|
||||||
|
if idx >= start {
|
||||||
|
i.pos = idx + 1
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i.db.storeMx.Unlock()
|
||||||
|
|
||||||
|
return found
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *InMemKVDBTransaction) Set(key []byte, value []byte) error {
|
||||||
|
if !t.db.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.changes = append(t.changes, InMemKVDBOperation{
|
||||||
|
op: SetOperation,
|
||||||
|
key: key,
|
||||||
|
value: value,
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *InMemKVDBTransaction) Commit() error {
|
||||||
|
if !t.db.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
loop:
|
||||||
|
for _, op := range t.changes {
|
||||||
|
switch op.op {
|
||||||
|
case SetOperation:
|
||||||
|
err = t.db.Set(op.key, op.value)
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
case DeleteOperation:
|
||||||
|
err = t.db.Delete(op.key)
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *InMemKVDBTransaction) Delete(key []byte) error {
|
||||||
|
if !t.db.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.changes = append(t.changes, InMemKVDBOperation{
|
||||||
|
op: DeleteOperation,
|
||||||
|
key: key,
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *InMemKVDBTransaction) Abort() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInMemKVDB() *InMemKVDB {
|
||||||
|
return &InMemKVDB{
|
||||||
|
open: true,
|
||||||
|
store: map[string][]byte{},
|
||||||
|
sortedKeys: []string{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) Get(key []byte) ([]byte, io.Closer, error) {
|
||||||
|
if !d.open {
|
||||||
|
return nil, nil, errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
d.storeMx.Lock()
|
||||||
|
b, ok := d.store[string(key)]
|
||||||
|
d.storeMx.Unlock()
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, pebble.ErrNotFound
|
||||||
|
}
|
||||||
|
return b, io.NopCloser(nil), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) Set(key, value []byte) error {
|
||||||
|
if !d.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
d.storeMx.Lock()
|
||||||
|
_, ok := d.store[string(key)]
|
||||||
|
if !ok {
|
||||||
|
i := sort.SearchStrings(d.sortedKeys, string(key))
|
||||||
|
if len(d.sortedKeys) > i {
|
||||||
|
d.sortedKeys = append(d.sortedKeys[:i+1], d.sortedKeys[i:]...)
|
||||||
|
d.sortedKeys[i] = string(key)
|
||||||
|
} else {
|
||||||
|
d.sortedKeys = append(d.sortedKeys, string(key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.store[string(key)] = value
|
||||||
|
|
||||||
|
d.storeMx.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) Delete(key []byte) error {
|
||||||
|
if !d.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
d.storeMx.Lock()
|
||||||
|
_, ok := d.store[string(key)]
|
||||||
|
if ok {
|
||||||
|
i := sort.SearchStrings(d.sortedKeys, string(key))
|
||||||
|
if len(d.sortedKeys)-1 > i {
|
||||||
|
d.sortedKeys = append(d.sortedKeys[:i], d.sortedKeys[i+1:]...)
|
||||||
|
} else {
|
||||||
|
d.sortedKeys = d.sortedKeys[:i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(d.store, string(key))
|
||||||
|
d.storeMx.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) NewBatch() Transaction {
|
||||||
|
if !d.open {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
id := rand.Int()
|
||||||
|
return &InMemKVDBTransaction{
|
||||||
|
id: id,
|
||||||
|
db: d,
|
||||||
|
changes: []InMemKVDBOperation{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) NewIter(lowerBound []byte, upperBound []byte) (Iterator, error) {
|
||||||
|
if !d.open {
|
||||||
|
return nil, errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &InMemKVDBIterator{
|
||||||
|
open: true,
|
||||||
|
db: d,
|
||||||
|
start: lowerBound,
|
||||||
|
end: upperBound,
|
||||||
|
pos: -1,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) Compact(start, end []byte, parallelize bool) error {
|
||||||
|
if !d.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) Close() error {
|
||||||
|
if !d.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
d.open = false
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *InMemKVDB) DeleteRange(start, end []byte) error {
|
||||||
|
if !d.open {
|
||||||
|
return errors.New("inmem db closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
iter, err := d.NewIter(start, end)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for iter.First(); iter.Valid(); iter.Next() {
|
||||||
|
err = d.Delete(iter.Key())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ KVDB = (*InMemKVDB)(nil)
|
90
node/store/inmem_test.go
Normal file
90
node/store/inmem_test.go
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
package store_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"source.quilibrium.com/quilibrium/monorepo/node/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestIter(t *testing.T) {
|
||||||
|
db := store.NewInMemKVDB()
|
||||||
|
db.Set([]byte{0x01}, []byte{0x01})
|
||||||
|
db.Set([]byte{0x02}, []byte{0x02})
|
||||||
|
db.Set([]byte{0x03}, []byte{0x03})
|
||||||
|
db.Set([]byte{0x04}, []byte{0x04})
|
||||||
|
db.Set([]byte{0x06}, []byte{0x06})
|
||||||
|
db.Set([]byte{0x07}, []byte{0x07})
|
||||||
|
db.Set([]byte{0x08}, []byte{0x08})
|
||||||
|
db.Set([]byte{0x010}, []byte{0x010})
|
||||||
|
db.Set([]byte{0x012}, []byte{0x012})
|
||||||
|
db.Set([]byte{0x014}, []byte{0x014})
|
||||||
|
iter, err := db.NewIter([]byte{0x01}, []byte{0x04})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, iter.First())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x01})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x01})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x02})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x02})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x03})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x03})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.False(t, iter.Valid())
|
||||||
|
assert.NoError(t, iter.Close())
|
||||||
|
|
||||||
|
iter, err = db.NewIter([]byte{0x06}, []byte{0x09})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, iter.First())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x06})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x06})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x07})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x07})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x08})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x08})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.False(t, iter.Valid())
|
||||||
|
|
||||||
|
iter, err = db.NewIter([]byte{0x05}, []byte{0x09})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, iter.First())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x06})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x06})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x07})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x07})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x08})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x08})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.False(t, iter.Valid())
|
||||||
|
|
||||||
|
iter, err = db.NewIter([]byte{0x010}, []byte{0x015})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, iter.First())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x10})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x10})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x12})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x12})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.True(t, iter.Valid())
|
||||||
|
assert.ElementsMatch(t, iter.Value(), []byte{0x14})
|
||||||
|
assert.ElementsMatch(t, iter.Key(), []byte{0x14})
|
||||||
|
assert.True(t, iter.Next())
|
||||||
|
assert.False(t, iter.Valid())
|
||||||
|
}
|
@ -2,7 +2,18 @@ package store
|
|||||||
|
|
||||||
import "google.golang.org/protobuf/proto"
|
import "google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
type Iterator[T proto.Message] interface {
|
type Iterator interface {
|
||||||
|
Key() []byte
|
||||||
|
First() bool
|
||||||
|
Next() bool
|
||||||
|
Prev() bool
|
||||||
|
Valid() bool
|
||||||
|
Value() []byte
|
||||||
|
Close() error
|
||||||
|
SeekLT([]byte) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type TypedIterator[T proto.Message] interface {
|
||||||
First() bool
|
First() bool
|
||||||
Next() bool
|
Next() bool
|
||||||
Valid() bool
|
Valid() bool
|
||||||
|
@ -37,28 +37,28 @@ type KeyStore interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type PebbleKeyStore struct {
|
type PebbleKeyStore struct {
|
||||||
db *pebble.DB
|
db KVDB
|
||||||
logger *zap.Logger
|
logger *zap.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
type PebbleProvingKeyIterator struct {
|
type PebbleProvingKeyIterator struct {
|
||||||
i *pebble.Iterator
|
i Iterator
|
||||||
}
|
}
|
||||||
|
|
||||||
type PebbleStagedProvingKeyIterator struct {
|
type PebbleStagedProvingKeyIterator struct {
|
||||||
i *pebble.Iterator
|
i Iterator
|
||||||
}
|
}
|
||||||
|
|
||||||
type PebbleKeyBundleIterator struct {
|
type PebbleKeyBundleIterator struct {
|
||||||
i *pebble.Iterator
|
i Iterator
|
||||||
}
|
}
|
||||||
|
|
||||||
var pki = (*PebbleProvingKeyIterator)(nil)
|
var pki = (*PebbleProvingKeyIterator)(nil)
|
||||||
var spki = (*PebbleStagedProvingKeyIterator)(nil)
|
var spki = (*PebbleStagedProvingKeyIterator)(nil)
|
||||||
var kbi = (*PebbleKeyBundleIterator)(nil)
|
var kbi = (*PebbleKeyBundleIterator)(nil)
|
||||||
var _ Iterator[*protobufs.InclusionCommitment] = pki
|
var _ TypedIterator[*protobufs.InclusionCommitment] = pki
|
||||||
var _ Iterator[*protobufs.ProvingKeyAnnouncement] = spki
|
var _ TypedIterator[*protobufs.ProvingKeyAnnouncement] = spki
|
||||||
var _ Iterator[*protobufs.InclusionCommitment] = kbi
|
var _ TypedIterator[*protobufs.InclusionCommitment] = kbi
|
||||||
var _ KeyStore = (*PebbleKeyStore)(nil)
|
var _ KeyStore = (*PebbleKeyStore)(nil)
|
||||||
|
|
||||||
func (p *PebbleProvingKeyIterator) First() bool {
|
func (p *PebbleProvingKeyIterator) First() bool {
|
||||||
@ -169,7 +169,7 @@ func (p *PebbleKeyBundleIterator) Close() error {
|
|||||||
return errors.Wrap(p.i.Close(), "closing iterator")
|
return errors.Wrap(p.i.Close(), "closing iterator")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPebbleKeyStore(db *pebble.DB, logger *zap.Logger) *PebbleKeyStore {
|
func NewPebbleKeyStore(db KVDB, logger *zap.Logger) *PebbleKeyStore {
|
||||||
return &PebbleKeyStore{
|
return &PebbleKeyStore{
|
||||||
db,
|
db,
|
||||||
logger,
|
logger,
|
||||||
@ -217,9 +217,7 @@ func keyBundleEarliestKey(provingKey []byte) []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *PebbleKeyStore) NewTransaction() (Transaction, error) {
|
func (p *PebbleKeyStore) NewTransaction() (Transaction, error) {
|
||||||
return &PebbleTransaction{
|
return p.db.NewBatch(), nil
|
||||||
b: p.db.NewBatch(),
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stages a proving key for later inclusion on proof of meaningful work.
|
// Stages a proving key for later inclusion on proof of meaningful work.
|
||||||
@ -235,9 +233,6 @@ func (p *PebbleKeyStore) StageProvingKey(
|
|||||||
err = p.db.Set(
|
err = p.db.Set(
|
||||||
stagedProvingKeyKey(provingKey.PublicKey()),
|
stagedProvingKeyKey(provingKey.PublicKey()),
|
||||||
data,
|
data,
|
||||||
&pebble.WriteOptions{
|
|
||||||
Sync: true,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "stage proving key")
|
return errors.Wrap(err, "stage proving key")
|
||||||
@ -462,8 +457,8 @@ func (p *PebbleKeyStore) PutKeyBundle(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *PebbleKeyStore) RangeProvingKeys() (*PebbleProvingKeyIterator, error) {
|
func (p *PebbleKeyStore) RangeProvingKeys() (*PebbleProvingKeyIterator, error) {
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(
|
||||||
LowerBound: provingKeyKey([]byte{
|
provingKeyKey([]byte{
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
@ -473,7 +468,7 @@ func (p *PebbleKeyStore) RangeProvingKeys() (*PebbleProvingKeyIterator, error) {
|
|||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00,
|
0x00,
|
||||||
}),
|
}),
|
||||||
UpperBound: provingKeyKey([]byte{
|
provingKeyKey([]byte{
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
@ -483,7 +478,7 @@ func (p *PebbleKeyStore) RangeProvingKeys() (*PebbleProvingKeyIterator, error) {
|
|||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff,
|
0xff,
|
||||||
}),
|
}),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "range proving keys")
|
return nil, errors.Wrap(err, "range proving keys")
|
||||||
}
|
}
|
||||||
@ -495,8 +490,8 @@ func (p *PebbleKeyStore) RangeStagedProvingKeys() (
|
|||||||
*PebbleStagedProvingKeyIterator,
|
*PebbleStagedProvingKeyIterator,
|
||||||
error,
|
error,
|
||||||
) {
|
) {
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(
|
||||||
LowerBound: stagedProvingKeyKey([]byte{
|
stagedProvingKeyKey([]byte{
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
@ -506,7 +501,7 @@ func (p *PebbleKeyStore) RangeStagedProvingKeys() (
|
|||||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||||
0x00,
|
0x00,
|
||||||
}),
|
}),
|
||||||
UpperBound: stagedProvingKeyKey([]byte{
|
stagedProvingKeyKey([]byte{
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
@ -516,7 +511,7 @@ func (p *PebbleKeyStore) RangeStagedProvingKeys() (
|
|||||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||||
0xff,
|
0xff,
|
||||||
}),
|
}),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "range staged proving keys")
|
return nil, errors.Wrap(err, "range staged proving keys")
|
||||||
}
|
}
|
||||||
@ -528,10 +523,10 @@ func (p *PebbleKeyStore) RangeKeyBundleKeys(provingKey []byte) (
|
|||||||
*PebbleKeyBundleIterator,
|
*PebbleKeyBundleIterator,
|
||||||
error,
|
error,
|
||||||
) {
|
) {
|
||||||
iter, err := p.db.NewIter(&pebble.IterOptions{
|
iter, err := p.db.NewIter(
|
||||||
LowerBound: keyBundleKey(provingKey, 0),
|
keyBundleKey(provingKey, 0),
|
||||||
UpperBound: keyBundleKey(provingKey, 0xffffffffffffffff),
|
keyBundleKey(provingKey, 0xffffffffffffffff),
|
||||||
})
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "range key bundle keys")
|
return nil, errors.Wrap(err, "range key bundle keys")
|
||||||
}
|
}
|
||||||
|
16
node/store/kvdb.go
Normal file
16
node/store/kvdb.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
type KVDB interface {
|
||||||
|
Get(key []byte) ([]byte, io.Closer, error)
|
||||||
|
Set(key, value []byte) error
|
||||||
|
Delete(key []byte) error
|
||||||
|
NewBatch() Transaction
|
||||||
|
NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
|
||||||
|
Compact(start, end []byte, parallelize bool) error
|
||||||
|
Close() error
|
||||||
|
DeleteRange(start, end []byte) error
|
||||||
|
}
|
@ -1,19 +1,67 @@
|
|||||||
package store
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"io"
|
||||||
|
|
||||||
"github.com/cockroachdb/pebble"
|
"github.com/cockroachdb/pebble"
|
||||||
"source.quilibrium.com/quilibrium/monorepo/node/config"
|
"source.quilibrium.com/quilibrium/monorepo/node/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewPebbleDB(config *config.DBConfig) *pebble.DB {
|
type PebbleDB struct {
|
||||||
|
db *pebble.DB
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPebbleDB(config *config.DBConfig) *PebbleDB {
|
||||||
db, err := pebble.Open(config.Path, &pebble.Options{})
|
db, err := pebble.Open(config.Path, &pebble.Options{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return db
|
return &PebbleDB{db}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) Get(key []byte) ([]byte, io.Closer, error) {
|
||||||
|
return p.db.Get(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) Set(key, value []byte) error {
|
||||||
|
return p.db.Set(key, value, &pebble.WriteOptions{Sync: true})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) Delete(key []byte) error {
|
||||||
|
return p.db.Delete(key, &pebble.WriteOptions{Sync: true})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) NewBatch() Transaction {
|
||||||
|
return &PebbleTransaction{
|
||||||
|
b: p.db.NewBatch(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) NewIter(lowerBound []byte, upperBound []byte) (
|
||||||
|
Iterator,
|
||||||
|
error,
|
||||||
|
) {
|
||||||
|
return p.db.NewIter(&pebble.IterOptions{
|
||||||
|
LowerBound: lowerBound,
|
||||||
|
UpperBound: upperBound,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) Compact(start, end []byte, parallelize bool) error {
|
||||||
|
return p.db.Compact(start, end, parallelize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) Close() error {
|
||||||
|
return p.db.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PebbleDB) DeleteRange(start, end []byte) error {
|
||||||
|
return p.db.DeleteRange(start, end, &pebble.WriteOptions{Sync: true})
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ KVDB = (*PebbleDB)(nil)
|
||||||
|
|
||||||
type Transaction interface {
|
type Transaction interface {
|
||||||
Set(key []byte, value []byte) error
|
Set(key []byte, value []byte) error
|
||||||
Commit() error
|
Commit() error
|
||||||
|
10
pebble/.editorconfig
Normal file
10
pebble/.editorconfig
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
# See http://editorconfig.org
|
||||||
|
|
||||||
|
[*]
|
||||||
|
end_of_line = lf
|
||||||
|
insert_final_newline = true
|
||||||
|
charset = utf-8
|
||||||
|
|
||||||
|
# For non-go files, we indent with two spaces. In go files we indent
|
||||||
|
# with tabs but still set indent_size to control the github web viewer.
|
||||||
|
indent_size=2
|
160
pebble/.github/workflows/ci.yaml
vendored
Normal file
160
pebble/.github/workflows/ci.yaml
vendored
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
name: Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- crl-release-*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- crl-release-*
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
|
||||||
|
linux:
|
||||||
|
name: go-linux
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make test generate
|
||||||
|
|
||||||
|
linux-32bit:
|
||||||
|
name: go-linux-32bit
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: GOARCH=386 make test
|
||||||
|
|
||||||
|
linux-crossversion:
|
||||||
|
name: go-linux-crossversion
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make crossversion-meta
|
||||||
|
|
||||||
|
linux-race:
|
||||||
|
name: go-linux-race
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make testrace TAGS=
|
||||||
|
|
||||||
|
linux-no-invariants:
|
||||||
|
name: go-linux-no-invariants
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make test TAGS=
|
||||||
|
|
||||||
|
linux-no-cgo:
|
||||||
|
name: go-linux-no-cgo
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: CGO_ENABLED=0 make test TAGS=
|
||||||
|
|
||||||
|
darwin:
|
||||||
|
name: go-macos
|
||||||
|
runs-on: macos-12
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make test
|
||||||
|
|
||||||
|
windows:
|
||||||
|
name: go-windows
|
||||||
|
runs-on: windows-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: go test -v ./...
|
||||||
|
|
||||||
|
bsds:
|
||||||
|
name: go-bsds
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- name: FreeBSD build
|
||||||
|
env:
|
||||||
|
GOOS: freebsd
|
||||||
|
run: go build -v ./...
|
||||||
|
|
||||||
|
- name: NetBSD build
|
||||||
|
env:
|
||||||
|
GOOS: netbsd
|
||||||
|
run: go build -v ./...
|
||||||
|
|
||||||
|
- name: OpenBSD build
|
||||||
|
env:
|
||||||
|
GOOS: openbsd
|
||||||
|
run: go build -v ./...
|
||||||
|
|
||||||
|
go-lint-checks:
|
||||||
|
name: go-lint-checks
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- name: mod-tidy-check
|
||||||
|
run: make mod-tidy-check
|
||||||
|
|
||||||
|
- name: format-check
|
||||||
|
run: make format-check
|
71
pebble/.github/workflows/code-cover-gen.yaml
vendored
Normal file
71
pebble/.github/workflows/code-cover-gen.yaml
vendored
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
name: PR code coverage (generate)
|
||||||
|
|
||||||
|
on:
|
||||||
|
# This workflow does not have access to secrets because it runs on top of
|
||||||
|
# potentially unsafe changes.
|
||||||
|
pull_request:
|
||||||
|
types: [ opened, reopened, synchronize ]
|
||||||
|
branches: [ master ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# The results of this job are uploaded as artifacts. A separate job will
|
||||||
|
# download the artifacts and upload them to a GCS bucket.
|
||||||
|
code-cover-gen:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
PR: ${{ github.event.pull_request.number }}
|
||||||
|
HEAD_SHA: ${{ github.event.pull_request.head.sha }}
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
# By default, checkout merges the PR into the current master.
|
||||||
|
# Instead, we want to check out the PR as-is.
|
||||||
|
ref: ${{ github.event.pull_request.head.sha }}
|
||||||
|
# Fetch all branches and history (we'll need the origin/master ref and
|
||||||
|
# the base commit).
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- name: Get list of changed packages
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euxo pipefail
|
||||||
|
# To get the base commit, we get the number of commits in the PR.
|
||||||
|
# Note that github.event.pull_request.base.sha is not what we want,
|
||||||
|
# that is the tip of master and not necessarily the PR fork point.
|
||||||
|
NUM_COMMITS=$(gh pr view $PR --json commits --jq '.commits | length')
|
||||||
|
BASE_SHA=$(git rev-parse HEAD~${NUM_COMMITS})
|
||||||
|
CHANGED_PKGS=$(scripts/changed-go-pkgs.sh ${BASE_SHA} ${HEAD_SHA})
|
||||||
|
echo "BASE_SHA=${BASE_SHA}" >> "${GITHUB_ENV}"
|
||||||
|
echo "CHANGED_PKGS=${CHANGED_PKGS}" >> "${GITHUB_ENV}"
|
||||||
|
|
||||||
|
- name: Generate "after" coverage
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euxo pipefail
|
||||||
|
CHANGED_PKGS='${{ env.CHANGED_PKGS }}'
|
||||||
|
mkdir -p artifacts
|
||||||
|
# Make a copy of the script so that the "before" run below uses the
|
||||||
|
# same version.
|
||||||
|
cp scripts/pr-codecov-run-tests.sh ${RUNNER_TEMP}/
|
||||||
|
${RUNNER_TEMP}/pr-codecov-run-tests.sh artifacts/cover-${PR}-${HEAD_SHA}.json "${CHANGED_PKGS}"
|
||||||
|
|
||||||
|
- name: Generate "before" coverage
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euxo pipefail
|
||||||
|
BASE_SHA='${{ env.BASE_SHA }}'
|
||||||
|
CHANGED_PKGS='${{ env.CHANGED_PKGS }}'
|
||||||
|
git checkout -f ${BASE_SHA}
|
||||||
|
${RUNNER_TEMP}/pr-codecov-run-tests.sh artifacts/cover-${PR}-${BASE_SHA}.json "${CHANGED_PKGS}"
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
uses: actions/upload-artifact@v2
|
||||||
|
with:
|
||||||
|
name: cover
|
||||||
|
path: artifacts/cover-*.json
|
55
pebble/.github/workflows/code-cover-publish.yaml
vendored
Normal file
55
pebble/.github/workflows/code-cover-publish.yaml
vendored
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
name: PR code coverage (publish)
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_run:
|
||||||
|
workflows: [ "PR code coverage (generate)" ]
|
||||||
|
types: [ "completed" ]
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# This job downloads the artifacts genearted by the code-cover-gen job and
|
||||||
|
# uploads them to a GCS bucket, from where Reviewable can access them.
|
||||||
|
code-cover-publish:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: >
|
||||||
|
github.event.workflow_run.event == 'pull_request' &&
|
||||||
|
github.event.workflow_run.conclusion == 'success'
|
||||||
|
steps:
|
||||||
|
- name: 'Download artifact'
|
||||||
|
uses: actions/github-script@v3.1.0
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
var artifacts = await github.actions.listWorkflowRunArtifacts({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
run_id: ${{github.event.workflow_run.id }},
|
||||||
|
});
|
||||||
|
var matchArtifact = artifacts.data.artifacts.filter((artifact) => {
|
||||||
|
return artifact.name == "cover"
|
||||||
|
})[0];
|
||||||
|
var download = await github.actions.downloadArtifact({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
artifact_id: matchArtifact.id,
|
||||||
|
archive_format: 'zip',
|
||||||
|
});
|
||||||
|
var fs = require('fs');
|
||||||
|
fs.writeFileSync('${{github.workspace}}/cover.zip', Buffer.from(download.data));
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
mkdir -p cover
|
||||||
|
unzip cover.zip -d cover
|
||||||
|
|
||||||
|
- name: 'Authenticate to Google Cloud'
|
||||||
|
uses: 'google-github-actions/auth@v1'
|
||||||
|
with:
|
||||||
|
credentials_json: '${{ secrets.CODECOVER_SERVICE_ACCOUNT_KEY }}'
|
||||||
|
|
||||||
|
- name: 'Upload to GCS'
|
||||||
|
uses: 'google-github-actions/upload-cloud-storage@v1'
|
||||||
|
with:
|
||||||
|
path: 'cover'
|
||||||
|
glob: '**/cover-*.json'
|
||||||
|
parent: false
|
||||||
|
destination: 'crl-codecover-public/pr-pebble/'
|
||||||
|
process_gcloudignore: false
|
48
pebble/.github/workflows/nightly-code-cover.yaml
vendored
Normal file
48
pebble/.github/workflows/nightly-code-cover.yaml
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
name: Nightly code coverage
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: '00 08 * * * '
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
coverage-gen-and-publish:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
# By default, checkout merges the PR into the current master.
|
||||||
|
# Instead, we want to check out the PR as-is.
|
||||||
|
ref: ${{ github.event.pull_request.head.sha }}
|
||||||
|
# Fetch all branches and history (we'll need the origin/master ref and
|
||||||
|
# the base commit).
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- name: Generate coverage
|
||||||
|
run: scripts/code-coverage.sh
|
||||||
|
|
||||||
|
- name: Install lcov
|
||||||
|
run: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install lcov
|
||||||
|
|
||||||
|
- name: 'Authenticate to Google Cloud'
|
||||||
|
uses: 'google-github-actions/auth@v1'
|
||||||
|
with:
|
||||||
|
credentials_json: '${{ secrets.CODECOVER_SERVICE_ACCOUNT_KEY }}'
|
||||||
|
|
||||||
|
- name: 'Set up Cloud SDK'
|
||||||
|
uses: 'google-github-actions/setup-gcloud@v1'
|
||||||
|
with:
|
||||||
|
version: '>= 363.0.0'
|
||||||
|
|
||||||
|
- name: Publish coverage
|
||||||
|
run: scripts/code-coverage-publish.sh
|
32
pebble/.github/workflows/sanitizers.yaml
vendored
Normal file
32
pebble/.github/workflows/sanitizers.yaml
vendored
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
name: Sanitizers
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "0 0 * * *" # Midnight UTC, daily.
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
linux-asan:
|
||||||
|
name: go-linux-asan
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make testasan
|
||||||
|
|
||||||
|
linux-msan:
|
||||||
|
name: go-linux-msan
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: "1.21"
|
||||||
|
|
||||||
|
- run: make testmsan
|
34
pebble/.github/workflows/stale.yml
vendored
Normal file
34
pebble/.github/workflows/stale.yml
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
name: Mark stale issues and pull requests
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "0 11 * * 1-4"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
stale:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v3
|
||||||
|
with:
|
||||||
|
operations-per-run: 1000
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
stale-issue-message: |
|
||||||
|
We have marked this issue as stale because it has been inactive for
|
||||||
|
18 months. If this issue is still relevant, removing the stale label
|
||||||
|
or adding a comment will keep it active. Otherwise, we'll close it
|
||||||
|
in 10 days to keep the issue queue tidy. Thank you for your
|
||||||
|
contribution to Pebble!
|
||||||
|
stale-pr-message: 'Stale pull request message'
|
||||||
|
stale-issue-label: 'no-issue-activity'
|
||||||
|
stale-pr-label: 'no-pr-activity'
|
||||||
|
close-issue-label: 'X-stale'
|
||||||
|
close-pr-label: 'X-stale'
|
||||||
|
# Disable this for PR's, by setting a very high bar
|
||||||
|
days-before-pr-stale: 99999
|
||||||
|
days-before-issue-stale: 540
|
||||||
|
days-before-close: 10
|
||||||
|
exempt-issue-labels: 'X-nostale'
|
9
pebble/.gitignore
vendored
Normal file
9
pebble/.gitignore
vendored
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
# Github action artifacts.
|
||||||
|
artifacts
|
||||||
|
# Profiling artifacts.
|
||||||
|
cpu.*.prof
|
||||||
|
heap.prof
|
||||||
|
mutex.prof
|
||||||
|
coverprofile.out
|
||||||
|
# Testing artifacts
|
||||||
|
meta.*.test
|
27
pebble/LICENSE
Normal file
27
pebble/LICENSE
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
131
pebble/Makefile
Normal file
131
pebble/Makefile
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
GO := go
|
||||||
|
PKG := ./...
|
||||||
|
GOFLAGS :=
|
||||||
|
STRESSFLAGS :=
|
||||||
|
TAGS := invariants
|
||||||
|
TESTS := .
|
||||||
|
COVER_PROFILE := coverprofile.out
|
||||||
|
|
||||||
|
.PHONY: all
|
||||||
|
all:
|
||||||
|
@echo usage:
|
||||||
|
@echo " make test"
|
||||||
|
@echo " make testrace"
|
||||||
|
@echo " make stress"
|
||||||
|
@echo " make stressrace"
|
||||||
|
@echo " make stressmeta"
|
||||||
|
@echo " make crossversion-meta"
|
||||||
|
@echo " make testcoverage"
|
	@echo " make mod-update"
	@echo " make generate"
	@echo " make generate-test-data"
	@echo " make clean"

override testflags :=
.PHONY: test
test:
	${GO} test -tags '$(TAGS)' ${testflags} -run ${TESTS} ${PKG}

.PHONY: testcoverage
testcoverage:
	${GO} test -tags '$(TAGS)' ${testflags} -run ${TESTS} ${PKG} -coverprofile ${COVER_PROFILE}

.PHONY: testrace
testrace: testflags += -race -timeout 20m
testrace: test

testasan: testflags += -asan -timeout 20m
testasan: test

testmsan: export CC=clang
testmsan: testflags += -msan -timeout 20m
testmsan: test

.PHONY: testobjiotracing
testobjiotracing:
	${GO} test -tags '$(TAGS) pebble_obj_io_tracing' ${testflags} -run ${TESTS} ./objstorage/objstorageprovider/objiotracing

.PHONY: lint
lint:
	${GO} test -tags '$(TAGS)' ${testflags} -run ${TESTS} ./internal/lint

.PHONY: stress stressrace
stressrace: testflags += -race
stress stressrace: testflags += -exec 'stress ${STRESSFLAGS}' -timeout 0 -test.v
stress stressrace: test

.PHONY: stressmeta
stressmeta: override PKG = ./internal/metamorphic
stressmeta: override STRESSFLAGS += -p 1
stressmeta: override TESTS = TestMeta$$
stressmeta: stress

.PHONY: crossversion-meta
crossversion-meta:
	$(eval LATEST_RELEASE := $(shell git fetch origin && git branch -r --list '*/crl-release-*' | grep -o 'crl-release-.*$$' | sort | tail -1))
	git checkout ${LATEST_RELEASE}; \
	${GO} test -c ./internal/metamorphic -o './internal/metamorphic/crossversion/${LATEST_RELEASE}.test'; \
	git checkout -; \
	${GO} test -c ./internal/metamorphic -o './internal/metamorphic/crossversion/head.test'; \
	${GO} test -tags '$(TAGS)' ${testflags} -v -run 'TestMetaCrossVersion' ./internal/metamorphic/crossversion --version '${LATEST_RELEASE},${LATEST_RELEASE},${LATEST_RELEASE}.test' --version 'HEAD,HEAD,./head.test'

.PHONY: stress-crossversion
stress-crossversion:
	STRESS=1 ./scripts/run-crossversion-meta.sh crl-release-21.2 crl-release-22.1 crl-release-22.2 crl-release-23.1 master

.PHONY: generate
generate:
	${GO} generate ${PKG}

generate:

# Note that the output of generate-test-data is not deterministic. This should
# only be run manually as needed.
.PHONY: generate-test-data
generate-test-data:
	${GO} run -tags make_incorrect_manifests ./tool/make_incorrect_manifests.go
	${GO} run -tags make_test_find_db ./tool/make_test_find_db.go
	${GO} run -tags make_test_sstables ./tool/make_test_sstables.go
	${GO} run -tags make_test_remotecat ./tool/make_test_remotecat.go

mod-update:
	${GO} get -u
	${GO} mod tidy

.PHONY: clean
clean:
	rm -f $(patsubst %,%.test,$(notdir $(shell go list ${PKG})))

git_dirty := $(shell git status -s)

.PHONY: git-clean-check
git-clean-check:
ifneq ($(git_dirty),)
	@echo "Git repository is dirty!"
	@false
else
	@echo "Git repository is clean."
endif

.PHONY: mod-tidy-check
mod-tidy-check:
ifneq ($(git_dirty),)
	$(error mod-tidy-check must be invoked on a clean repository)
endif
	@${GO} mod tidy
	$(MAKE) git-clean-check

# TODO(radu): switch back to @latest once bogus doc changes are
# addressed; see https://github.com/cockroachdb/crlfmt/pull/44
.PHONY: format
format:
	go install github.com/cockroachdb/crlfmt@44a36ec7 && crlfmt -w -tab 2 .

.PHONY: format-check
format-check:
ifneq ($(git_dirty),)
	$(error format-check must be invoked on a clean repository)
endif
	$(MAKE) format
	git diff
	$(MAKE) git-clean-check
226 pebble/README.md Normal file
@@ -0,0 +1,226 @@
# Pebble [![Build Status](https://github.com/cockroachdb/pebble/actions/workflows/ci.yaml/badge.svg?branch=master)](https://github.com/cockroachdb/pebble/actions/workflows/ci.yaml) [![GoDoc](https://godoc.org/github.com/cockroachdb/pebble?status.svg)](https://godoc.org/github.com/cockroachdb/pebble) <sup><sub><sub>[Coverage](https://storage.googleapis.com/crl-codecover-public/pebble/index.html)</sub></sub></sup>

#### [Nightly benchmarks](https://cockroachdb.github.io/pebble/)

Pebble is a LevelDB/RocksDB inspired key-value store focused on
performance and internal usage by CockroachDB. Pebble inherits the
RocksDB file formats and a few extensions such as range deletion
tombstones, table-level bloom filters, and updates to the MANIFEST
format.

Pebble intentionally does not aspire to include every feature in RocksDB and
specifically targets the use case and feature set needed by CockroachDB:

* Block-based tables
* Checkpoints
* Indexed batches
* Iterator options (lower/upper bound, table filter)
* Level-based compaction
* Manual compaction
* Merge operator
* Prefix bloom filters
* Prefix iteration
* Range deletion tombstones
* Reverse iteration
* SSTable ingestion
* Single delete
* Snapshots
* Table-level bloom filters

RocksDB has a large number of features that are not implemented in
Pebble:

* Backups
* Column families
* Delete files in range
* FIFO compaction style
* Forward iterator / tailing iterator
* Hash table format
* Memtable bloom filter
* Persistent cache
* Pin iterator key / value
* Plain table format
* SSTable ingest-behind
* Sub-compactions
* Transactions
* Universal compaction style

***WARNING***: Pebble may silently corrupt data or behave incorrectly if
used with a RocksDB database that uses a feature Pebble doesn't
support. Caveat emptor!

## Production Ready

Pebble was introduced as an alternative storage engine to RocksDB in
CockroachDB v20.1 (released May 2020) and was used in production
successfully at that time. Pebble was made the default storage engine
in CockroachDB v20.2 (released Nov 2020). Pebble is being used in
production by users of CockroachDB at scale and is considered stable
and production ready.

## Advantages

Pebble offers several improvements over RocksDB:

* Faster reverse iteration via backwards links in the memtable's
  skiplist.
* Faster commit pipeline that achieves better concurrency.
* Seamless merged iteration of indexed batches. The mutations in the
  batch conceptually occupy another memtable level.
* L0 sublevels and flush splitting for concurrent compactions out of L0 and
  reduced read-amplification during heavy write load.
* Faster LSM edits in LSMs with large numbers of sstables through use of a
  copy-on-write B-tree to hold file metadata.
* Delete-only compactions that drop whole sstables that fall within the bounds
  of a range deletion.
* Block-property collectors and filters that enable iterators to skip tables,
  index blocks and data blocks that are irrelevant, according to user-defined
  properties over key-value pairs.
* Range keys API, allowing KV pairs defined over a range of keyspace with
  user-defined semantics and interleaved during iteration.
* Smaller, more approachable code base.

See the [Pebble vs RocksDB: Implementation
Differences](docs/rocksdb.md) doc for more details on implementation
differences.

## RocksDB Compatibility

Pebble strives for forward compatibility with RocksDB 6.2.1 (the latest
version of RocksDB used by CockroachDB). Forward compatibility means
that a DB generated by RocksDB can be used by Pebble. Currently, Pebble
provides bidirectional compatibility with RocksDB (a Pebble generated DB
can be used by RocksDB) when using its FormatMostCompatible format. New
functionality that is backwards incompatible is gated behind new format
major versions. In general, Pebble only provides compatibility with the
subset of functionality and configuration used by CockroachDB. The scope
of RocksDB functionality and configuration is too large to adequately
test and document all the incompatibilities. The list below contains
known incompatibilities.

* Pebble's use of WAL recycling is only compatible with RocksDB's
  `kTolerateCorruptedTailRecords` WAL recovery mode. Older versions of
  RocksDB would automatically map incompatible WAL recovery modes to
  `kTolerateCorruptedTailRecords`. New versions of RocksDB will
  disable WAL recycling.
* Column families. Pebble does not support column families, nor does
  it attempt to detect their usage when opening a DB that may contain
  them.
* Hash table format. Pebble does not support the hash table sstable
  format.
* Plain table format. Pebble does not support the plain table sstable
  format.
* SSTable format version 3 and 4. Pebble does not support version 3
  and version 4 format sstables. The sstable format version is
  controlled by the `BlockBasedTableOptions::format_version` option.
  See [#97](https://github.com/cockroachdb/pebble/issues/97).

## Format major versions

Over time Pebble has introduced new physical file formats. Backwards
incompatible changes are made through the introduction of 'format major
versions'. By default, when Pebble opens a database, it defaults to
`FormatMostCompatible`. This version is bi-directionally compatible with RocksDB
6.2.1 (with the caveats described above).

To opt into new formats, a user may set `FormatMajorVersion` on the
[`Options`](https://pkg.go.dev/github.com/cockroachdb/pebble#Options)
supplied to
[`Open`](https://pkg.go.dev/github.com/cockroachdb/pebble#Open), or
upgrade the format major version at runtime using
[`DB.RatchetFormatMajorVersion`](https://pkg.go.dev/github.com/cockroachdb/pebble#DB.RatchetFormatMajorVersion).
Format major version upgrades are permanent; there is no option to
return to an earlier format.
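
As a rough sketch (not part of the upstream README text), opting into a newer
format from Go code might look like the following. `FormatVirtualSSTables` is
used only as an example; any constant from the table below could stand in its
place.

```go
package main

import (
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	// Open at a specific format major version (one of the constants in the
	// table below). FormatVirtualSSTables is used here purely as an example.
	db, err := pebble.Open("demo", &pebble.Options{
		FormatMajorVersion: pebble.FormatVirtualSSTables,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Or ratchet a live database forward. Upgrades are permanent; there is
	// no way to return to an earlier format.
	if err := db.RatchetFormatMajorVersion(pebble.FormatVirtualSSTables); err != nil {
		log.Fatal(err)
	}
}
```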

The table below outlines the history of format major versions:

| Name                               | Value | Migration  |
|------------------------------------|-------|------------|
| FormatMostCompatible               | 1     | No         |
| FormatVersioned                    | 3     | No         |
| FormatSetWithDelete                | 4     | No         |
| FormatBlockPropertyCollector       | 5     | No         |
| FormatSplitUserKeysMarked          | 6     | Background |
| FormatSplitUserKeysMarkedCompacted | 7     | Blocking   |
| FormatRangeKeys                    | 8     | No         |
| FormatMinTableFormatPebblev1       | 9     | No         |
| FormatPrePebblev1Marked            | 10    | Background |
| FormatSSTableValueBlocks           | 12    | No         |
| FormatFlushableIngest              | 13    | No         |
| FormatPrePebblev1MarkedCompacted   | 14    | Blocking   |
| FormatDeleteSizedAndObsolete       | 15    | No         |
| FormatVirtualSSTables              | 16    | No         |

Upgrading to a format major version with 'Background' in the migration
column may trigger background activity to rewrite physical file
formats, typically through compactions. Upgrading to a format major
version with 'Blocking' in the migration column will block until a
migration is complete. The database may continue to serve reads and
writes if upgrading a live database through
`RatchetFormatMajorVersion`, but the method call will not return until
the migration is complete.

For reference, the table below lists the range of supported Pebble format major
versions for CockroachDB releases.

| CockroachDB release | Earliest supported                 | Latest supported          |
|---------------------|------------------------------------|---------------------------|
| 20.1 through 21.1   | FormatMostCompatible               | FormatMostCompatible      |
| 21.2                | FormatMostCompatible               | FormatSetWithDelete       |
| 22.1                | FormatMostCompatible               | FormatSplitUserKeysMarked |
| 22.2                | FormatMostCompatible               | FormatPrePebblev1Marked   |
| 23.1                | FormatSplitUserKeysMarkedCompacted | FormatFlushableIngest     |
| 23.2                | FormatSplitUserKeysMarkedCompacted | FormatVirtualSSTables     |
| 24.1 plan           | FormatSSTableValueBlocks           |                           |

## Pedigree

Pebble is based on the incomplete Go version of LevelDB:

https://github.com/golang/leveldb

The Go version of LevelDB is based on the C++ original:

https://github.com/google/leveldb

Optimizations and inspiration were drawn from RocksDB:

https://github.com/facebook/rocksdb

## Getting Started

### Example Code

```go
package main

import (
	"fmt"
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	db, err := pebble.Open("demo", &pebble.Options{})
	if err != nil {
		log.Fatal(err)
	}
	key := []byte("hello")
	if err := db.Set(key, []byte("world"), pebble.Sync); err != nil {
		log.Fatal(err)
	}
	value, closer, err := db.Get(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %s\n", key, value)
	if err := closer.Close(); err != nil {
		log.Fatal(err)
	}
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}
}
```
2312 pebble/batch.go Normal file
File diff suppressed because it is too large
1652 pebble/batch_test.go Normal file
File diff suppressed because it is too large
250 pebble/bloom/bloom.go Normal file
@@ -0,0 +1,250 @@
// Copyright 2013 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

// Package bloom implements Bloom filters.
package bloom // import "github.com/cockroachdb/pebble/bloom"

import (
	"encoding/binary"
	"fmt"
	"sync"

	"github.com/cockroachdb/pebble/internal/base"
)

const (
	cacheLineSize = 64
	cacheLineBits = cacheLineSize * 8
)

type tableFilter []byte

func (f tableFilter) MayContain(key []byte) bool {
	if len(f) <= 5 {
		return false
	}
	n := len(f) - 5
	nProbes := f[n]
	nLines := binary.LittleEndian.Uint32(f[n+1:])
	cacheLineBits := 8 * (uint32(n) / nLines)

	h := hash(key)
	delta := h>>17 | h<<15
	b := (h % nLines) * cacheLineBits

	for j := uint8(0); j < nProbes; j++ {
		bitPos := b + (h % cacheLineBits)
		if f[bitPos/8]&(1<<(bitPos%8)) == 0 {
			return false
		}
		h += delta
	}
	return true
}

func calculateProbes(bitsPerKey int) uint32 {
	// We intentionally round down to reduce probing cost a little bit
	n := uint32(float64(bitsPerKey) * 0.69) // 0.69 =~ ln(2)
	if n < 1 {
		n = 1
	}
	if n > 30 {
		n = 30
	}
	return n
}

// extend appends n zero bytes to b. It returns the overall slice (of length
// n+len(originalB)) and the slice of n trailing zeroes.
func extend(b []byte, n int) (overall, trailer []byte) {
	want := n + len(b)
	if want <= cap(b) {
		overall = b[:want]
		trailer = overall[len(b):]
		for i := range trailer {
			trailer[i] = 0
		}
	} else {
		// Grow the capacity exponentially, with a 1KiB minimum.
		c := 1024
		for c < want {
			c += c / 4
		}
		overall = make([]byte, want, c)
		trailer = overall[len(b):]
		copy(overall, b)
	}
	return overall, trailer
}

// hash implements a hashing algorithm similar to the Murmur hash.
func hash(b []byte) uint32 {
	const (
		seed = 0xbc9f1d34
		m    = 0xc6a4a793
	)
	h := uint32(seed) ^ uint32(uint64(uint32(len(b))*m))
	for ; len(b) >= 4; b = b[4:] {
		h += uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
		h *= m
		h ^= h >> 16
	}

	// The code below first casts each byte to a signed 8-bit integer. This is
	// necessary to match RocksDB's behavior. Note that the `byte` type in Go is
	// unsigned. What is the difference between casting a signed 8-bit value vs
	// unsigned 8-bit value into an unsigned 32-bit value?
	// Sign-extension. Consider the value 250 which has the bit pattern 11111010:
	//
	//	uint32(250)       = 00000000000000000000000011111010
	//	uint32(int8(250)) = 11111111111111111111111111111010
	//
	// Note that the original LevelDB code did not explicitly cast to a signed
	// 8-bit value which left the behavior dependent on whether C characters were
	// signed or unsigned which is a compiler flag for gcc (-funsigned-char).
	switch len(b) {
	case 3:
		h += uint32(int8(b[2])) << 16
		fallthrough
	case 2:
		h += uint32(int8(b[1])) << 8
		fallthrough
	case 1:
		h += uint32(int8(b[0]))
		h *= m
		h ^= h >> 24
	}
	return h
}

const hashBlockLen = 16384

type hashBlock [hashBlockLen]uint32

var hashBlockPool = sync.Pool{
	New: func() interface{} {
		return &hashBlock{}
	},
}

type tableFilterWriter struct {
	bitsPerKey int

	numHashes int
	// We store the hashes in blocks.
	blocks   []*hashBlock
	lastHash uint32

	// Initial "in-line" storage for the blocks slice (to avoid some small
	// allocations).
	blocksBuf [16]*hashBlock
}

func newTableFilterWriter(bitsPerKey int) *tableFilterWriter {
	w := &tableFilterWriter{
		bitsPerKey: bitsPerKey,
	}
	w.blocks = w.blocksBuf[:0]
	return w
}

// AddKey implements the base.FilterWriter interface.
func (w *tableFilterWriter) AddKey(key []byte) {
	h := hash(key)
	if w.numHashes != 0 && h == w.lastHash {
		return
	}
	ofs := w.numHashes % hashBlockLen
	if ofs == 0 {
		// Time for a new block.
		w.blocks = append(w.blocks, hashBlockPool.Get().(*hashBlock))
	}
	w.blocks[len(w.blocks)-1][ofs] = h
	w.numHashes++
	w.lastHash = h
}

// Finish implements the base.FilterWriter interface.
func (w *tableFilterWriter) Finish(buf []byte) []byte {
	// The table filter format matches the RocksDB full-file filter format.
	var nLines int
	if w.numHashes != 0 {
		nLines = (w.numHashes*w.bitsPerKey + cacheLineBits - 1) / (cacheLineBits)
		// Make nLines an odd number to make sure more bits are involved when
		// determining which block.
		if nLines%2 == 0 {
			nLines++
		}
	}

	nBytes := nLines * cacheLineSize
	// +5: 4 bytes for num-lines, 1 byte for num-probes
	buf, filter := extend(buf, nBytes+5)

	if nLines != 0 {
		nProbes := calculateProbes(w.bitsPerKey)
		for bIdx, b := range w.blocks {
			length := hashBlockLen
			if bIdx == len(w.blocks)-1 && w.numHashes%hashBlockLen != 0 {
				length = w.numHashes % hashBlockLen
			}
			for _, h := range b[:length] {
				delta := h>>17 | h<<15 // rotate right 17 bits
				b := (h % uint32(nLines)) * (cacheLineBits)
				for i := uint32(0); i < nProbes; i++ {
					bitPos := b + (h % cacheLineBits)
					filter[bitPos/8] |= (1 << (bitPos % 8))
					h += delta
				}
			}
		}
		filter[nBytes] = byte(nProbes)
		binary.LittleEndian.PutUint32(filter[nBytes+1:], uint32(nLines))
	}

	// Release the hash blocks.
	for i, b := range w.blocks {
		hashBlockPool.Put(b)
		w.blocks[i] = nil
	}
	w.blocks = w.blocks[:0]
	w.numHashes = 0
	return buf
}

// FilterPolicy implements the FilterPolicy interface from the pebble package.
//
// The integer value is the approximate number of bits used per key. A good
// value is 10, which yields a filter with ~ 1% false positive rate.
type FilterPolicy int

var _ base.FilterPolicy = FilterPolicy(0)

// Name implements the pebble.FilterPolicy interface.
func (p FilterPolicy) Name() string {
	// This string looks arbitrary, but its value is written to LevelDB .sst
	// files, and should be this exact value to be compatible with those files
	// and with the C++ LevelDB code.
	return "rocksdb.BuiltinBloomFilter"
}

// MayContain implements the pebble.FilterPolicy interface.
func (p FilterPolicy) MayContain(ftype base.FilterType, f, key []byte) bool {
	switch ftype {
	case base.TableFilter:
		return tableFilter(f).MayContain(key)
	default:
		panic(fmt.Sprintf("unknown filter type: %v", ftype))
	}
}

// NewWriter implements the pebble.FilterPolicy interface.
func (p FilterPolicy) NewWriter(ftype base.FilterType) base.FilterWriter {
	switch ftype {
	case base.TableFilter:
		return newTableFilterWriter(int(p))
	default:
		panic(fmt.Sprintf("unknown filter type: %v", ftype))
	}
}
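Usage note (not part of the diff): a `FilterPolicy` such as the one above is what a Pebble user would wire into the per-level options to get the table-level bloom filters mentioned in the README. A minimal sketch, assuming the standard `pebble.Options.Levels` / `pebble.LevelOptions.FilterPolicy` fields:

```go
package main

import (
	"log"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
)

func main() {
	opts := &pebble.Options{
		Levels: []pebble.LevelOptions{
			// ~10 bits per key yields a table filter with roughly a 1% false
			// positive rate, per the FilterPolicy doc comment above.
			{FilterPolicy: bloom.FilterPolicy(10)},
		},
	}
	db, err := pebble.Open("demo", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```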
219 pebble/bloom/bloom_test.go Normal file
@@ -0,0 +1,219 @@
// Copyright 2013 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package bloom

import (
	"crypto/rand"
	"strings"
	"testing"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/stretchr/testify/require"
)

func (f tableFilter) String() string {
	var buf strings.Builder
	for i, x := range f {
		if i > 0 {
			if i%8 == 0 {
				buf.WriteString("\n")
			} else {
				buf.WriteString(" ")
			}
		}

		for j := uint(0); j < 8; j++ {
			if x&(1<<(7-j)) != 0 {
				buf.WriteString("1")
			} else {
				buf.WriteString(".")
			}
		}
	}
	buf.WriteString("\n")
	return buf.String()
}

func newTableFilter(bitsPerKey int, keys ...[]byte) tableFilter {
	w := FilterPolicy(bitsPerKey).NewWriter(base.TableFilter)
	for _, key := range keys {
		w.AddKey(key)
	}
	return tableFilter(w.Finish(nil))
}

func TestSmallBloomFilter(t *testing.T) {
	f := newTableFilter(10, []byte("hello"), []byte("world"))

	// The magic expected string comes from running RocksDB's util/bloom_test.cc:FullBloomTest.FullSmall.
	want := `
........ ........ ........ .......1 ........ ........ ........ ........
........ .1...... ........ .1...... ........ ........ ........ ........
...1.... ........ ........ ........ ........ ........ ........ ........
........ ........ ........ ........ ........ ........ ........ ...1....
........ ........ ........ ........ .....1.. ........ ........ ........
.......1 ........ ........ ........ ........ ........ .1...... ........
........ ........ ........ ........ ........ ...1.... ........ ........
.......1 ........ ........ ........ .1...1.. ........ ........ ........
.....11. .......1 ........ ........ ........
`
	want = strings.TrimLeft(want, "\n")
	require.EqualValues(t, want, f.String())

	m := map[string]bool{
		"hello": true,
		"world": true,
		"x":     false,
		"foo":   false,
	}
	for k, want := range m {
		require.EqualValues(t, want, f.MayContain([]byte(k)))
	}
}

func TestBloomFilter(t *testing.T) {
	nextLength := func(x int) int {
		if x < 10 {
			return x + 1
		}
		if x < 100 {
			return x + 10
		}
		if x < 1000 {
			return x + 100
		}
		return x + 1000
	}
	le32 := func(i int) []byte {
		b := make([]byte, 4)
		b[0] = uint8(uint32(i) >> 0)
		b[1] = uint8(uint32(i) >> 8)
		b[2] = uint8(uint32(i) >> 16)
		b[3] = uint8(uint32(i) >> 24)
		return b
	}

	nMediocreFilters, nGoodFilters := 0, 0
loop:
	for length := 1; length <= 10000; length = nextLength(length) {
		keys := make([][]byte, 0, length)
		for i := 0; i < length; i++ {
			keys = append(keys, le32(i))
		}
		f := newTableFilter(10, keys...)
		// The size of the table bloom filter is measured in multiples of the
		// cache line size. The '+2' contribution captures the rounding up in the
		// length division plus preferring an odd number of cache lines. As such,
		// this formula isn't exact, but the exact formula is hard to read.
		maxLen := 5 + ((length*10)/cacheLineBits+2)*cacheLineSize
		if len(f) > maxLen {
			t.Errorf("length=%d: len(f)=%d > max len %d", length, len(f), maxLen)
			continue
		}

		// All added keys must match.
		for _, key := range keys {
			if !f.MayContain(key) {
				t.Errorf("length=%d: did not contain key %q", length, key)
				continue loop
			}
		}

		// Check false positive rate.
		nFalsePositive := 0
		for i := 0; i < 10000; i++ {
			if f.MayContain(le32(1e9 + i)) {
				nFalsePositive++
			}
		}
		if nFalsePositive > 0.02*10000 {
			t.Errorf("length=%d: %d false positives in 10000", length, nFalsePositive)
			continue
		}
		if nFalsePositive > 0.0125*10000 {
			nMediocreFilters++
		} else {
			nGoodFilters++
		}
	}

	if nMediocreFilters > nGoodFilters/5 {
		t.Errorf("%d mediocre filters but only %d good filters", nMediocreFilters, nGoodFilters)
	}
}

func TestHash(t *testing.T) {
	testCases := []struct {
		s        string
		expected uint32
	}{
		// The magic expected numbers come from RocksDB's util/hash_test.cc:TestHash.
		{"", 3164544308},
		{"\x08", 422599524},
		{"\x17", 3168152998},
		{"\x9a", 3195034349},
		{"\x1c", 2651681383},
		{"\x4d\x76", 2447836956},
		{"\x52\xd5", 3854228105},
		{"\x91\xf7", 31066776},
		{"\xd6\x27", 1806091603},
		{"\x30\x46\x0b", 3808221797},
		{"\x56\xdc\xd6", 2157698265},
		{"\xd4\x52\x33", 1721992661},
		{"\x6a\xb5\xf4", 2469105222},
		{"\x67\x53\x81\x1c", 118283265},
		{"\x69\xb8\xc0\x88", 3416318611},
		{"\x1e\x84\xaf\x2d", 3315003572},
		{"\x46\xdc\x54\xbe", 447346355},
		{"\xd0\x7a\x6e\xea\x56", 4255445370},
		{"\x86\x83\xd5\xa4\xd8", 2390603402},
		{"\xb7\x46\xbb\x77\xce", 2048907743},
		{"\x6c\xa8\xbc\xe5\x99", 2177978500},
		{"\x5c\x5e\xe1\xa0\x73\x81", 1036846008},
		{"\x08\x5d\x73\x1c\xe5\x2e", 229980482},
		{"\x42\xfb\xf2\x52\xb4\x10", 3655585422},
		{"\x73\xe1\xff\x56\x9c\xce", 3502708029},
		{"\x5c\xbe\x97\x75\x54\x9a\x52", 815120748},
		{"\x16\x82\x39\x49\x88\x2b\x36", 3056033698},
		{"\x59\x77\xf0\xa7\x24\xf4\x78", 587205227},
		{"\xd3\xa5\x7c\x0e\xc0\x02\x07", 2030937252},
		{"\x31\x1b\x98\x75\x96\x22\xd3\x9a", 469635402},
		{"\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 3530274698},
		{"\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 1974545809},
		{"\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 3563570120},
		{"\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 2706087434},
		{"\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 1534654151},
		{"\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 2355554696},
		{"\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 1400800912},
		{"\x22\x6f\x39\x1f\xf8\xdd\x4f\x52\x17\x94", 3420325137},
		{"\x32\x89\x2a\x75\x48\x3a\x4a\x02\x69\xdd", 3427803584},
		{"\x06\x92\x5c\xf4\x88\x0e\x7e\x68\x38\x3e", 1152407945},
		{"\xbd\x2c\x63\x38\xbf\xe9\x78\xb7\xbf\x15", 3382479516},
	}
	for _, tc := range testCases {
		t.Run("", func(t *testing.T) {
			require.EqualValues(t, tc.expected, hash([]byte(tc.s)))
		})
	}
}

func BenchmarkBloomFilter(b *testing.B) {
	const keyLen = 128
	const numKeys = 1024
	keys := make([][]byte, numKeys)
	for i := range keys {
		keys[i] = make([]byte, keyLen)
		_, _ = rand.Read(keys[i])
	}
	b.ResetTimer()
	policy := FilterPolicy(10)
	for i := 0; i < b.N; i++ {
		w := policy.NewWriter(base.TableFilter)
		for _, key := range keys {
			w.AddKey(key)
		}
		w.Finish(nil)
	}
}
23 pebble/cache.go Normal file
@@ -0,0 +1,23 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import "github.com/cockroachdb/pebble/internal/cache"

// Cache exports the cache.Cache type.
type Cache = cache.Cache

// NewCache creates a new cache of the specified size. Memory for the cache is
// allocated on demand, not during initialization. The cache is created with a
// reference count of 1. Each DB it is associated with adds a reference, so the
// creator of the cache should usually release their reference after the DB is
// created.
//
//	c := pebble.NewCache(...)
//	defer c.Unref()
//	d, err := pebble.Open(pebble.Options{Cache: c})
func NewCache(size int64) *cache.Cache {
	return cache.New(size)
}
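Usage note (not part of the diff): a fuller version of the doc comment's example, assuming the usual two-argument `pebble.Open` signature and a single block cache shared between two stores:

```go
package main

import (
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	// One block cache (128 MiB here) shared by both databases. The creator
	// holds one reference and releases it after the DBs have been opened.
	c := pebble.NewCache(128 << 20)
	defer c.Unref()

	db1, err := pebble.Open("demo1", &pebble.Options{Cache: c})
	if err != nil {
		log.Fatal(err)
	}
	defer db1.Close()

	db2, err := pebble.Open("demo2", &pebble.Options{Cache: c})
	if err != nil {
		log.Fatal(err)
	}
	defer db2.Close()
}
```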
428 pebble/checkpoint.go Normal file
@@ -0,0 +1,428 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"io"
	"os"

	"github.com/cockroachdb/errors/oserror"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/cockroachdb/pebble/vfs/atomicfs"
)

// checkpointOptions hold the optional parameters to construct checkpoint
// snapshots.
type checkpointOptions struct {
	// flushWAL set to true will force a flush and sync of the WAL prior to
	// checkpointing.
	flushWAL bool

	// If set, any SSTs that don't overlap with these spans are excluded from a checkpoint.
	restrictToSpans []CheckpointSpan
}

// CheckpointOption set optional parameters used by `DB.Checkpoint`.
type CheckpointOption func(*checkpointOptions)

// WithFlushedWAL enables flushing and syncing the WAL prior to constructing a
// checkpoint. This guarantees that any writes committed before calling
// DB.Checkpoint will be part of that checkpoint.
//
// Note that this setting can only be useful in cases when some writes are
// performed with Sync = false. Otherwise, the guarantee will already be met.
//
// Passing this option is functionally equivalent to calling
// DB.LogData(nil, Sync) right before DB.Checkpoint.
func WithFlushedWAL() CheckpointOption {
	return func(opt *checkpointOptions) {
		opt.flushWAL = true
	}
}

// WithRestrictToSpans specifies spans of interest for the checkpoint. Any SSTs
// that don't overlap with any of these spans are excluded from the checkpoint.
//
// Note that the checkpoint can still surface keys outside of these spans (from
// the WAL and from SSTs that partially overlap with these spans). Moreover,
// these surface keys aren't necessarily "valid" in that they could have been
// modified but the SST containing the modification is excluded.
func WithRestrictToSpans(spans []CheckpointSpan) CheckpointOption {
	return func(opt *checkpointOptions) {
		opt.restrictToSpans = spans
	}
}

// CheckpointSpan is a key range [Start, End) (inclusive on Start, exclusive on
// End) of interest for a checkpoint.
type CheckpointSpan struct {
	Start []byte
	End   []byte
}

// excludeFromCheckpoint returns true if an SST file should be excluded from the
// checkpoint because it does not overlap with the spans of interest
// (opt.restrictToSpans).
func excludeFromCheckpoint(f *fileMetadata, opt *checkpointOptions, cmp Compare) bool {
	if len(opt.restrictToSpans) == 0 {
		// Option not set; don't exclude anything.
		return false
	}
	for _, s := range opt.restrictToSpans {
		if f.Overlaps(cmp, s.Start, s.End, true /* exclusiveEnd */) {
			return false
		}
	}
	// None of the restrictToSpans overlapped; we can exclude this file.
	return true
}

// mkdirAllAndSyncParents creates destDir and any of its missing parents.
// Those missing parents, as well as the closest existing ancestor, are synced.
// Returns a handle to the directory created at destDir.
func mkdirAllAndSyncParents(fs vfs.FS, destDir string) (vfs.File, error) {
	// Collect paths for all directories between destDir (excluded) and its
	// closest existing ancestor (included).
	var parentPaths []string
	foundExistingAncestor := false
	for parentPath := fs.PathDir(destDir); parentPath != "."; parentPath = fs.PathDir(parentPath) {
		parentPaths = append(parentPaths, parentPath)
		_, err := fs.Stat(parentPath)
		if err == nil {
			// Exit loop at the closest existing ancestor.
			foundExistingAncestor = true
			break
		}
		if !oserror.IsNotExist(err) {
			return nil, err
		}
	}
	// Handle empty filesystem edge case.
	if !foundExistingAncestor {
		parentPaths = append(parentPaths, "")
	}
	// Create destDir and any of its missing parents.
	if err := fs.MkdirAll(destDir, 0755); err != nil {
		return nil, err
	}
	// Sync all the parent directories up to the closest existing ancestor,
	// included.
	for _, parentPath := range parentPaths {
		parentDir, err := fs.OpenDir(parentPath)
		if err != nil {
			return nil, err
		}
		err = parentDir.Sync()
		if err != nil {
			_ = parentDir.Close()
			return nil, err
		}
		err = parentDir.Close()
		if err != nil {
			return nil, err
		}
	}
	return fs.OpenDir(destDir)
}

// Checkpoint constructs a snapshot of the DB instance in the specified
// directory. The WAL, MANIFEST, OPTIONS, and sstables will be copied into the
// snapshot. Hard links will be used when possible. Beware of the significant
// space overhead for a checkpoint if hard links are disabled. Also beware that
// even if hard links are used, the space overhead for the checkpoint will
// increase over time as the DB performs compactions.
func (d *DB) Checkpoint(
	destDir string, opts ...CheckpointOption,
) (
	ckErr error, /* used in deferred cleanup */
) {
	opt := &checkpointOptions{}
	for _, fn := range opts {
		fn(opt)
	}

	if _, err := d.opts.FS.Stat(destDir); !oserror.IsNotExist(err) {
		if err == nil {
			return &os.PathError{
				Op:   "checkpoint",
				Path: destDir,
				Err:  oserror.ErrExist,
			}
		}
		return err
	}

	if opt.flushWAL && !d.opts.DisableWAL {
		// Write an empty log-data record to flush and sync the WAL.
		if err := d.LogData(nil /* data */, Sync); err != nil {
			return err
		}
	}

	// Disable file deletions.
	d.mu.Lock()
	d.disableFileDeletions()
	defer func() {
		d.mu.Lock()
		defer d.mu.Unlock()
		d.enableFileDeletions()
	}()

	// TODO(peter): RocksDB provides the option to roll the manifest if the
	// MANIFEST size is too large. Should we do this too?

	// Lock the manifest before getting the current version. We need the
	// length of the manifest that we read to match the current version that
	// we read, otherwise we might copy a versionEdit not reflected in the
	// sstables we copy/link.
	d.mu.versions.logLock()
	// Get the unflushed log files, the current version, and the current manifest
	// file number.
	memQueue := d.mu.mem.queue
	current := d.mu.versions.currentVersion()
	formatVers := d.FormatMajorVersion()
	manifestFileNum := d.mu.versions.manifestFileNum
	manifestSize := d.mu.versions.manifest.Size()
	optionsFileNum := d.optionsFileNum
	virtualBackingFiles := make(map[base.DiskFileNum]struct{})
	for diskFileNum := range d.mu.versions.backingState.fileBackingMap {
		virtualBackingFiles[diskFileNum] = struct{}{}
	}
	// Release the manifest and DB.mu so we don't block other operations on
	// the database.
	d.mu.versions.logUnlock()
	d.mu.Unlock()

	// Wrap the normal filesystem with one which wraps newly created files with
	// vfs.NewSyncingFile.
	fs := vfs.NewSyncingFS(d.opts.FS, vfs.SyncingFileOptions{
		NoSyncOnClose: d.opts.NoSyncOnClose,
		BytesPerSync:  d.opts.BytesPerSync,
	})

	// Create the dir and its parents (if necessary), and sync them.
	var dir vfs.File
	defer func() {
		if dir != nil {
			_ = dir.Close()
		}
		if ckErr != nil {
			// Attempt to cleanup on error.
			_ = fs.RemoveAll(destDir)
		}
	}()
	dir, ckErr = mkdirAllAndSyncParents(fs, destDir)
	if ckErr != nil {
		return ckErr
	}

	{
		// Link or copy the OPTIONS.
		srcPath := base.MakeFilepath(fs, d.dirname, fileTypeOptions, optionsFileNum)
		destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
		ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
		if ckErr != nil {
			return ckErr
		}
	}

	{
		// Set the format major version in the destination directory.
		var versionMarker *atomicfs.Marker
		versionMarker, _, ckErr = atomicfs.LocateMarker(fs, destDir, formatVersionMarkerName)
		if ckErr != nil {
			return ckErr
		}

		// We use the marker to encode the active format version in the
		// marker filename. Unlike other uses of the atomic marker,
		// there is no file with the filename `formatVers.String()` on
		// the filesystem.
		ckErr = versionMarker.Move(formatVers.String())
		if ckErr != nil {
			return ckErr
		}
		ckErr = versionMarker.Close()
		if ckErr != nil {
			return ckErr
		}
	}

	var excludedFiles map[deletedFileEntry]*fileMetadata
	// Set of FileBacking.DiskFileNum which will be required by virtual sstables
	// in the checkpoint.
	requiredVirtualBackingFiles := make(map[base.DiskFileNum]struct{})
	// Link or copy the sstables.
	for l := range current.Levels {
		iter := current.Levels[l].Iter()
		for f := iter.First(); f != nil; f = iter.Next() {
			if excludeFromCheckpoint(f, opt, d.cmp) {
				if excludedFiles == nil {
					excludedFiles = make(map[deletedFileEntry]*fileMetadata)
				}
				excludedFiles[deletedFileEntry{
					Level:   l,
					FileNum: f.FileNum,
				}] = f
				continue
			}

			fileBacking := f.FileBacking
			if f.Virtual {
				if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
					continue
				}
				requiredVirtualBackingFiles[fileBacking.DiskFileNum] = struct{}{}
			}

			srcPath := base.MakeFilepath(fs, d.dirname, fileTypeTable, fileBacking.DiskFileNum)
			destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
			ckErr = vfs.LinkOrCopy(fs, srcPath, destPath)
			if ckErr != nil {
				return ckErr
			}
		}
	}

	var removeBackingTables []base.DiskFileNum
	for diskFileNum := range virtualBackingFiles {
		if _, ok := requiredVirtualBackingFiles[diskFileNum]; !ok {
			// The backing sstable associated with fileNum is no longer
			// required.
			removeBackingTables = append(removeBackingTables, diskFileNum)
		}
	}

	ckErr = d.writeCheckpointManifest(
		fs, formatVers, destDir, dir, manifestFileNum, manifestSize,
		excludedFiles, removeBackingTables,
	)
	if ckErr != nil {
		return ckErr
	}

	// Copy the WAL files. We copy rather than link because WAL file recycling
	// will cause the WAL files to be reused which would invalidate the
	// checkpoint.
	for i := range memQueue {
		logNum := memQueue[i].logNum
		if logNum == 0 {
			continue
		}
		srcPath := base.MakeFilepath(fs, d.walDirname, fileTypeLog, logNum)
		destPath := fs.PathJoin(destDir, fs.PathBase(srcPath))
		ckErr = vfs.Copy(fs, srcPath, destPath)
		if ckErr != nil {
			return ckErr
		}
	}

	// Sync and close the checkpoint directory.
	ckErr = dir.Sync()
	if ckErr != nil {
		return ckErr
	}
	ckErr = dir.Close()
	dir = nil
	return ckErr
}

func (d *DB) writeCheckpointManifest(
	fs vfs.FS,
	formatVers FormatMajorVersion,
	destDirPath string,
	destDir vfs.File,
	manifestFileNum base.DiskFileNum,
	manifestSize int64,
	excludedFiles map[deletedFileEntry]*fileMetadata,
	removeBackingTables []base.DiskFileNum,
) error {
	// Copy the MANIFEST, and create a pointer to it. We copy rather
	// than link because additional version edits added to the
	// MANIFEST after we took our snapshot of the sstables will
	// reference sstables that aren't in our checkpoint. For a
	// similar reason, we need to limit how much of the MANIFEST we
	// copy.
	// If some files are excluded from the checkpoint, also append a block that
	// records those files as deleted.
	if err := func() error {
		srcPath := base.MakeFilepath(fs, d.dirname, fileTypeManifest, manifestFileNum)
		destPath := fs.PathJoin(destDirPath, fs.PathBase(srcPath))
		src, err := fs.Open(srcPath, vfs.SequentialReadsOption)
		if err != nil {
			return err
		}
		defer src.Close()

		dst, err := fs.Create(destPath)
		if err != nil {
			return err
		}
		defer dst.Close()

		// Copy all existing records. We need to copy at the record level in case we
		// need to append another record with the excluded files (we cannot simply
		// append a record after a raw data copy; see
		// https://github.com/cockroachdb/cockroach/issues/100935).
		r := record.NewReader(&io.LimitedReader{R: src, N: manifestSize}, manifestFileNum)
		w := record.NewWriter(dst)
		for {
			rr, err := r.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			rw, err := w.Next()
			if err != nil {
				return err
			}
			if _, err := io.Copy(rw, rr); err != nil {
				return err
			}
		}

		if len(excludedFiles) > 0 {
			// Write out an additional VersionEdit that deletes the excluded SST files.
			ve := versionEdit{
				DeletedFiles:         excludedFiles,
				RemovedBackingTables: removeBackingTables,
			}

			rw, err := w.Next()
			if err != nil {
				return err
			}
			if err := ve.Encode(rw); err != nil {
				return err
			}
		}
		if err := w.Close(); err != nil {
			return err
		}
		return dst.Sync()
	}(); err != nil {
		return err
	}

	// Recent format versions use an atomic marker for setting the
	// active manifest. Older versions use the CURRENT file. The
	// setCurrentFunc function will return a closure that will
	// take the appropriate action for the database's format
	// version.
	var manifestMarker *atomicfs.Marker
	manifestMarker, _, err := atomicfs.LocateMarker(fs, destDirPath, manifestMarkerName)
	if err != nil {
		return err
	}
	if err := setCurrentFunc(formatVers, manifestMarker, fs, destDirPath, destDir)(manifestFileNum); err != nil {
		return err
	}
	return manifestMarker.Close()
}
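Usage note (not part of the diff): calling the checkpoint API defined above from application code might look like the following sketch. The option constructors and `CheckpointSpan` are the ones exported by this file; the destination path is an arbitrary example.

```go
package main

import (
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	db, err := pebble.Open("demo", &pebble.Options{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Flush/sync the WAL so unsynced writes are included, and only keep
	// sstables overlapping the ["a", "m") keyspace in the checkpoint.
	err = db.Checkpoint("checkpoints/demo",
		pebble.WithFlushedWAL(),
		pebble.WithRestrictToSpans([]pebble.CheckpointSpan{
			{Start: []byte("a"), End: []byte("m")},
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```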
415 pebble/checkpoint_test.go Normal file
@@ -0,0 +1,415 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"slices"
	"sort"
	"strings"
	"sync"
	"testing"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

func TestCheckpoint(t *testing.T) {
	dbs := make(map[string]*DB)
	defer func() {
		for _, db := range dbs {
			if db.closed.Load() == nil {
				require.NoError(t, db.Close())
			}
		}
	}()

	mem := vfs.NewMem()
	var memLog base.InMemLogger
	opts := &Options{
		FS:                          vfs.WithLogging(mem, memLog.Infof),
		FormatMajorVersion:          internalFormatNewest,
		L0CompactionThreshold:       10,
		DisableAutomaticCompactions: true,
	}
	opts.private.disableTableStats = true
	opts.private.testingAlwaysWaitForCleanup = true

	datadriven.RunTest(t, "testdata/checkpoint", func(t *testing.T, td *datadriven.TestData) string {
		switch td.Cmd {
		case "batch":
			if len(td.CmdArgs) != 1 {
				return "batch <db>"
			}
			memLog.Reset()
			d := dbs[td.CmdArgs[0].String()]
			b := d.NewBatch()
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			if err := b.Commit(Sync); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "checkpoint":
			if !(len(td.CmdArgs) == 2 || (len(td.CmdArgs) == 3 && td.CmdArgs[2].Key == "restrict")) {
				return "checkpoint <db> <dir> [restrict=(start-end, ...)]"
			}
			var opts []CheckpointOption
			if len(td.CmdArgs) == 3 {
				var spans []CheckpointSpan
				for _, v := range td.CmdArgs[2].Vals {
					splits := strings.SplitN(v, "-", 2)
					if len(splits) != 2 {
						return fmt.Sprintf("invalid restrict range %q", v)
					}
					spans = append(spans, CheckpointSpan{
						Start: []byte(splits[0]),
						End:   []byte(splits[1]),
					})
				}
				opts = append(opts, WithRestrictToSpans(spans))
			}
			memLog.Reset()
			d := dbs[td.CmdArgs[0].String()]
			if err := d.Checkpoint(td.CmdArgs[1].String(), opts...); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "ingest-and-excise":
			d := dbs[td.CmdArgs[0].String()]

			// Hacky but the command doesn't expect a db string. Get rid of it.
			td.CmdArgs = td.CmdArgs[1:]
			if err := runIngestAndExciseCmd(td, d, mem); err != nil {
				return err.Error()
			}
			return ""

		case "build":
			d := dbs[td.CmdArgs[0].String()]

			// Hacky but the command doesn't expect a db string. Get rid of it.
			td.CmdArgs = td.CmdArgs[1:]
			if err := runBuildCmd(td, d, mem); err != nil {
				return err.Error()
			}
			return ""

		case "lsm":
			d := dbs[td.CmdArgs[0].String()]

			// Hacky but the command doesn't expect a db string. Get rid of it.
			td.CmdArgs = td.CmdArgs[1:]
			return runLSMCmd(td, d)

		case "compact":
			if len(td.CmdArgs) != 1 {
				return "compact <db>"
			}
			memLog.Reset()
			d := dbs[td.CmdArgs[0].String()]
			if err := d.Compact(nil, []byte("\xff"), false); err != nil {
				return err.Error()
			}
			d.TestOnlyWaitForCleaning()
			return memLog.String()

		case "print-backing":
			// prints contents of the file backing map in the version. Used to
			// test whether the checkpoint removed the filebackings correctly.
			if len(td.CmdArgs) != 1 {
				return "print-backing <db>"
			}
			d := dbs[td.CmdArgs[0].String()]
			d.mu.Lock()
			d.mu.versions.logLock()
			var fileNums []base.DiskFileNum
			for _, b := range d.mu.versions.backingState.fileBackingMap {
				fileNums = append(fileNums, b.DiskFileNum)
			}
			d.mu.versions.logUnlock()
			d.mu.Unlock()

			slices.Sort(fileNums)
			var buf bytes.Buffer
			for _, f := range fileNums {
				buf.WriteString(fmt.Sprintf("%s\n", f.String()))
			}
			return buf.String()

		case "close":
			if len(td.CmdArgs) != 1 {
				return "close <db>"
			}
			d := dbs[td.CmdArgs[0].String()]
			require.NoError(t, d.Close())
			return ""

		case "flush":
			if len(td.CmdArgs) != 1 {
				return "flush <db>"
			}
			memLog.Reset()
			d := dbs[td.CmdArgs[0].String()]
			if err := d.Flush(); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "list":
			if len(td.CmdArgs) != 1 {
				return "list <dir>"
			}
			paths, err := mem.List(td.CmdArgs[0].String())
			if err != nil {
				return err.Error()
			}
			sort.Strings(paths)
			return fmt.Sprintf("%s\n", strings.Join(paths, "\n"))

		case "open":
			if len(td.CmdArgs) != 1 && len(td.CmdArgs) != 2 {
				return "open <dir> [readonly]"
			}
			opts.ReadOnly = false
			if len(td.CmdArgs) == 2 {
				if td.CmdArgs[1].String() != "readonly" {
					return "open <dir> [readonly]"
				}
				opts.ReadOnly = true
			}

			memLog.Reset()
			dir := td.CmdArgs[0].String()
			d, err := Open(dir, opts)
			if err != nil {
				return err.Error()
			}
			dbs[dir] = d
			return memLog.String()

		case "scan":
			if len(td.CmdArgs) != 1 {
				return "scan <db>"
			}
			memLog.Reset()
			d := dbs[td.CmdArgs[0].String()]
			iter, _ := d.NewIter(nil)
			for valid := iter.First(); valid; valid = iter.Next() {
				memLog.Infof("%s %s", iter.Key(), iter.Value())
			}
			memLog.Infof(".")
			if err := iter.Close(); err != nil {
				memLog.Infof("%v\n", err)
			}
			return memLog.String()

		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}

func TestCheckpointCompaction(t *testing.T) {
	fs := vfs.NewMem()
	d, err := Open("", &Options{FS: fs})
	require.NoError(t, err)

	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	wg.Add(4)
	go func() {
		defer cancel()
		defer wg.Done()
		for i := 0; ctx.Err() == nil; i++ {
			if err := d.Set([]byte(fmt.Sprintf("key%06d", i)), nil, nil); err != nil {
				t.Error(err)
				return
			}
		}
	}()
	go func() {
		defer cancel()
		defer wg.Done()
		for ctx.Err() == nil {
			if err := d.Compact([]byte("key"), []byte("key999999"), false); err != nil {
				t.Error(err)
				return
			}
		}
	}()
	check := make(chan string, 100)
	go func() {
		defer cancel()
		defer close(check)
		defer wg.Done()
		for i := 0; ctx.Err() == nil && i < 200; i++ {
			dir := fmt.Sprintf("checkpoint%06d", i)
			if err := d.Checkpoint(dir); err != nil {
				t.Error(err)
				return
			}
			select {
			case <-ctx.Done():
				return
			case check <- dir:
			}
		}
	}()
	go func() {
		opts := &Options{FS: fs}
		defer cancel()
		defer wg.Done()
		for dir := range check {
			d2, err := Open(dir, opts)
			if err != nil {
				t.Error(err)
				return
			}
			// Check the checkpoint has all the sstables that the manifest
			// claims it has.
			tableInfos, _ := d2.SSTables()
			for _, tables := range tableInfos {
				for _, tbl := range tables {
					if tbl.Virtual {
						continue
					}
					if _, err := fs.Stat(base.MakeFilepath(fs, dir, base.FileTypeTable, tbl.FileNum.DiskFileNum())); err != nil {
						t.Error(err)
						return
					}
				}
			}
			if err := d2.Close(); err != nil {
				t.Error(err)
				return
			}
		}
	}()
	<-ctx.Done()
	wg.Wait()
	require.NoError(t, d.Close())
}

func TestCheckpointFlushWAL(t *testing.T) {
	const checkpointPath = "checkpoints/checkpoint"
	fs := vfs.NewStrictMem()
	opts := &Options{FS: fs}
	key, value := []byte("key"), []byte("value")

	// Create a checkpoint from an unsynced DB.
	{
		d, err := Open("", opts)
		require.NoError(t, err)
		{
			wb := d.NewBatch()
			err = wb.Set(key, value, nil)
			require.NoError(t, err)
			err = d.Apply(wb, NoSync)
			require.NoError(t, err)
		}
		err = d.Checkpoint(checkpointPath, WithFlushedWAL())
		require.NoError(t, err)
		require.NoError(t, d.Close())
		fs.ResetToSyncedState()
	}

	// Check that the WAL has been flushed in the checkpoint.
	{
		files, err := fs.List(checkpointPath)
		require.NoError(t, err)
		hasLogFile := false
		for _, f := range files {
			info, err := fs.Stat(fs.PathJoin(checkpointPath, f))
			require.NoError(t, err)
			if strings.HasSuffix(f, ".log") {
				hasLogFile = true
				require.NotZero(t, info.Size())
			}
		}
		require.True(t, hasLogFile)
	}

	// Check that the checkpoint contains the expected data.
	{
		d, err := Open(checkpointPath, opts)
		require.NoError(t, err)
		iter, _ := d.NewIter(nil)
		require.True(t, iter.First())
		require.Equal(t, key, iter.Key())
		require.Equal(t, value, iter.Value())
		require.False(t, iter.Next())
		require.NoError(t, iter.Close())
		require.NoError(t, d.Close())
	}
}

func TestCheckpointManyFiles(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping because of short flag")
	}
	const checkpointPath = "checkpoint"
	opts := &Options{
		FS:                          vfs.NewMem(),
		FormatMajorVersion:          internalFormatNewest,
		DisableAutomaticCompactions: true,
	}
	// Disable compression to speed up the test.
	opts.EnsureDefaults()
	for i := range opts.Levels {
		opts.Levels[i].Compression = NoCompression
	}

	d, err := Open("", opts)
	require.NoError(t, err)
	defer d.Close()

	mkKey := func(x int) []byte {
		return []byte(fmt.Sprintf("key%06d", x))
	}
	// We want to test the case where the appended record with the excluded files
	// makes the manifest cross 32KB. This will happen for a range of values
	// around 450.
	n := 400 + rand.Intn(100)
	for i := 0; i < n; i++ {
|
||||||
|
err := d.Set(mkKey(i), nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = d.Flush()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
err = d.Checkpoint(checkpointPath, WithRestrictToSpans([]CheckpointSpan{
|
||||||
|
{
|
||||||
|
Start: mkKey(0),
|
||||||
|
End: mkKey(10),
|
||||||
|
},
|
||||||
|
}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Open the checkpoint and iterate through all the keys.
|
||||||
|
{
|
||||||
|
d, err := Open(checkpointPath, opts)
|
||||||
|
require.NoError(t, err)
|
||||||
|
iter, _ := d.NewIter(nil)
|
||||||
|
require.True(t, iter.First())
|
||||||
|
require.NoError(t, iter.Error())
|
||||||
|
n := 1
|
||||||
|
for iter.Next() {
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
require.NoError(t, iter.Error())
|
||||||
|
require.NoError(t, iter.Close())
|
||||||
|
require.NoError(t, d.Close())
|
||||||
|
require.Equal(t, 10, n)
|
||||||
|
}
|
||||||
|
}
|
pebble/cleaner.go (new file, 295 lines)
@@ -0,0 +1,295 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"context"
	"runtime/pprof"
	"sync"
	"time"

	"github.com/cockroachdb/errors/oserror"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/invariants"
	"github.com/cockroachdb/pebble/objstorage"
	"github.com/cockroachdb/tokenbucket"
)

// Cleaner exports the base.Cleaner type.
type Cleaner = base.Cleaner

// DeleteCleaner exports the base.DeleteCleaner type.
type DeleteCleaner = base.DeleteCleaner

// ArchiveCleaner exports the base.ArchiveCleaner type.
type ArchiveCleaner = base.ArchiveCleaner

type cleanupManager struct {
	opts            *Options
	objProvider     objstorage.Provider
	onTableDeleteFn func(fileSize uint64)
	deletePacer     *deletionPacer

	// jobsCh is used as the cleanup job queue.
	jobsCh chan *cleanupJob
	// waitGroup is used to wait for the background goroutine to exit.
	waitGroup sync.WaitGroup

	mu struct {
		sync.Mutex
		// totalJobs is the total number of enqueued jobs (completed or in progress).
		totalJobs              int
		completedJobs          int
		completedJobsCond      sync.Cond
		jobsQueueWarningIssued bool
	}
}

// We can queue this many jobs before we have to block EnqueueJob.
const jobsQueueDepth = 1000

// obsoleteFile holds information about a file that needs to be deleted soon.
type obsoleteFile struct {
	dir      string
	fileNum  base.DiskFileNum
	fileType fileType
	fileSize uint64
}

type cleanupJob struct {
	jobID         int
	obsoleteFiles []obsoleteFile
}

// openCleanupManager creates a cleanupManager and starts its background goroutine.
// The cleanupManager must be Close()d.
func openCleanupManager(
	opts *Options,
	objProvider objstorage.Provider,
	onTableDeleteFn func(fileSize uint64),
	getDeletePacerInfo func() deletionPacerInfo,
) *cleanupManager {
	cm := &cleanupManager{
		opts:            opts,
		objProvider:     objProvider,
		onTableDeleteFn: onTableDeleteFn,
		deletePacer:     newDeletionPacer(time.Now(), int64(opts.TargetByteDeletionRate), getDeletePacerInfo),
		jobsCh:          make(chan *cleanupJob, jobsQueueDepth),
	}
	cm.mu.completedJobsCond.L = &cm.mu.Mutex
	cm.waitGroup.Add(1)

	go func() {
		pprof.Do(context.Background(), gcLabels, func(context.Context) {
			cm.mainLoop()
		})
	}()

	return cm
}

// Close stops the background goroutine, waiting until all queued jobs are completed.
// Delete pacing is disabled for the remaining jobs.
func (cm *cleanupManager) Close() {
	close(cm.jobsCh)
	cm.waitGroup.Wait()
}

// EnqueueJob adds a cleanup job to the manager's queue.
func (cm *cleanupManager) EnqueueJob(jobID int, obsoleteFiles []obsoleteFile) {
	job := &cleanupJob{
		jobID:         jobID,
		obsoleteFiles: obsoleteFiles,
	}

	// Report deleted bytes to the pacer, which can use this data to potentially
	// increase the deletion rate to keep up. We want to do this at enqueue time
	// rather than when we get to the job, otherwise the reported bytes will be
	// subject to the throttling rate which defeats the purpose.
	var pacingBytes uint64
	for _, of := range obsoleteFiles {
		if cm.needsPacing(of.fileType, of.fileNum) {
			pacingBytes += of.fileSize
		}
	}
	if pacingBytes > 0 {
		cm.deletePacer.ReportDeletion(time.Now(), pacingBytes)
	}

	cm.mu.Lock()
	cm.mu.totalJobs++
	cm.maybeLogLocked()
	cm.mu.Unlock()

	if invariants.Enabled && len(cm.jobsCh) >= cap(cm.jobsCh)-2 {
		panic("cleanup jobs queue full")
	}

	cm.jobsCh <- job
}

// Wait until the completion of all jobs that were already queued.
//
// Does not wait for jobs that are enqueued during the call.
//
// Note that DB.mu should not be held while calling this method; the background
// goroutine needs to acquire DB.mu to update deleted table metrics.
func (cm *cleanupManager) Wait() {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	n := cm.mu.totalJobs
	for cm.mu.completedJobs < n {
		cm.mu.completedJobsCond.Wait()
	}
}

// mainLoop runs the manager's background goroutine.
func (cm *cleanupManager) mainLoop() {
	defer cm.waitGroup.Done()

	var tb tokenbucket.TokenBucket
	// Use a token bucket with 1 token / second refill rate and 1 token burst.
	tb.Init(1.0, 1.0)
	for job := range cm.jobsCh {
		for _, of := range job.obsoleteFiles {
			if of.fileType != fileTypeTable {
				path := base.MakeFilepath(cm.opts.FS, of.dir, of.fileType, of.fileNum)
				cm.deleteObsoleteFile(of.fileType, job.jobID, path, of.fileNum, of.fileSize)
			} else {
				cm.maybePace(&tb, of.fileType, of.fileNum, of.fileSize)
				cm.onTableDeleteFn(of.fileSize)
				cm.deleteObsoleteObject(fileTypeTable, job.jobID, of.fileNum)
			}
		}
		cm.mu.Lock()
		cm.mu.completedJobs++
		cm.mu.completedJobsCond.Broadcast()
		cm.maybeLogLocked()
		cm.mu.Unlock()
	}
}

func (cm *cleanupManager) needsPacing(fileType base.FileType, fileNum base.DiskFileNum) bool {
	if fileType != fileTypeTable {
		return false
	}
	meta, err := cm.objProvider.Lookup(fileType, fileNum)
	if err != nil {
		// The object was already removed from the provider; we won't actually
		// delete anything, so we don't need to pace.
		return false
	}
	// Don't throttle deletion of remote objects.
	return !meta.IsRemote()
}

// maybePace sleeps before deleting an object if appropriate. It is always
// called from the background goroutine.
func (cm *cleanupManager) maybePace(
	tb *tokenbucket.TokenBucket, fileType base.FileType, fileNum base.DiskFileNum, fileSize uint64,
) {
	if !cm.needsPacing(fileType, fileNum) {
		return
	}

	tokens := cm.deletePacer.PacingDelay(time.Now(), fileSize)
	if tokens == 0.0 {
		// The token bucket might be in debt; it could make us wait even for 0
		// tokens. We don't want that if the pacer decided throttling should be
		// disabled.
		return
	}
	// Wait for tokens. We use a token bucket instead of sleeping outright because
	// the token bucket accumulates up to one second of unused tokens.
	for {
		ok, d := tb.TryToFulfill(tokenbucket.Tokens(tokens))
		if ok {
			break
		}
		time.Sleep(d)
	}
}

// deleteObsoleteFile deletes a (non-object) file that is no longer needed.
func (cm *cleanupManager) deleteObsoleteFile(
	fileType fileType, jobID int, path string, fileNum base.DiskFileNum, fileSize uint64,
) {
	// TODO(peter): need to handle this error, probably by re-adding the
	// file that couldn't be deleted to one of the obsolete slices map.
	err := cm.opts.Cleaner.Clean(cm.opts.FS, fileType, path)
	if oserror.IsNotExist(err) {
		return
	}

	switch fileType {
	case fileTypeLog:
		cm.opts.EventListener.WALDeleted(WALDeleteInfo{
			JobID:   jobID,
			Path:    path,
			FileNum: fileNum.FileNum(),
			Err:     err,
		})
	case fileTypeManifest:
		cm.opts.EventListener.ManifestDeleted(ManifestDeleteInfo{
			JobID:   jobID,
			Path:    path,
			FileNum: fileNum.FileNum(),
			Err:     err,
		})
	case fileTypeTable:
		panic("invalid deletion of object file")
	}
}

func (cm *cleanupManager) deleteObsoleteObject(
	fileType fileType, jobID int, fileNum base.DiskFileNum,
) {
	if fileType != fileTypeTable {
		panic("not an object")
	}

	var path string
	meta, err := cm.objProvider.Lookup(fileType, fileNum)
	if err != nil {
		path = "<nil>"
	} else {
		path = cm.objProvider.Path(meta)
		err = cm.objProvider.Remove(fileType, fileNum)
	}
	if cm.objProvider.IsNotExistError(err) {
		return
	}

	switch fileType {
	case fileTypeTable:
		cm.opts.EventListener.TableDeleted(TableDeleteInfo{
			JobID:   jobID,
			Path:    path,
			FileNum: fileNum.FileNum(),
			Err:     err,
		})
	}
}

// maybeLogLocked issues a log if the job queue gets 75% full and issues a log
// when the job queue gets back to less than 10% full.
//
// Must be called with cm.mu locked.
func (cm *cleanupManager) maybeLogLocked() {
	const highThreshold = jobsQueueDepth * 3 / 4
	const lowThreshold = jobsQueueDepth / 10

	jobsInQueue := cm.mu.totalJobs - cm.mu.completedJobs

	if !cm.mu.jobsQueueWarningIssued && jobsInQueue > highThreshold {
		cm.mu.jobsQueueWarningIssued = true
		cm.opts.Logger.Infof("cleanup falling behind; job queue has over %d jobs", highThreshold)
	}

	if cm.mu.jobsQueueWarningIssued && jobsInQueue < lowThreshold {
		cm.mu.jobsQueueWarningIssued = false
		cm.opts.Logger.Infof("cleanup back to normal; job queue has under %d jobs", lowThreshold)
	}
}
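
The wait loop in maybePace is the general pattern for pacing work with cockroachdb/tokenbucket: ask the bucket to fulfill a request and sleep for the suggested duration until it succeeds. Below is a minimal standalone sketch of that same pattern, not Pebble code; the refill rate, burst, and per-deletion token cost are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"github.com/cockroachdb/tokenbucket"
)

func main() {
	var tb tokenbucket.TokenBucket
	// Assumed settings for illustration: 1 token/sec refill, 1 token burst,
	// mirroring the bucket configured in cleanupManager.mainLoop.
	tb.Init(1.0, 1.0)

	for i := 0; i < 3; i++ {
		// Pretend each deletion costs half a token (an assumption, not a
		// Pebble constant); block until the bucket can cover it.
		for {
			ok, wait := tb.TryToFulfill(tokenbucket.Tokens(0.5))
			if ok {
				break
			}
			time.Sleep(wait)
		}
		fmt.Println("paced deletion", i)
	}
}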
pebble/cleaner_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"fmt"
	"sort"
	"strings"
	"testing"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

func TestCleaner(t *testing.T) {
	dbs := make(map[string]*DB)
	defer func() {
		for _, db := range dbs {
			require.NoError(t, db.Close())
		}
	}()

	mem := vfs.NewMem()
	var memLog base.InMemLogger
	fs := vfs.WithLogging(mem, memLog.Infof)
	datadriven.RunTest(t, "testdata/cleaner", func(t *testing.T, td *datadriven.TestData) string {
		memLog.Reset()
		switch td.Cmd {
		case "batch":
			if len(td.CmdArgs) != 1 {
				return "batch <db>"
			}
			d := dbs[td.CmdArgs[0].String()]
			b := d.NewBatch()
			if err := runBatchDefineCmd(td, b); err != nil {
				return err.Error()
			}
			if err := b.Commit(Sync); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "compact":
			if len(td.CmdArgs) != 1 {
				return "compact <db>"
			}
			d := dbs[td.CmdArgs[0].String()]
			if err := d.Compact(nil, []byte("\xff"), false); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "flush":
			if len(td.CmdArgs) != 1 {
				return "flush <db>"
			}
			d := dbs[td.CmdArgs[0].String()]
			if err := d.Flush(); err != nil {
				return err.Error()
			}
			return memLog.String()

		case "close":
			if len(td.CmdArgs) != 1 {
				return "close <db>"
			}
			dbDir := td.CmdArgs[0].String()
			d := dbs[dbDir]
			if err := d.Close(); err != nil {
				return err.Error()
			}
			delete(dbs, dbDir)
			return memLog.String()

		case "list":
			if len(td.CmdArgs) != 1 {
				return "list <dir>"
			}
			paths, err := mem.List(td.CmdArgs[0].String())
			if err != nil {
				return err.Error()
			}
			sort.Strings(paths)
			return fmt.Sprintf("%s\n", strings.Join(paths, "\n"))

		case "open":
			if len(td.CmdArgs) < 1 || len(td.CmdArgs) > 3 {
				return "open <dir> [archive] [readonly]"
			}
			dir := td.CmdArgs[0].String()
			opts := (&Options{
				FS:     fs,
				WALDir: dir + "_wal",
			}).WithFSDefaults()

			for i := 1; i < len(td.CmdArgs); i++ {
				switch td.CmdArgs[i].String() {
				case "readonly":
					opts.ReadOnly = true
				case "archive":
					opts.Cleaner = ArchiveCleaner{}
				default:
					return "open <dir> [archive] [readonly]"
				}
			}
			// Asynchronous table stats retrieval makes the output flaky.
			opts.private.disableTableStats = true
			opts.private.testingAlwaysWaitForCleanup = true
			d, err := Open(dir, opts)
			if err != nil {
				return err.Error()
			}
			d.TestOnlyWaitForCleaning()
			dbs[dir] = d
			return memLog.String()

		case "create-bogus-file":
			if len(td.CmdArgs) != 1 {
				return "create-bogus-file <db/file>"
			}
			dst, err := fs.Create(td.CmdArgs[0].String())
			require.NoError(t, err)
			_, err = dst.Write([]byte("bogus data"))
			require.NoError(t, err)
			require.NoError(t, dst.Sync())
			require.NoError(t, dst.Close())
			return memLog.String()

		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}
pebble/cmd/pebble/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
pebble
pebble/cmd/pebble/db.go (new file, 168 lines)
@@ -0,0 +1,168 @@
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package main

import (
	"log"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/cockroachdb/pebble/internal/bytealloc"
	"github.com/cockroachdb/pebble/objstorage/remote"
	"github.com/cockroachdb/pebble/vfs"
)

// DB specifies the minimal interfaces that need to be implemented to support
// the pebble command.
type DB interface {
	NewIter(*pebble.IterOptions) iterator
	NewBatch() batch
	Scan(iter iterator, key []byte, count int64, reverse bool) error
	Metrics() *pebble.Metrics
	Flush() error
}

type iterator interface {
	SeekLT(key []byte) bool
	SeekGE(key []byte) bool
	Valid() bool
	Key() []byte
	Value() []byte
	First() bool
	Next() bool
	Last() bool
	Prev() bool
	Close() error
}

type batch interface {
	Close() error
	Commit(opts *pebble.WriteOptions) error
	Set(key, value []byte, opts *pebble.WriteOptions) error
	Delete(key []byte, opts *pebble.WriteOptions) error
	LogData(data []byte, opts *pebble.WriteOptions) error
}

// Adapters for Pebble. Since the interfaces above are based on Pebble's
// interfaces, it can simply forward calls for everything.
type pebbleDB struct {
	d       *pebble.DB
	ballast []byte
}

func newPebbleDB(dir string) DB {
	cache := pebble.NewCache(cacheSize)
	defer cache.Unref()
	opts := &pebble.Options{
		Cache:                       cache,
		Comparer:                    mvccComparer,
		DisableWAL:                  disableWAL,
		FormatMajorVersion:          pebble.FormatNewest,
		L0CompactionThreshold:       2,
		L0StopWritesThreshold:       1000,
		LBaseMaxBytes:               64 << 20, // 64 MB
		Levels:                      make([]pebble.LevelOptions, 7),
		MaxOpenFiles:                16384,
		MemTableSize:                64 << 20,
		MemTableStopWritesThreshold: 4,
		Merger: &pebble.Merger{
			Name: "cockroach_merge_operator",
		},
		MaxConcurrentCompactions: func() int {
			return 3
		},
	}

	for i := 0; i < len(opts.Levels); i++ {
		l := &opts.Levels[i]
		l.BlockSize = 32 << 10       // 32 KB
		l.IndexBlockSize = 256 << 10 // 256 KB
		l.FilterPolicy = bloom.FilterPolicy(10)
		l.FilterType = pebble.TableFilter
		if i > 0 {
			l.TargetFileSize = opts.Levels[i-1].TargetFileSize * 2
		}
		l.EnsureDefaults()
	}
	opts.Levels[6].FilterPolicy = nil
	opts.FlushSplitBytes = opts.Levels[0].TargetFileSize

	opts.EnsureDefaults()

	if verbose {
		lel := pebble.MakeLoggingEventListener(nil)
		opts.EventListener = &lel
		opts.EventListener.TableDeleted = nil
		opts.EventListener.TableIngested = nil
		opts.EventListener.WALCreated = nil
		opts.EventListener.WALDeleted = nil
	}

	if pathToLocalSharedStorage != "" {
		opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
			// Store all shared objects on local disk, for convenience.
			"": remote.NewLocalFS(pathToLocalSharedStorage, vfs.Default),
		})
		opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
		if secondaryCacheSize != 0 {
			opts.Experimental.SecondaryCacheSizeBytes = secondaryCacheSize
		}
	}

	p, err := pebble.Open(dir, opts)
	if err != nil {
		log.Fatal(err)
	}
	if pathToLocalSharedStorage != "" {
		if err := p.SetCreatorID(1); err != nil {
			log.Fatal(err)
		}
	}
	return pebbleDB{
		d:       p,
		ballast: make([]byte, 1<<30),
	}
}

func (p pebbleDB) Flush() error {
	return p.d.Flush()
}

func (p pebbleDB) NewIter(opts *pebble.IterOptions) iterator {
	iter, _ := p.d.NewIter(opts)
	return iter
}

func (p pebbleDB) NewBatch() batch {
	return p.d.NewBatch()
}

func (p pebbleDB) Scan(iter iterator, key []byte, count int64, reverse bool) error {
	var data bytealloc.A
	if reverse {
		for i, valid := 0, iter.SeekLT(key); valid; valid = iter.Prev() {
			data, _ = data.Copy(iter.Key())
			data, _ = data.Copy(iter.Value())
			i++
			if i >= int(count) {
				break
			}
		}
	} else {
		for i, valid := 0, iter.SeekGE(key); valid; valid = iter.Next() {
			data, _ = data.Copy(iter.Key())
			data, _ = data.Copy(iter.Value())
			i++
			if i >= int(count) {
				break
			}
		}
	}
	return nil
}

func (p pebbleDB) Metrics() *pebble.Metrics {
	return p.d.Metrics()
}
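
pebbleDB.Scan above is just a seek followed by a bounded walk of the iterator. A small self-contained sketch of that access pattern against the Pebble API directly (not part of the benchmark tool; the keys and values are made up for illustration):

package main

import (
	"fmt"
	"log"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
)

func main() {
	// Open an in-memory store so the sketch has no on-disk side effects.
	db, err := pebble.Open("", &pebble.Options{FS: vfs.NewMem()})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Set([]byte("key000001"), []byte("v1"), pebble.Sync); err != nil {
		log.Fatal(err)
	}

	// Forward scan from a seek key, mirroring the non-reverse branch of
	// pebbleDB.Scan.
	iter, err := db.NewIter(nil)
	if err != nil {
		log.Fatal(err)
	}
	for valid := iter.SeekGE([]byte("key")); valid; valid = iter.Next() {
		fmt.Printf("%s => %s\n", iter.Key(), iter.Value())
	}
	if err := iter.Close(); err != nil {
		log.Fatal(err)
	}
}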
pebble/cmd/pebble/fsbench.go (new file, 707 lines)
@@ -0,0 +1,707 @@
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"path"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/spf13/cobra"
)

var fsBenchCmd = &cobra.Command{
	Use:   "fs <dir>",
	Short: "Run file system benchmarks.",
	Long: `
Run file system benchmarks. Each benchmark is predefined and can be
run using the command "bench fs <dir> --bench-name <benchmark>".
Each possible <benchmark> which can be run is defined in the code.
Benchmarks may require the specification of a --duration or
--max-ops flag, to prevent the benchmark from running forever
or running out of memory.

The --num-times flag can be used to run the entire benchmark more than
once. If the flag isn't provided, then the benchmark is only run once.
`,
	Args: cobra.ExactArgs(1),
	RunE: runFsBench,
}

const writeBatchSize = 1 << 10

var fsConfig struct {
	// An upper limit on the number of ops which can be run.
	maxOps int

	// Benchmark to run.
	benchname string

	// Number of times each benchmark should be run.
	numTimes int

	fs vfs.FS

	precomputedWriteBatch []byte
}

func init() {
	fsBenchCmd.Flags().IntVar(
		&fsConfig.maxOps, "max-ops", 0,
		"Maximum number of times the operation which is being benchmarked should be run.",
	)

	fsBenchCmd.Flags().StringVar(
		&fsConfig.benchname, "bench-name", "", "The benchmark to run.")
	fsBenchCmd.MarkFlagRequired("bench-name")

	fsBenchCmd.Flags().IntVar(
		&fsConfig.numTimes, "num-times", 1,
		"Number of times each benchmark should be run.")

	// Add the list subcommand.
	fsBenchCmd.AddCommand(listFsBench)

	// Just use the default vfs implementation for now.
	fsConfig.fs = vfs.Default

	fsConfig.precomputedWriteBatch = bytes.Repeat([]byte("a"), writeBatchSize)
}

// State relevant to a benchmark.
type fsBench struct {
	// A short name for the benchmark.
	name string

	// A one line description for the benchmark.
	description string

	// numOps is the total number of ops which
	// have been run for the benchmark. This is used
	// to make sure that we don't benchmark the operation
	// more than max-ops times.
	numOps int

	// directory under which the benchmark is run.
	dir     vfs.File
	dirName string

	// Stats associated with the benchmark.
	reg *histogramRegistry

	// The operation which we're benchmarking. This
	// will be called over and over again.
	// Returns false if run should no longer be called.
	run func(*namedHistogram) bool

	// Stop the benchmark from executing any further.
	// Stop is safe to call concurrently with run.
	stop func()

	// A cleanup func which must be called after
	// the benchmark has finished running.
	// Clean should be only called after making sure
	// that the run function is no longer executing.
	clean func()
}

// createFile can be used to create an empty file.
// Invariant: File shouldn't already exist.
func createFile(filepath string) vfs.File {
	fh, err := fsConfig.fs.Create(filepath)
	if err != nil {
		log.Fatalln(err)
	}
	return fh
}

// Invariant: file with filepath should exist.
func deleteFile(filepath string) {
	err := fsConfig.fs.Remove(filepath)
	if err != nil {
		log.Fatalln(err)
	}
}

// Write size bytes to the file in batches.
func writeToFile(fh vfs.File, size int64) {
	for size > 0 {
		var toWrite []byte
		if size >= writeBatchSize {
			toWrite = fsConfig.precomputedWriteBatch
		} else {
			toWrite = fsConfig.precomputedWriteBatch[:size]
		}
		written, err := fh.Write(toWrite)
		if err != nil {
			log.Fatalln(err)
		}
		if written != len(toWrite) {
			log.Fatalf("Couldn't write %d bytes to file\n", size)
		}
		size -= int64(len(toWrite))
	}
}

func syncFile(fh vfs.File) {
	err := fh.Sync()
	if err != nil {
		log.Fatalln(err)
	}
}

func closeFile(fh vfs.File) {
	err := fh.Close()
	if err != nil {
		log.Fatalln(err)
	}
}

func getDiskUsage(filepath string) {
	_, err := fsConfig.fs.GetDiskUsage(filepath)
	if err != nil {
		log.Fatalln(err)
	}
}

func openDir(filepath string) vfs.File {
	fh, err := fsConfig.fs.OpenDir(filepath)
	if err != nil {
		log.Fatalln(err)
	}
	return fh
}

func mkDir(filepath string) {
	err := fsConfig.fs.MkdirAll(filepath, 0755)
	if err != nil {
		log.Fatalln(err)
	}
}

func removeAllFiles(filepath string) {
	err := fsConfig.fs.RemoveAll(filepath)
	if err != nil {
		log.Fatalln(err)
	}
}

// fileSize is in bytes.
func createBench(benchName string, benchDescription string) fsBenchmark {
	createBench := func(dirpath string) *fsBench {
		bench := &fsBench{}
		mkDir(dirpath)
		fh := openDir(dirpath)

		bench.dir = fh
		bench.dirName = dirpath
		bench.reg = newHistogramRegistry()
		bench.numOps = 0
		bench.name = benchName
		bench.description = benchDescription

		// setup the operation to benchmark, and the cleanup functions.
		pref := "temp_"
		var numFiles int
		var done atomic.Bool

		bench.run = func(hist *namedHistogram) bool {
			if done.Load() {
				return false
			}

			start := time.Now()
			fh := createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, numFiles)))
			syncFile(bench.dir)
			hist.Record(time.Since(start))

			closeFile(fh)
			numFiles++
			return true
		}

		bench.stop = func() {
			done.Store(true)
		}

		bench.clean = func() {
			removeAllFiles(dirpath)
			closeFile(bench.dir)
		}

		return bench
	}

	return fsBenchmark{
		createBench,
		benchName,
		benchDescription,
	}
}

// This benchmark prepopulates a directory with some files of a given size. Then, it creates and deletes
// a file of some size, while measuring only the performance of the delete.
func deleteBench(
	benchName string, benchDescription string, preNumFiles int, preFileSize int64, fileSize int64,
) fsBenchmark {

	createBench := func(dirpath string) *fsBench {
		bench := &fsBench{}
		mkDir(dirpath)
		fh := openDir(dirpath)

		bench.dir = fh
		bench.dirName = dirpath
		bench.reg = newHistogramRegistry()
		bench.numOps = 0
		bench.name = benchName
		bench.description = benchDescription

		// prepopulate the directory
		prePref := "pre_temp_"
		for i := 0; i < preNumFiles; i++ {
			fh := createFile(path.Join(dirpath, fmt.Sprintf("%s%d", prePref, i)))
			if preFileSize > 0 {
				writeToFile(fh, preFileSize)
				syncFile(fh)
			}
			closeFile(fh)
		}
		syncFile(bench.dir)

		var done atomic.Bool
		bench.run = func(hist *namedHistogram) bool {
			if done.Load() {
				return false
			}

			filename := "newfile"
			fh := createFile(path.Join(dirpath, filename))
			writeToFile(fh, fileSize)
			syncFile(fh)

			start := time.Now()
			deleteFile(path.Join(dirpath, filename))
			hist.Record(time.Since(start))

			return true
		}

		bench.stop = func() {
			done.Store(true)
		}

		bench.clean = func() {
			removeAllFiles(dirpath)
			closeFile(bench.dir)
		}

		return bench
	}

	return fsBenchmark{
		createBench,
		benchName,
		benchDescription,
	}
}

// This benchmark creates some files in a directory, and then measures the performance
// of the vfs.Remove function.
// fileSize is in bytes.
func deleteUniformBench(
	benchName string, benchDescription string, numFiles int, fileSize int64,
) fsBenchmark {
	createBench := func(dirpath string) *fsBench {
		bench := &fsBench{}
		mkDir(dirpath)
		fh := openDir(dirpath)

		bench.dir = fh
		bench.dirName = dirpath
		bench.reg = newHistogramRegistry()
		bench.numOps = 0
		bench.name = benchName
		bench.description = benchDescription

		// setup the operation to benchmark, and the cleanup functions.
		pref := "temp_"
		for i := 0; i < numFiles; i++ {
			fh := createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, i)))
			if fileSize > 0 {
				writeToFile(fh, fileSize)
				syncFile(fh)
			}
			closeFile(fh)
		}
		syncFile(bench.dir)

		var done atomic.Bool
		bench.run = func(hist *namedHistogram) bool {
			if done.Load() {
				return false
			}

			if numFiles == 0 {
				return false
			}

			start := time.Now()
			deleteFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, numFiles-1)))
			hist.Record(time.Since(start))

			numFiles--
			return true
		}

		bench.stop = func() {
			done.Store(true)
		}

		bench.clean = func() {
			removeAll(dirpath)
			closeFile(bench.dir)
		}

		return bench
	}

	return fsBenchmark{
		createBench,
		benchName,
		benchDescription,
	}
}

// Tests the performance of syncing data to disk.
// Only measures the sync performance.
// The writes will be synced after every writeSize bytes have been written.
func writeSyncBench(
	benchName string, benchDescription string, maxFileSize int64, writeSize int64,
) fsBenchmark {

	if writeSize > maxFileSize {
		log.Fatalln("File write threshold is greater than max file size.")
	}

	createBench := func(dirpath string) *fsBench {
		bench := &fsBench{}
		mkDir(dirpath)
		fh := openDir(dirpath)

		bench.dir = fh
		bench.dirName = dirpath
		bench.reg = newHistogramRegistry()
		bench.numOps = 0
		bench.name = benchName
		bench.description = benchDescription

		pref := "temp_"
		var benchData struct {
			done         atomic.Bool
			fh           vfs.File
			fileNum      int
			bytesWritten int64
		}
		benchData.fh = createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, benchData.fileNum)))

		bench.run = func(hist *namedHistogram) bool {
			if benchData.done.Load() {
				return false
			}

			if benchData.bytesWritten+writeSize > maxFileSize {
				closeFile(benchData.fh)
				benchData.fileNum++
				benchData.bytesWritten = 0
				benchData.fh = createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, benchData.fileNum)))
			}

			benchData.bytesWritten += writeSize
			writeToFile(benchData.fh, writeSize)

			start := time.Now()
			syncFile(benchData.fh)
			hist.Record(time.Since(start))

			return true
		}

		bench.stop = func() {
			benchData.done.Store(true)
		}

		bench.clean = func() {
			closeFile(benchData.fh)
			removeAllFiles(dirpath)
			closeFile(bench.dir)
		}

		return bench
	}

	return fsBenchmark{
		createBench,
		benchName,
		benchDescription,
	}
}

// Tests the performance of calling the vfs.GetDiskUsage call on a directory,
// as the number of files/total size of files in the directory grows.
func diskUsageBench(
	benchName string, benchDescription string, maxFileSize int64, writeSize int64,
) fsBenchmark {

	if writeSize > maxFileSize {
		log.Fatalln("File write threshold is greater than max file size.")
	}

	createBench := func(dirpath string) *fsBench {
		bench := &fsBench{}
		mkDir(dirpath)
		fh := openDir(dirpath)

		bench.dir = fh
		bench.dirName = dirpath
		bench.reg = newHistogramRegistry()
		bench.numOps = 0
		bench.name = benchName
		bench.description = benchDescription

		pref := "temp_"
		var benchData struct {
			done         atomic.Bool
			fh           vfs.File
			fileNum      int
			bytesWritten int64
		}
		benchData.fh = createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, benchData.fileNum)))

		bench.run = func(hist *namedHistogram) bool {
			if benchData.done.Load() {
				return false
			}

			if benchData.bytesWritten+writeSize > maxFileSize {
				closeFile(benchData.fh)
				benchData.fileNum++
				benchData.bytesWritten = 0
				benchData.fh = createFile(path.Join(dirpath, fmt.Sprintf("%s%d", pref, benchData.fileNum)))
			}

			benchData.bytesWritten += writeSize
			writeToFile(benchData.fh, writeSize)
			syncFile(benchData.fh)

			start := time.Now()
			getDiskUsage(dirpath)
			hist.Record(time.Since(start))

			return true
		}

		bench.stop = func() {
			benchData.done.Store(true)
		}

		bench.clean = func() {
			closeFile(benchData.fh)
			removeAllFiles(dirpath)
			closeFile(bench.dir)
		}

		return bench
	}

	return fsBenchmark{
		createBench,
		benchName,
		benchDescription,
	}
}

// A benchmark is a function which takes a directory
// as input and returns the fsBench struct which has
// all the information required to run the benchmark.
type fsBenchmark struct {
	createBench func(string) *fsBench
	name        string
	description string
}

// The various benchmarks which can be run.
var benchmarks = map[string]fsBenchmark{
	"create_empty": createBench("create_empty", "create empty file, sync parent dir"),
	"delete_10k_2MiB": deleteUniformBench(
		"delete_10k_2MiB", "create 10k 2MiB size files, measure deletion times", 10_000, 2<<20,
	),
	"delete_100k_2MiB": deleteUniformBench(
		"delete_100k_2MiB", "create 100k 2MiB size files, measure deletion times", 100_000, 2<<20,
	),
	"delete_200k_2MiB": deleteUniformBench(
		"delete_200k_2MiB", "create 200k 2MiB size files, measure deletion times", 200_000, 2<<20,
	),
	"write_sync_1MiB": writeSyncBench(
		"write_sync_1MiB", "Write 1MiB to a file, then sync, while timing the sync.", 2<<30, 1<<20,
	),
	"write_sync_16MiB": writeSyncBench(
		"write_sync_16MiB", "Write 16MiB to a file, then sync, while timing the sync.", 2<<30, 16<<20,
	),
	"write_sync_128MiB": writeSyncBench(
		"write_sync_128MiB", "Write 128MiB to a file, then sync, while timing the sync.", 2<<30, 128<<20,
	),
	"disk_usage_128MB": diskUsageBench(
		"disk_usage_128MB",
		"Write 128MiB to a file, measure GetDiskUsage call. Create a new file, when file size is 1GB.",
		1<<30, 128<<20,
	),
	"disk_usage_many_files": diskUsageBench(
		"disk_usage_many_files",
		"Create new file, Write 128KiB to a file, measure GetDiskUsage call.",
		128<<10, 128<<10,
	),
	"delete_large_dir_256MiB": deleteBench(
		"delete_large_dir_256MiB", "Prepopulate directory with 100k 1MiB files, measure delete performance of 256MiB files",
		1e5, 1<<20, 256<<20,
	),
	"delete_large_dir_2MiB": deleteBench(
		"delete_large_dir_2MiB", "Prepopulate directory with 100k 1MiB files, measure delete performance of 2MiB files",
		1e5, 1<<20, 2<<20,
	),
	"delete_small_dir_2GiB": deleteBench(
		"delete_small_dir_2GiB", "Prepopulate directory with 1k 1MiB files, measure delete performance of 2GiB files",
		1e3, 1<<20, 2<<30,
	),
	"delete_small_dir_256MiB": deleteBench(
		"delete_small_dir_256MiB", "Prepopulate directory with 1k 1MiB files, measure delete performance of 256MiB files",
		1e3, 1<<20, 256<<20,
	),
	"delete_small_dir_2MiB": deleteBench(
		"delete_small_dir_2MiB", "Prepopulate directory with 1k 1MiB files, measure delete performance of 2MiB files",
		1e3, 1<<20, 2<<20,
	),
}

func runFsBench(_ *cobra.Command, args []string) error {
	benchmark, ok := benchmarks[fsConfig.benchname]
	if !ok {
		return errors.Errorf("trying to run an unknown benchmark: %s", fsConfig.benchname)
	}

	// Run the benchmark a couple of times.
	fmt.Printf("The benchmark will be run %d time(s).\n", fsConfig.numTimes)
	for i := 0; i < fsConfig.numTimes; i++ {
		fmt.Println("Starting benchmark:", i)
		benchStruct := benchmark.createBench(args[0])
		runTestWithoutDB(testWithoutDB{
			init: benchStruct.init,
			tick: benchStruct.tick,
			done: benchStruct.done,
		})
	}
	return nil
}

func (bench *fsBench) init(wg *sync.WaitGroup) {
	fmt.Println("Running benchmark:", bench.name)
	fmt.Println("Description:", bench.description)

	wg.Add(1)
	go bench.execute(wg)
}

func (bench *fsBench) execute(wg *sync.WaitGroup) {
	defer wg.Done()

	latencyHist := bench.reg.Register(bench.name)

	for {
		// run the op which we're benchmarking.
		bench.numOps++

		// The run function determines exactly what latency
		// it wants to measure.
		continueBench := bench.run(latencyHist)
		if !continueBench || (fsConfig.maxOps > 0 && bench.numOps >= fsConfig.maxOps) {
			break
		}
	}
}

func (bench *fsBench) tick(elapsed time.Duration, i int) {
	if i%20 == 0 {
		fmt.Println("____optype__elapsed__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)__pMax(ms)")
	}
	bench.reg.Tick(func(tick histogramTick) {
		h := tick.Hist

		fmt.Printf("%10s %8s %14.1f %14.1f %5.6f %5.6f %5.6f %5.6f\n",
			tick.Name[:10],
			time.Duration(elapsed.Seconds()+0.5)*time.Second,
			float64(h.TotalCount())/tick.Elapsed.Seconds(),
			float64(tick.Cumulative.TotalCount())/elapsed.Seconds(),
			time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(100)).Seconds()*1000,
		)
	})
}

func (bench *fsBench) done(wg *sync.WaitGroup, elapsed time.Duration) {
	// Do the cleanup.
	bench.stop()
	wg.Wait()
	defer bench.clean()

	fmt.Println("\n____optype__elapsed_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)__pMax(ms)")

	resultTick := histogramTick{}
	bench.reg.Tick(func(tick histogramTick) {
		h := tick.Cumulative
		if resultTick.Cumulative == nil {
			resultTick.Now = tick.Now
			resultTick.Cumulative = h
		} else {
			resultTick.Cumulative.Merge(h)
		}

		fmt.Printf("%10s %7.1fs %14d %14.1f %5.6f %5.6f %5.6f %5.6f %5.6f\n",
			tick.Name[:10], elapsed.Seconds(), h.TotalCount(),
			float64(h.TotalCount())/elapsed.Seconds(),
			time.Duration(h.Mean()).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(50)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(95)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(99)).Seconds()*1000,
			time.Duration(h.ValueAtQuantile(100)).Seconds()*1000,
		)
	})
	fmt.Println()

	resultHist := resultTick.Cumulative

	fmt.Printf("Benchmarkfsbench/%s %d %0.1f ops/sec\n\n",
		bench.name,
		resultHist.TotalCount(),
		float64(resultHist.TotalCount())/elapsed.Seconds(),
	)
}

func verbosef(fmtstr string, args ...interface{}) {
	if verbose {
		fmt.Printf(fmtstr, args...)
	}
}

func removeAll(dir string) {
	verbosef("Removing %q.\n", dir)
	if err := os.RemoveAll(dir); err != nil {
		log.Fatal(err)
	}
}
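
Because benchmarks are just entries in the benchmarks map, adding one is a matter of registering another fsBenchmark and running it with the "bench fs <dir> --bench-name <benchmark>" invocation described in the command help. A hedged sketch of such a registration (the "write_sync_4MiB" name and its sizes are made up for illustration and are not part of the map above); it would live alongside fsbench.go in the same package:

// Hypothetical registration of an extra write/sync benchmark. The parameters
// mirror the existing write_sync_* entries but are illustrative only.
func init() {
	benchmarks["write_sync_4MiB"] = writeSyncBench(
		"write_sync_4MiB", "Write 4MiB to a file, then sync, while timing the sync.", 2<<30, 4<<20,
	)
}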
pebble/cmd/pebble/fsbenchlist.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/spf13/cobra"
)

var listFsBench = &cobra.Command{
	Use:   "list [<name>] [<name>] ...",
	Short: "List the available file system benchmarks.",
	Long: `
List the available file system benchmarks. If no <name> is supplied
as an argument, then all the available benchmark names are printed.
If one or more <name>s are supplied as arguments, then the benchmark
descriptions are printed out for those names.
`,
	RunE: runListFsBench,
}

func runListFsBench(_ *cobra.Command, args []string) error {
	if len(args) == 0 {
		fmt.Println("Available benchmarks:")
		for name := range benchmarks {
			fmt.Println(name)
		}
	} else {
		for _, v := range args {
			benchStruct, ok := benchmarks[v]
			if !ok {
				return errors.Errorf("trying to print out the description for unknown benchmark: %s", v)
			}
			fmt.Println("Name:", benchStruct.name)
			fmt.Println("Description:", benchStruct.description)
		}
	}
	return nil
}
Some files were not shown because too many files have changed in this diff.