Merge branch 'v1.4.19-p1' into release-cdn

This commit is contained in:
Cassandra Heart 2024-06-15 11:16:16 -05:00
commit 26db825be8
No known key found for this signature in database
GPG Key ID: 6352152859385958
122 changed files with 454343 additions and 9126 deletions

View File

@ -1,18 +1,28 @@
# Local development
# Contributing
The following software is required for local development (assuming MacOS ARM):
## Testing
- Go 1.20
- Rust toolchain
- GMP 6.3: `brew install gmp`
- Install the Go plugin for uniffi-rs: `cargo install uniffi-bindgen-go --git https://github.com/NordSecurity/uniffi-bindgen-go --tag v0.2.1+v0.25.0`
Testing the [`vdf`](./vdf) and [`node`](./node) packages requires linking the
[native VDF](./crates/vdf). The `test.sh` scripts in the respective directories
help with this.
# Building release binaries
## Pull Requests
The following software is required to build release binaries (assuming MacOS
ARM):
Contributions are welcome; a new network is rife with opportunities. We are
in the process of updating our JIRA board so that it can be made public. The
repository has basic coding guidelines:
- [Local development](#local-development) dependencies
- 80 character line limit, except where gofmt or the syntax makes this
impossible
- Error wrapping matching function names
- Interface composition and dependency injection with Wire
## Building release binaries
The following software is required to build release binaries (assuming MacOS
ARM):
- [Running from source](README.md#running-from-source) dependencies
- Docker
- [Taskfile](https://taskfile.dev/)
@ -22,13 +32,8 @@ that statically link the [native VDF](./crates/vdf) for the supported platforms:
```shell
task build_node_arm64_macos
task build_node_arm64_linux
task build_node_arm64_macos
task build_node_amd64_linux
```
The output binaries will be in `node/build`.
# Testing
Testing the [`vdf`](./vdf) and [`node`](./node) packages requires linking the
[native VDF](./crates/vdf). The `test.sh` scripts in the respective directories
help with this.

17
Cargo.lock generated
View File

@ -164,6 +164,15 @@ dependencies = [
"byte-tools",
]
[[package]]
name = "bls48581"
version = "0.1.0"
dependencies = [
"hex 0.4.3",
"serde_json",
"uniffi",
]
[[package]]
name = "bumpalo"
version = "3.16.0"
@ -471,6 +480,12 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "is-terminal"
version = "0.4.12"
@ -1023,7 +1038,7 @@ dependencies = [
"bit-vec",
"classgroup",
"criterion",
"hex",
"hex 0.3.2",
"num-traits",
"sha2",
"uniffi",

View File

@ -15,6 +15,7 @@
members = [
"crates/vdf",
"crates/classgroup",
"crates/bls48581",
]
[profile.release]

View File

@ -27,6 +27,10 @@ COPY . .
WORKDIR /opt/ceremonyclient/vdf
RUN ./generate.sh
## Generate Rust bindings for BLS48581
WORKDIR /opt/ceremonyclient/bls48581
RUN ./generate.sh
# Build and install the node
WORKDIR /opt/ceremonyclient/node

View File

@ -1,277 +0,0 @@
# Quilibrium - Dawn
Quilibrium is a decentralized alternative to platform-as-a-service providers.
This release, mirrored to GitHub, is the Dawn release, which contains the
initial application, the MPC Powers-of-Tau Ceremony. Documentation for the
underlying technology can be found at https://www.quilibrium.com/
## Install Requirements
wget https://go.dev/dl/go1.20.14.linux-amd64.tar.gz
sudo tar -xvf go1.20.14.linux-amd64.tar.gz
sudo mv go /usr/local
sudo rm go1.20.14.linux-amd64.tar.gz
sudo nano ~/.bashrc
At the end of the file, add these lines and save the file.
GOROOT=/usr/local/go
GOPATH=$HOME/go
PATH=$GOPATH/bin:$GOROOT/bin:$PATH
On the command line, run
source ~/.bashrc
Check the Go version:
go version
It must show "go version go1.20.14 linux/amd64"
## Configure Linux Network Device Settings
To optimize throughput and latency for the large parallel jobs typical of a network like Q, edit the sysctl configuration:
nano /etc/sysctl.conf
Copy and paste the 3 lines below into the file. The values below are six hundred million.
#Increase buffer sizes for better network performance
net.core.rmem_max=600000000
net.core.wmem_max=600000000
Save and exit, then run
sudo sysctl -p
## Clone the Repo
git clone https://github.com/QuilibriumNetwork/ceremonyclient.git
cd ceremonyclient/node
## Quick Start
All commands are to be run in the `node/` folder.
If you have a voucher from the offline ceremony, first run:
GOEXPERIMENT=arenas go run ./... -import-priv-key `cat /path/to/voucher.hex`
If you do not, or have already run the above, run:
GOEXPERIMENT=arenas go run ./...
## Peer ID
In order to find the peer id of a running node, execute the following command from the `node/` folder:
GOEXPERIMENT=arenas go run ./... -peer-id
The peer id will be printed to stdout.
## EXPERIMENTAL gRPC/REST Support
If you want to enable gRPC/REST, add the following entries to your config.yml:
sudo nano .config/config.yml
Edit these lines below:
listenGrpcMultiaddr: /ip4/127.0.0.1/tcp/8337
listenRESTMultiaddr: /ip4/127.0.0.1/tcp/8338
Save and exit
Ensure that port 8337, among other needed ports, is enabled via the firewall.
sudo ufw enable
sudo ufw allow 8336
sudo ufw allow 8337
sudo ufw allow 8338
sudo ufw status
Please note: this interface, while read-only, is unauthenticated and not rate-
limited. It is recommended that you only enable it if you are properly
controlling access via a firewall, or only query it via localhost.
## Token Balance
In order to query the token balance of a running node, execute the following command from the `node/` folder:
GOEXPERIMENT=arenas go run ./... -balance
Or
GOEXPERIMENT=arenas /root/go/bin/node -balance
The confirmed token balance will be printed to stdout in QUILs.
Note that this feature requires that [gRPC support](#experimental--grpcrest-support) is enabled.
## Build the node binary file
GOEXPERIMENT=arenas go install ./...
This will build the binary file in the /root/go/bin folder.
## Start the Quilibrium Node as a Service
nano /lib/systemd/system/ceremonyclient.service
Write the code below
[Unit]
Description=Ceremony Client Go App Service
[Service]
Type=simple
Restart=always
RestartSec=5s
WorkingDirectory=/root/ceremonyclient/node
Environment=GOEXPERIMENT=arenas
ExecStart=/root/go/bin/node ./...
[Install]
WantedBy=multi-user.target
Save and exit.
To start the service, run
service ceremonyclient start
To stop the service, run
service ceremonyclient stop
To view the service logs, run
sudo journalctl -u ceremonyclient.service -f --no-hostname -o cat
## Upgrading Node
service ceremonyclient stop
git fetch origin
git merge origin
Go to the ceremonyclient/node folder and run
GOEXPERIMENT=arenas go clean -v -n -a ./...
rm /root/go/bin/node
GOEXPERIMENT=arenas go install ./...
service ceremonyclient start
If everything is okay, you will see logs when you run
sudo journalctl -u ceremonyclient.service -f --no-hostname -o cat
Ensure that your service is running correctly.
## Auto Upgrading Script
Create a file named update.sh on your server and put the code below.
#!/bin/bash
# Stop the ceremonyclient service
service ceremonyclient stop
# Switch to the ~/ceremonyclient directory
cd ~/ceremonyclient
# Fetch updates from the remote repository
git fetch origin
git merge origin
# Switch to the ~/ceremonyclient/node directory
cd ~/ceremonyclient/node
# Clean and reinstall node
GOEXPERIMENT=arenas go clean -v -n -a ./...
rm /root/go/bin/node
GOEXPERIMENT=arenas go install ./...
# Start the ceremonyclient service
service ceremonyclient start
chmod u+x update.sh
When there is a new update, run
./update.sh
## Stats Collection
In order to opt-in to stats collection about the health of the network, edit your `config.yml` in the `node/.config` directory to have a new section under `engine`:
```yml
<earlier parts of config>
engine:
statsMultiaddr: "/dns/stats.quilibrium.com/tcp/443"
<rest of config continues below>
```
## Purpose
The ceremony application provides a secure reference string (SRS) from which
KZG proofs can be constructed for the network. This yields applicability to a
number of proof systems; in particular, for the release after Dawn, it enables
proofs of execution and proofs of data availability for the network.
### Rewards
For participating in a round of the ceremony, nodes will be allocated:
reward = 161 * log_2(participant_count) QUIL
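For example, a round with 1,024 participants yields 161 * log_2(1024) = 1,610
QUIL. A small Go sketch of the same calculation (illustrative only, not code
from the node):
```go
package main

import (
	"fmt"
	"math"
)

// ceremonyReward evaluates the reward formula above; it is an illustrative
// helper, not part of the node's code.
func ceremonyReward(participantCount float64) float64 {
	return 161 * math.Log2(participantCount)
}

func main() {
	// A round with 1024 participants: 161 * 10 = 1610 QUIL.
	fmt.Printf("%.0f QUIL\n", ceremonyReward(1024))
}
```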
### Basic Flow
Rounds of the ceremony proceed in the following order (see the sketch after
this list):
- OPEN: Nodes can join in for the round, deferring preference to nodes that
could not join in on the prior round
- IN PROGRESS: The MPC ceremony round is in progress, nodes are engaging in a
logarithmic collection of Multiplication-to-Add Oblivious Transfer circuits,
each sub round producing a new collection of values, until the sub rounds have
completed, producing a collection of public G1 and G2 BLS48-581 points for each
peer.
- FINALIZING: The collection of points is broadcast and added together,
producing a singular ceremony transcript contribution.
- VALIDATING: The updated ceremony transcript is validated against the
predecessor, and is confirmed to be the new state, issuing rewards to the
participant set. The next round can begin.
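A minimal Go sketch of this phase ordering (the type and names here are
illustrative only, not the node's implementation):
```go
package main

import "fmt"

// roundPhase mirrors the ceremony phases described above.
type roundPhase int

const (
	open roundPhase = iota
	inProgress
	finalizing
	validating
)

var phaseNames = [...]string{"OPEN", "IN PROGRESS", "FINALIZING", "VALIDATING"}

func main() {
	// A round advances through the phases in order; after VALIDATING,
	// the next round can begin at OPEN.
	for p := open; p <= validating; p++ {
		fmt.Println(phaseNames[p])
	}
}
```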
## Pull Requests
Contributions are welcome; a new network is rife with opportunities. We are
in the process of updating our JIRA board so that it can be made public. The
repository has basic coding guidelines:
- 80 character line limit, except where gofmt or the syntax makes this
impossible
- Error wrapping matching function names
- Interface composition and dependency injection with Wire
## Minimum System Requirements
For the Dawn phase, a server must have a minimum of 16GB of RAM (preferably
32GB), 250GB of storage (preferably via SSD), and 50MBps symmetric bandwidth.
For Intel/AMD, the baseline processor is a Skylake processor @ 3.4GHz with 12
dedicated cores. For ARM, the M1 line of Apple is a good reference.
With Dusk, these minimum requirements will reduce significantly.
## License + Interpretation
Significant portions of Quilibrium's codebase depend on GPL-licensed code,
mandating a minimum license of GPL; however, Quilibrium is licensed as AGPL to
accommodate the scenario in which a cloud provider may wish to co-opt the
network software. The AGPL allows such providers to do so, provided they are
willing
to contribute back the management code that interacts with the protocol and node
software. To provide clarity, our interpretation is with respect to node
provisioning and management tooling for deploying alternative networks, and not
applications which are deployed to the network, mainnet status monitors, or
container deployments of mainnet nodes from the public codebase.

View File

@ -64,17 +64,10 @@ This section contains community-built clients, applications, guides, etc <br /><
- A detailed beginners' guide for how to set up a Quilibrium Node, created by [@demipoet](https://www.github.com/demipoet) - [link](https://quilibrium.guide/)<br/>
## Pull Requests
## Development
Contributions are welcome; a new network is rife with opportunities. We are
in the process of updating our JIRA board so that it can be made public. The
repository has basic coding guidelines:
- 80 character line limit, except where gofmt or the syntax makes this
impossible
- Error wrapping matching function names
- Interface composition and dependency injection with Wire
Please see the [CONTRIBUTING.md](CONTRIBUTING.md) file for more information on
how to contribute to this repository.
## License + Interpretation

View File

@ -35,6 +35,7 @@ tasks:
desc: Build the Quilibrium node binary for MacOS ARM. Assumes it's run from the same platform. Outputs to node/build.
cmds:
- vdf/generate.sh
- bls48581/generate.sh
- node/build.sh -o build/arm64_macos/node
build_node_arm64_linux:

9
bls48581/README.md Normal file
View File

@ -0,0 +1,9 @@
# BLS48581
Wrapper for the Rust implementation of BLS48581 in [crates/bls48581](../crates/bls48581).
## Generate Go bindings
```sh
go generate
```
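A minimal usage sketch (a hypothetical helper, not part of this package),
assuming the bindings have been generated and the native library is linked as
in [test.sh](./test.sh):
```go
package usage

import "source.quilibrium.com/quilibrium/monorepo/bls48581"

// VerifyFirstElement commits to data, proves element 0, and verifies that
// element against the commitment, mirroring the round trip in
// bls48581_test.go. The 64-byte element size is taken from that test and is
// an assumption of this sketch.
func VerifyFirstElement(data []byte, polySize uint64) bool {
	commit := bls48581.CommitRaw(data, polySize)
	proof := bls48581.ProveRaw(data, 0, polySize)
	return bls48581.VerifyRaw(data[:64], commit, 0, proof, polySize)
}
```
See [bls48581_test.go](./bls48581_test.go) for a full round trip with concrete
test vectors.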

23
bls48581/bls48581.go Normal file
View File

@ -0,0 +1,23 @@
package bls48581
import (
generated "source.quilibrium.com/quilibrium/monorepo/bls48581/generated/bls48581"
)
//go:generate ./generate.sh
func Init() {
generated.Init()
}
func CommitRaw(data []byte, polySize uint64) []byte {
return generated.CommitRaw(data, polySize)
}
func ProveRaw(data []byte, index uint64, polySize uint64) []byte {
return generated.ProveRaw(data, index, polySize)
}
func VerifyRaw(data []byte, commit []byte, index uint64, proof []byte, polySize uint64) bool {
return generated.VerifyRaw(data, commit, index, proof, polySize)
}

24
bls48581/bls48581_test.go Normal file
View File

@ -0,0 +1,24 @@
package bls48581_test
import (
"bytes"
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
"source.quilibrium.com/quilibrium/monorepo/bls48581"
)
func TestProveVerify(t *testing.T) {
data, _ := hex.DecodeString("408f9f0a63a1c463579a1fdaf82b37e0f397476e87c524915870ce7f5ede9c248493ea4ffefae154b8a55f10add4d75846b273a7f57433b438ae72880a29ab7cab6c3187a14651bac085329778526ebb31d14c9beb7b0983ff5e71a47c96ed9e7149e9e896cd4d604191583a282bdb5a92ea71334f296fd06498323b0c5d0e60c04180a7141813f6f9a6c766c450898ffc437ebed07a2fbd9201207171a0a8f5006a83d9e2430687952dd42237b7d77de61c0655b91bb1943ed4b9337449ded69ef8f2f83fba58827be7b7082db048b799f1bb590f61c558976910e77357562eb4d66fc97636c26ea562fe18b4cc397e679acad23cfd003ae93efe2903534ce1fe475eba3c82fef71554b4d63b593f2da3fea3b1b3f91379c6ff1989c91eaab70e336d96f3c46de987ef7165d111f692fe8205f7df0eb854fc550aa0d10942049dec4c60d99a51b0a7cde49a6d5e9364d0162cb86af1a51efeffacf7935f796f18cb868756e693aa967339efb8e45071da835ff8b6897fe56dc14edb49352edc88d3a6866873ecfa2bf968907e86c0dd139ab9a23bae341ec6aa5f1fbac2390a9d7f5ef9346d5c433268bf85e34e98295233f5e0d2ceb35c47b33b93e8ae9445c3b9f6ec32d8e3a1a1bc95b013dd36a84d803e468e873420c71b6473e44300f4d2702ccb452146c675d5ac1511a0b0a61a857b58ed3365ecdc1cafafbdfe5f0f2420389ae5f54d2fb9d12de314b416fdb12786fb66d0517229347ecc347eb8207a88abeffbdb9acfc582047a9343efae6c21cf67566e2d949920bdff1f4cea376332dd503c9dcd72a776744724c29a25038ef582f1103b406321e14d0f232c709b3d5a3568c75a1bc244b65e18d9ca7c53e2e13bb5638c325f6d43601de131aa2e3b7ffcc23accf6c69e9c6360cf8f4d48de3f11354855ec281f8a9c85caec0b8284c99c66a43ed0c37d6ce0f5c349e4551da6a1d9edcfa02f6be27ed037c5ec79c0519ba60725f89b3fe7826ca1a7b157ef9360bc2007bc2b9dd2ba8fdc225047a9f66b832e2da1dc6019f480e3aadb46ba93cccbd1e7b221a5d36e0fc96cbf497bfb40ff0276f14b7d45c4738a1b755e2754c5c352ac4af96c1a9be1d92942200b325cc3c53e9b3099c99a466bdc6c001179f6c63f828936b1c33f651a150c080b2eac8ed7cb9cfe599daee477f9ba88a6d1cbdeb08995c3c7bcce18ee2946c2beb138b8c797f61c6c33800ffeda74b77dab186cc4c7e91e9aca954d4863de6b04a82ef563a6eefbedec8fdc9284fb33e15197d2512e4928019fc29aa9c0a199797ef02c8daeb8706dd21a0e6b25b0e73795bac18dfaac2abc1defddf530f6a14046c2a918fa581b7ab0240bbd4f2e570a527581cb0a39bb544ceeabeedf891bc2417ac1e1fa558c09a9ceffef108a5778ff99a8575b4fb69cbbfb2c474d58")
commit := bls48581.CommitRaw(data, 1024)
targetCommit, _ := hex.DecodeString("020d105dafd65908befb8e1f43ea523b4e3b3f5b03c60439f2d3475d96a212d0ba013d7cf6245aa05490c5bbcbe37a2e9bf38c20f8d8545bc34efefdd102625c695cadc6caa01dc253db")
assert.True(t, bytes.Equal(commit, targetCommit))
proof := bls48581.ProveRaw(data, 0, 1024)
targetProof, _ := hex.DecodeString("0209230d7f5db98b70eddbd486502edad3298dfac35d9838818c7c94540932706fd2107c7105596d74ca06883bc1e2c80c2b3e1aa9407c098c98462cb6ef4a7419e7fc174b10ad2cb29f")
assert.True(t, bytes.Equal(proof, targetProof))
isOk := bls48581.VerifyRaw(data[:64], commit[:], 0, proof[:], 1024)
if !isOk {
t.Fatalf("Verification failed")
}
}

14
bls48581/generate.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euxo pipefail
ROOT_DIR="${ROOT_DIR:-$( cd "$(dirname "$(realpath "$( dirname "${BASH_SOURCE[0]}" )")")" >/dev/null 2>&1 && pwd )}"
RUST_BLS48581_PACKAGE="$ROOT_DIR/crates/bls48581"
BINDINGS_DIR="$ROOT_DIR/bls48581"
# Build the Rust BLS48581 package in release mode
cargo build -p bls48581 --release
# Generate Go bindings
pushd "$RUST_BLS48581_PACKAGE" > /dev/null
uniffi-bindgen-go src/lib.udl -o "$BINDINGS_DIR"/generated

View File

@ -0,0 +1,8 @@
#include <bls48581.h>
// This file exists because of
// https://github.com/golang/go/issues/11263
void cgo_rust_task_callback_bridge_bls48581(RustTaskCallback cb, const void * taskData, int8_t status) {
cb(taskData, status);
}

View File

@ -0,0 +1,578 @@
package bls48581
// #include <bls48581.h>
import "C"
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"unsafe"
)
type RustBuffer = C.RustBuffer
type RustBufferI interface {
AsReader() *bytes.Reader
Free()
ToGoBytes() []byte
Data() unsafe.Pointer
Len() int
Capacity() int
}
func RustBufferFromExternal(b RustBufferI) RustBuffer {
return RustBuffer{
capacity: C.int(b.Capacity()),
len: C.int(b.Len()),
data: (*C.uchar)(b.Data()),
}
}
func (cb RustBuffer) Capacity() int {
return int(cb.capacity)
}
func (cb RustBuffer) Len() int {
return int(cb.len)
}
func (cb RustBuffer) Data() unsafe.Pointer {
return unsafe.Pointer(cb.data)
}
func (cb RustBuffer) AsReader() *bytes.Reader {
b := unsafe.Slice((*byte)(cb.data), C.int(cb.len))
return bytes.NewReader(b)
}
func (cb RustBuffer) Free() {
rustCall(func(status *C.RustCallStatus) bool {
C.ffi_bls48581_rustbuffer_free(cb, status)
return false
})
}
func (cb RustBuffer) ToGoBytes() []byte {
return C.GoBytes(unsafe.Pointer(cb.data), C.int(cb.len))
}
func stringToRustBuffer(str string) RustBuffer {
return bytesToRustBuffer([]byte(str))
}
func bytesToRustBuffer(b []byte) RustBuffer {
if len(b) == 0 {
return RustBuffer{}
}
// We can pass the pointer along here, as it is pinned
// for the duration of this call
foreign := C.ForeignBytes{
len: C.int(len(b)),
data: (*C.uchar)(unsafe.Pointer(&b[0])),
}
return rustCall(func(status *C.RustCallStatus) RustBuffer {
return C.ffi_bls48581_rustbuffer_from_bytes(foreign, status)
})
}
type BufLifter[GoType any] interface {
Lift(value RustBufferI) GoType
}
type BufLowerer[GoType any] interface {
Lower(value GoType) RustBuffer
}
type FfiConverter[GoType any, FfiType any] interface {
Lift(value FfiType) GoType
Lower(value GoType) FfiType
}
type BufReader[GoType any] interface {
Read(reader io.Reader) GoType
}
type BufWriter[GoType any] interface {
Write(writer io.Writer, value GoType)
}
type FfiRustBufConverter[GoType any, FfiType any] interface {
FfiConverter[GoType, FfiType]
BufReader[GoType]
}
func LowerIntoRustBuffer[GoType any](bufWriter BufWriter[GoType], value GoType) RustBuffer {
// This might be not the most efficient way but it does not require knowing allocation size
// beforehand
var buffer bytes.Buffer
bufWriter.Write(&buffer, value)
bytes, err := io.ReadAll(&buffer)
if err != nil {
panic(fmt.Errorf("reading written data: %w", err))
}
return bytesToRustBuffer(bytes)
}
func LiftFromRustBuffer[GoType any](bufReader BufReader[GoType], rbuf RustBufferI) GoType {
defer rbuf.Free()
reader := rbuf.AsReader()
item := bufReader.Read(reader)
if reader.Len() > 0 {
// TODO: Remove this
leftover, _ := io.ReadAll(reader)
panic(fmt.Errorf("Junk remaining in buffer after lifting: %s", string(leftover)))
}
return item
}
func rustCallWithError[U any](converter BufLifter[error], callback func(*C.RustCallStatus) U) (U, error) {
var status C.RustCallStatus
returnValue := callback(&status)
err := checkCallStatus(converter, status)
return returnValue, err
}
func checkCallStatus(converter BufLifter[error], status C.RustCallStatus) error {
switch status.code {
case 0:
return nil
case 1:
return converter.Lift(status.errorBuf)
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
}
}
func checkCallStatusUnknown(status C.RustCallStatus) error {
switch status.code {
case 0:
return nil
case 1:
panic(fmt.Errorf("function not returning an error returned an error"))
case 2:
// when the rust code sees a panic, it tries to construct a rustbuffer
// with the message. but if that code panics, then it just sends back
// an empty buffer.
if status.errorBuf.len > 0 {
panic(fmt.Errorf("%s", FfiConverterStringINSTANCE.Lift(status.errorBuf)))
} else {
panic(fmt.Errorf("Rust panicked while handling Rust panic"))
}
default:
return fmt.Errorf("unknown status code: %d", status.code)
}
}
func rustCall[U any](callback func(*C.RustCallStatus) U) U {
returnValue, err := rustCallWithError(nil, callback)
if err != nil {
panic(err)
}
return returnValue
}
func writeInt8(writer io.Writer, value int8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeUint8(writer io.Writer, value uint8) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeInt16(writer io.Writer, value int16) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeUint16(writer io.Writer, value uint16) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeInt32(writer io.Writer, value int32) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeUint32(writer io.Writer, value uint32) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeInt64(writer io.Writer, value int64) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeUint64(writer io.Writer, value uint64) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeFloat32(writer io.Writer, value float32) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func writeFloat64(writer io.Writer, value float64) {
if err := binary.Write(writer, binary.BigEndian, value); err != nil {
panic(err)
}
}
func readInt8(reader io.Reader) int8 {
var result int8
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readUint8(reader io.Reader) uint8 {
var result uint8
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readInt16(reader io.Reader) int16 {
var result int16
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readUint16(reader io.Reader) uint16 {
var result uint16
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readInt32(reader io.Reader) int32 {
var result int32
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readUint32(reader io.Reader) uint32 {
var result uint32
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readInt64(reader io.Reader) int64 {
var result int64
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readUint64(reader io.Reader) uint64 {
var result uint64
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readFloat32(reader io.Reader) float32 {
var result float32
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func readFloat64(reader io.Reader) float64 {
var result float64
if err := binary.Read(reader, binary.BigEndian, &result); err != nil {
panic(err)
}
return result
}
func init() {
uniffiCheckChecksums()
}
func uniffiCheckChecksums() {
// Get the bindings contract version from our ComponentInterface
bindingsContractVersion := 24
// Get the scaffolding contract version by calling the into the dylib
scaffoldingContractVersion := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint32_t {
return C.ffi_bls48581_uniffi_contract_version(uniffiStatus)
})
if bindingsContractVersion != int(scaffoldingContractVersion) {
// If this happens try cleaning and rebuilding your project
panic("bls48581: UniFFI contract version mismatch")
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_commit_raw(uniffiStatus)
})
if checksum != 20099 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_commit_raw: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_init(uniffiStatus)
})
if checksum != 11227 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_init: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_prove_raw(uniffiStatus)
})
if checksum != 64858 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_prove_raw: UniFFI API checksum mismatch")
}
}
{
checksum := rustCall(func(uniffiStatus *C.RustCallStatus) C.uint16_t {
return C.uniffi_bls48581_checksum_func_verify_raw(uniffiStatus)
})
if checksum != 52165 {
// If this happens try cleaning and rebuilding your project
panic("bls48581: uniffi_bls48581_checksum_func_verify_raw: UniFFI API checksum mismatch")
}
}
}
type FfiConverterUint8 struct{}
var FfiConverterUint8INSTANCE = FfiConverterUint8{}
func (FfiConverterUint8) Lower(value uint8) C.uint8_t {
return C.uint8_t(value)
}
func (FfiConverterUint8) Write(writer io.Writer, value uint8) {
writeUint8(writer, value)
}
func (FfiConverterUint8) Lift(value C.uint8_t) uint8 {
return uint8(value)
}
func (FfiConverterUint8) Read(reader io.Reader) uint8 {
return readUint8(reader)
}
type FfiDestroyerUint8 struct{}
func (FfiDestroyerUint8) Destroy(_ uint8) {}
type FfiConverterUint64 struct{}
var FfiConverterUint64INSTANCE = FfiConverterUint64{}
func (FfiConverterUint64) Lower(value uint64) C.uint64_t {
return C.uint64_t(value)
}
func (FfiConverterUint64) Write(writer io.Writer, value uint64) {
writeUint64(writer, value)
}
func (FfiConverterUint64) Lift(value C.uint64_t) uint64 {
return uint64(value)
}
func (FfiConverterUint64) Read(reader io.Reader) uint64 {
return readUint64(reader)
}
type FfiDestroyerUint64 struct{}
func (FfiDestroyerUint64) Destroy(_ uint64) {}
type FfiConverterBool struct{}
var FfiConverterBoolINSTANCE = FfiConverterBool{}
func (FfiConverterBool) Lower(value bool) C.int8_t {
if value {
return C.int8_t(1)
}
return C.int8_t(0)
}
func (FfiConverterBool) Write(writer io.Writer, value bool) {
if value {
writeInt8(writer, 1)
} else {
writeInt8(writer, 0)
}
}
func (FfiConverterBool) Lift(value C.int8_t) bool {
return value != 0
}
func (FfiConverterBool) Read(reader io.Reader) bool {
return readInt8(reader) != 0
}
type FfiDestroyerBool struct{}
func (FfiDestroyerBool) Destroy(_ bool) {}
type FfiConverterString struct{}
var FfiConverterStringINSTANCE = FfiConverterString{}
func (FfiConverterString) Lift(rb RustBufferI) string {
defer rb.Free()
reader := rb.AsReader()
b, err := io.ReadAll(reader)
if err != nil {
panic(fmt.Errorf("reading reader: %w", err))
}
return string(b)
}
func (FfiConverterString) Read(reader io.Reader) string {
length := readInt32(reader)
buffer := make([]byte, length)
read_length, err := reader.Read(buffer)
if err != nil {
panic(err)
}
if read_length != int(length) {
panic(fmt.Errorf("bad read length when reading string, expected %d, read %d", length, read_length))
}
return string(buffer)
}
func (FfiConverterString) Lower(value string) RustBuffer {
return stringToRustBuffer(value)
}
func (FfiConverterString) Write(writer io.Writer, value string) {
if len(value) > math.MaxInt32 {
panic("String is too large to fit into Int32")
}
writeInt32(writer, int32(len(value)))
write_length, err := io.WriteString(writer, value)
if err != nil {
panic(err)
}
if write_length != len(value) {
panic(fmt.Errorf("bad write length when writing string, expected %d, written %d", len(value), write_length))
}
}
type FfiDestroyerString struct{}
func (FfiDestroyerString) Destroy(_ string) {}
type FfiConverterSequenceUint8 struct{}
var FfiConverterSequenceUint8INSTANCE = FfiConverterSequenceUint8{}
func (c FfiConverterSequenceUint8) Lift(rb RustBufferI) []uint8 {
return LiftFromRustBuffer[[]uint8](c, rb)
}
func (c FfiConverterSequenceUint8) Read(reader io.Reader) []uint8 {
length := readInt32(reader)
if length == 0 {
return nil
}
result := make([]uint8, 0, length)
for i := int32(0); i < length; i++ {
result = append(result, FfiConverterUint8INSTANCE.Read(reader))
}
return result
}
func (c FfiConverterSequenceUint8) Lower(value []uint8) RustBuffer {
return LowerIntoRustBuffer[[]uint8](c, value)
}
func (c FfiConverterSequenceUint8) Write(writer io.Writer, value []uint8) {
if len(value) > math.MaxInt32 {
panic("[]uint8 is too large to fit into Int32")
}
writeInt32(writer, int32(len(value)))
for _, item := range value {
FfiConverterUint8INSTANCE.Write(writer, item)
}
}
type FfiDestroyerSequenceUint8 struct{}
func (FfiDestroyerSequenceUint8) Destroy(sequence []uint8) {
for _, value := range sequence {
FfiDestroyerUint8{}.Destroy(value)
}
}
func CommitRaw(data []uint8, polySize uint64) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_commit_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
}))
}
func Init() {
rustCall(func(_uniffiStatus *C.RustCallStatus) bool {
C.uniffi_bls48581_fn_func_init(_uniffiStatus)
return false
})
}
func ProveRaw(data []uint8, index uint64, polySize uint64) []uint8 {
return FfiConverterSequenceUint8INSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) RustBufferI {
return C.uniffi_bls48581_fn_func_prove_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterUint64INSTANCE.Lower(index), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
}))
}
func VerifyRaw(data []uint8, commit []uint8, index uint64, proof []uint8, polySize uint64) bool {
return FfiConverterBoolINSTANCE.Lift(rustCall(func(_uniffiStatus *C.RustCallStatus) C.int8_t {
return C.uniffi_bls48581_fn_func_verify_raw(FfiConverterSequenceUint8INSTANCE.Lower(data), FfiConverterSequenceUint8INSTANCE.Lower(commit), FfiConverterUint64INSTANCE.Lower(index), FfiConverterSequenceUint8INSTANCE.Lower(proof), FfiConverterUint64INSTANCE.Lower(polySize), _uniffiStatus)
}))
}

View File

@ -0,0 +1,417 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#include <stdbool.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V6
#ifndef UNIFFI_SHARED_HEADER_V6
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V6
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V6
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V6 in this file. ⚠️
typedef struct RustBuffer {
int32_t capacity;
int32_t len;
uint8_t *data;
} RustBuffer;
typedef int32_t (*ForeignCallback)(uint64_t, int32_t, uint8_t *, int32_t, RustBuffer *);
// Task defined in Rust that Go executes
typedef void (*RustTaskCallback)(const void *, int8_t);
// Callback to execute Rust tasks using a Go routine
//
// Args:
// executor: ForeignExecutor lowered into a uint64_t value
// delay: Delay in MS
// task: RustTaskCallback to call
// task_data: data to pass the task callback
typedef int8_t (*ForeignExecutorCallback)(uint64_t, uint32_t, RustTaskCallback, void *);
typedef struct ForeignBytes {
int32_t len;
const uint8_t *data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
// Continuation callback for UniFFI Futures
typedef void (*RustFutureContinuation)(void * , int8_t);
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V6 in this file. ⚠️
#endif // def UNIFFI_SHARED_H
// Needed because we can't execute the callback directly from go.
void cgo_rust_task_callback_bridge_bls48581(RustTaskCallback, const void *, int8_t);
int8_t uniffiForeignExecutorCallbackbls48581(uint64_t, uint32_t, RustTaskCallback, void*);
void uniffiFutureContinuationCallbackbls48581(void*, int8_t);
RustBuffer uniffi_bls48581_fn_func_commit_raw(
RustBuffer data,
uint64_t poly_size,
RustCallStatus* out_status
);
void uniffi_bls48581_fn_func_init(
RustCallStatus* out_status
);
RustBuffer uniffi_bls48581_fn_func_prove_raw(
RustBuffer data,
uint64_t index,
uint64_t poly_size,
RustCallStatus* out_status
);
int8_t uniffi_bls48581_fn_func_verify_raw(
RustBuffer data,
RustBuffer commit,
uint64_t index,
RustBuffer proof,
uint64_t poly_size,
RustCallStatus* out_status
);
RustBuffer ffi_bls48581_rustbuffer_alloc(
int32_t size,
RustCallStatus* out_status
);
RustBuffer ffi_bls48581_rustbuffer_from_bytes(
ForeignBytes bytes,
RustCallStatus* out_status
);
void ffi_bls48581_rustbuffer_free(
RustBuffer buf,
RustCallStatus* out_status
);
RustBuffer ffi_bls48581_rustbuffer_reserve(
RustBuffer buf,
int32_t additional,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_continuation_callback_set(
RustFutureContinuation callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_u8(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_u8(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_u8(
void* handle,
RustCallStatus* out_status
);
uint8_t ffi_bls48581_rust_future_complete_u8(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_i8(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_i8(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_i8(
void* handle,
RustCallStatus* out_status
);
int8_t ffi_bls48581_rust_future_complete_i8(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_u16(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_u16(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_u16(
void* handle,
RustCallStatus* out_status
);
uint16_t ffi_bls48581_rust_future_complete_u16(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_i16(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_i16(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_i16(
void* handle,
RustCallStatus* out_status
);
int16_t ffi_bls48581_rust_future_complete_i16(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_u32(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_u32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_u32(
void* handle,
RustCallStatus* out_status
);
uint32_t ffi_bls48581_rust_future_complete_u32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_i32(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_i32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_i32(
void* handle,
RustCallStatus* out_status
);
int32_t ffi_bls48581_rust_future_complete_i32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_u64(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_u64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_u64(
void* handle,
RustCallStatus* out_status
);
uint64_t ffi_bls48581_rust_future_complete_u64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_i64(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_i64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_i64(
void* handle,
RustCallStatus* out_status
);
int64_t ffi_bls48581_rust_future_complete_i64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_f32(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_f32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_f32(
void* handle,
RustCallStatus* out_status
);
float ffi_bls48581_rust_future_complete_f32(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_f64(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_f64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_f64(
void* handle,
RustCallStatus* out_status
);
double ffi_bls48581_rust_future_complete_f64(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_pointer(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_pointer(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_pointer(
void* handle,
RustCallStatus* out_status
);
void* ffi_bls48581_rust_future_complete_pointer(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_rust_buffer(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_rust_buffer(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_rust_buffer(
void* handle,
RustCallStatus* out_status
);
RustBuffer ffi_bls48581_rust_future_complete_rust_buffer(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_poll_void(
void* handle,
void* uniffi_callback,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_cancel_void(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_free_void(
void* handle,
RustCallStatus* out_status
);
void ffi_bls48581_rust_future_complete_void(
void* handle,
RustCallStatus* out_status
);
uint16_t uniffi_bls48581_checksum_func_commit_raw(
RustCallStatus* out_status
);
uint16_t uniffi_bls48581_checksum_func_init(
RustCallStatus* out_status
);
uint16_t uniffi_bls48581_checksum_func_prove_raw(
RustCallStatus* out_status
);
uint16_t uniffi_bls48581_checksum_func_verify_raw(
RustCallStatus* out_status
);
uint32_t ffi_bls48581_uniffi_contract_version(
RustCallStatus* out_status
);

17
bls48581/go.mod Normal file
View File

@ -0,0 +1,17 @@
module source.quilibrium.com/quilibrium/monorepo/bls48581
go 1.20
// A necessary hack until source.quilibrium.com is open to all
replace source.quilibrium.com/quilibrium/monorepo/nekryptology => ../nekryptology
require github.com/stretchr/testify v1.9.0
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/sys v0.21.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000 // indirect
)

13
bls48581/go.sum Normal file
View File

@ -0,0 +1,13 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

17
bls48581/test.sh Executable file
View File

@ -0,0 +1,17 @@
#!/bin/bash
set -euxo pipefail
# Run tests for the bls48581 package. Takes care of linking the native BLS48581 library.
# Assumes that the BLS48581 library has been built by running the generate.sh script in the same directory.
ROOT_DIR="${ROOT_DIR:-$( cd "$(dirname "$(realpath "$( dirname "${BASH_SOURCE[0]}" )")")" >/dev/null 2>&1 && pwd )}"
NODE_DIR="$ROOT_DIR/bls48581"
BINARIES_DIR="$ROOT_DIR/target/release"
# Link the native BLS48581 library and execute tests
pushd "$NODE_DIR" > /dev/null
CGO_LDFLAGS="-L$BINARIES_DIR -lbls48581 -ldl" \
CGO_ENABLED=1 \
GOEXPERIMENT=arenas \
go test "$@"

7
crates/bls48581/Cargo.lock generated Normal file
View File

@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "bls48581"
version = "0.1.0"

View File

@ -0,0 +1,16 @@
[package]
name = "bls48581"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["lib", "staticlib"]
name = "bls48581"
[dependencies]
hex = "0.4.3"
serde_json = "1.0.117"
uniffi = { version= "0.25", features = ["cli"]}
[build-dependencies]
uniffi = { version = "0.25", features = [ "build" ] }

5
crates/bls48581/build.rs Normal file
View File

@ -0,0 +1,5 @@
fn main() {
println!("cargo:rerun-if-changed=build.rs");
uniffi::generate_scaffolding("src/lib.udl").expect("uniffi generation failed");
}

901
crates/bls48581/src/aes.rs Normal file
View File

@ -0,0 +1,901 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub const ECB: usize = 0;
pub const CBC: usize = 1;
pub const CFB1: usize = 2;
pub const CFB2: usize = 3;
pub const CFB4: usize = 5;
pub const OFB1: usize = 14;
pub const OFB2: usize = 15;
pub const OFB4: usize = 17;
pub const OFB8: usize = 21;
pub const OFB16: usize = 29;
pub const CTR1: usize = 30;
pub const CTR2: usize = 31;
pub const CTR4: usize = 33;
pub const CTR8: usize = 37;
pub const CTR16: usize = 45;
const INCO: [u8; 4] = [0xB, 0xD, 0x9, 0xE]; /* Inverse Coefficients */
const PTAB: [u8; 256] = [
1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115,
149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217,
112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208,
107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240,
11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174,
233, 32, 96, 160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195,
94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172,
239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175,
234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176,
203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54,
90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151,
162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
];
const LTAB: [u8; 256] = [
0, 255, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3, 100, 4, 224, 14, 52, 141,
129, 239, 76, 113, 8, 200, 248, 105, 28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228,
166, 114, 154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218,
142, 150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56, 102, 221, 253,
48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182, 30, 66,
58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243,
115, 167, 87, 175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44,
215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160, 127, 12, 246, 111, 23,
196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82,
161, 108, 170, 85, 41, 157, 151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63,
91, 209, 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146,
217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197, 49, 254,
24, 13, 99, 140, 128, 192, 247, 112, 7,
];
const FBSUB: [u8; 256] = [
99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, 202, 130, 201, 125,
250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204,
52, 165, 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235,
39, 178, 117, 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0,
237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170, 251, 67, 77, 51, 133,
69, 249, 2, 127, 80, 60, 159, 168, 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16,
255, 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, 96, 129,
79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92,
194, 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234,
101, 122, 174, 8, 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248, 152, 17, 105,
217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65,
153, 45, 15, 176, 84, 187, 22,
];
const RBSUB: [u8; 256] = [
82, 9, 106, 213, 48, 54, 165, 56, 191, 64, 163, 158, 129, 243, 215, 251, 124, 227, 57, 130,
155, 47, 255, 135, 52, 142, 67, 68, 196, 222, 233, 203, 84, 123, 148, 50, 166, 194, 35, 61,
238, 76, 149, 11, 66, 250, 195, 78, 8, 46, 161, 102, 40, 217, 36, 178, 118, 91, 162, 73, 109,
139, 209, 37, 114, 248, 246, 100, 134, 104, 152, 22, 212, 164, 92, 204, 93, 101, 182, 146, 108,
112, 72, 80, 253, 237, 185, 218, 94, 21, 70, 87, 167, 141, 157, 132, 144, 216, 171, 0, 140,
188, 211, 10, 247, 228, 88, 5, 184, 179, 69, 6, 208, 44, 30, 143, 202, 63, 15, 2, 193, 175,
189, 3, 1, 19, 138, 107, 58, 145, 17, 65, 79, 103, 220, 234, 151, 242, 207, 206, 240, 180, 230,
115, 150, 172, 116, 34, 231, 173, 53, 133, 226, 249, 55, 232, 28, 117, 223, 110, 71, 241, 26,
113, 29, 41, 197, 137, 111, 183, 98, 14, 170, 24, 190, 27, 252, 86, 62, 75, 198, 210, 121, 32,
154, 219, 192, 254, 120, 205, 90, 244, 31, 221, 168, 51, 136, 7, 199, 49, 177, 18, 16, 89, 39,
128, 236, 95, 96, 81, 127, 169, 25, 181, 74, 13, 45, 229, 122, 159, 147, 201, 156, 239, 160,
224, 59, 77, 174, 42, 245, 176, 200, 235, 187, 60, 131, 83, 153, 97, 23, 43, 4, 126, 186, 119,
214, 38, 225, 105, 20, 99, 85, 33, 12, 125,
];
const RCO: [u8; 16] = [
1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47,
];
const FTABLE: [u32; 256] = [
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 0xdf2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
0x50303060, 0x3010102, 0xa96767ce, 0x7d2b2b56, 0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec,
0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa, 0x15fafaef, 0xeb5959b2, 0xc947478e, 0xbf0f0fb,
0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45, 0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b,
0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c, 0x5a36366c, 0x413f3f7e, 0x2f7f7f5, 0x4fcccc83,
0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x8f1f1f9, 0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
0xc040408, 0x52c7c795, 0x65232346, 0x5ec3c39d, 0x28181830, 0xa1969637, 0xf05050a, 0xb59a9a2f,
0x907070e, 0x36121224, 0x9b80801b, 0x3de2e2df, 0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea,
0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34, 0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d, 0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413,
0xf55353a6, 0x68d1d1b9, 0x0, 0x2cededc1, 0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6,
0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972, 0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed, 0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511,
0xcf45458a, 0x10f9f9e9, 0x6020204, 0x817f7ffe, 0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b,
0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05, 0xad92923f, 0xbc9d9d21, 0x48383870, 0x4f5f5f1,
0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142, 0x30101020, 0x1affffe5, 0xef3f3fd, 0x6dd2d2bf,
0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3, 0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e,
0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a, 0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3, 0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b,
0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428, 0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad,
0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14, 0xdb494992, 0xa06060c, 0x6c242448, 0xe45c5cb8,
0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4, 0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2,
0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda, 0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949,
0xb46c6cd8, 0xfa5656ac, 0x7f4f4f3, 0x25eaeacf, 0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c, 0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697,
0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e, 0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f,
0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc, 0xd8484890, 0x5030306, 0x1f6f6f7, 0x120e0e1c,
0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969, 0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27,
0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122, 0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433,
0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9, 0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a, 0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0,
0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e, 0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
];
const RTABLE: [u32; 256] = [
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a, 0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5, 0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5,
0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d, 0x2752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b,
0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295, 0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e,
0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927, 0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d,
0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362, 0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9,
0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52, 0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566,
0x728ebb2, 0x3c2b52f, 0x9a7bc586, 0xa50837d3, 0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed,
0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e, 0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4,
0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4, 0x39ec830b, 0xaaef6040, 0x69f715e, 0x51106ebd,
0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d, 0xb58d5491, 0x55dc471, 0x6fd40604, 0xff155060,
0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967, 0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879,
0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x0, 0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c,
0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36, 0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624,
0xb1670a0c, 0xfe75793, 0xd296eeb4, 0x9e919b1b, 0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c,
0xaba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12, 0xb0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14,
0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3, 0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b,
0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8, 0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684,
0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7, 0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177,
0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947, 0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322,
0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498, 0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f,
0xe49d3a2c, 0xd927850, 0x9bcc5f6a, 0x62467e54, 0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382,
0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf, 0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb,
0x97826cd, 0xf418596e, 0x1b79aec, 0xa89a4f83, 0x656e95e6, 0x7ee6ffaa, 0x8cfbc21, 0xe6e815ef,
0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029, 0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235,
0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733, 0x4a9804f1, 0xf7daec41, 0xe50cd7f, 0x2ff69117,
0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4, 0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546,
0x4ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb, 0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d,
0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb, 0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a,
0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773, 0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478,
0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2, 0x72c31d16, 0xc25e2bc, 0x8b493c28, 0x41950dff,
0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664, 0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0,
];
pub struct AES {
// nk: usize,
nr: usize,
mode: usize,
fkey: [u32; 60],
rkey: [u32; 60],
pub f: [u8; 16],
}
fn rotl8(x: u32) -> u32 {
((x) << 8) | ((x) >> 24)
}
fn rotl16(x: u32) -> u32 {
((x) << 16) | ((x) >> 16)
}
fn rotl24(x: u32) -> u32 {
((x) << 24) | ((x) >> 8)
}
fn pack(b: [u8; 4]) -> u32 {
/* pack bytes into a 32-bit Word */
((b[3] as u32) << 24)
| ((b[2] as u32) << 16)
| ((b[1] as u32) << 8)
| (b[0] as u32)
}
fn unpack(a: u32) -> [u8; 4] {
/* unpack bytes from a word */
[
(a & 0xff) as u8,
((a >> 8) & 0xff) as u8,
((a >> 16) & 0xff) as u8,
((a >> 24) & 0xff) as u8,
]
}
fn bmul(x: u8, y: u8) -> u8 {
/* x.y= AntiLog(Log(x) + Log(y)) */
let ix = (x as usize) & 0xff;
let iy = (y as usize) & 0xff;
let lx = (LTAB[ix] as usize) & 0xff;
let ly = (LTAB[iy] as usize) & 0xff;
if x != 0 && y != 0 {
PTAB[(lx + ly) % 255]
} else {
0
}
}
fn subbyte(a: u32) -> u32 {
let mut b = unpack(a);
b[0] = FBSUB[b[0] as usize];
b[1] = FBSUB[b[1] as usize];
b[2] = FBSUB[b[2] as usize];
b[3] = FBSUB[b[3] as usize];
pack(b)
}
fn product(x: u32, y: u32) -> u8 {
/* dot product of two 4-byte arrays */
let xb = unpack(x);
let yb = unpack(y);
bmul(xb[0], yb[0])
^ bmul(xb[1], yb[1])
^ bmul(xb[2], yb[2])
^ bmul(xb[3], yb[3])
}
fn invmixcol(x: u32) -> u32 {
/* matrix Multiplication */
let mut b: [u8; 4] = [0; 4];
let mut m = pack(INCO);
b[3] = product(m, x);
m = rotl24(m);
b[2] = product(m, x);
m = rotl24(m);
b[1] = product(m, x);
m = rotl24(m);
b[0] = product(m, x);
pack(b)
}
fn increment(f: &mut [u8; 16]) {
for i in 0..16 {
f[i] += 1;
if f[i] != 0 {
break;
}
}
}
impl AES {
pub fn new() -> AES {
AES {
// nk: 0,
nr: 0,
mode: 0,
fkey: [0; 60],
rkey: [0; 60],
f: [0; 16],
}
}
/* reset cipher */
pub fn reset(&mut self, m: usize, iv: Option<[u8; 16]>) {
/* reset mode, or reset iv */
self.mode = m;
for i in 0..16 {
self.f[i] = 0
}
if self.mode != ECB {
if let Some(x) = iv {
for i in 0..16 {
self.f[i] = x[i]
}
}
}
}
pub fn init(&mut self, m: usize, nkey: usize, key: &[u8], iv: Option<[u8; 16]>) -> bool {
/* Key Scheduler. Create expanded encryption key */
let mut cipherkey: [u32; 8] = [0; 8];
let mut b: [u8; 4] = [0; 4];
let nk = nkey / 4;
if nk != 4 && nk != 6 && nk != 8 {
return false;
}
let nr = 6 + nk;
//self.nk = nk;
self.nr = nr;
self.reset(m, iv);
let n = 4 * (nr + 1);
let mut j = 0;
for i in 0..nk {
for k in 0..4 {
b[k] = key[j + k]
}
cipherkey[i] = pack(b);
j += 4;
}
for i in 0..nk {
self.fkey[i] = cipherkey[i]
}
j = nk;
let mut k = 0;
while j < n {
self.fkey[j] =
self.fkey[j - nk] ^ subbyte(rotl24(self.fkey[j - 1])) ^ (RCO[k] as u32);
if nk<=6 {
for i in 1..nk {
if (i + j) >= n {
break;
}
self.fkey[i + j] = self.fkey[i + j - nk] ^ self.fkey[i + j - 1];
}
} else {
for i in 1..4 {
if (i + j) >= n {
break;
}
self.fkey[i + j] = self.fkey[i + j - nk] ^ self.fkey[i + j - 1];
}
if (j + 4) < n {
self.fkey[j + 4] = self.fkey[j + 4 - nk] ^ subbyte(self.fkey[j + 3]);
}
for i in 5..nk {
if (i + j) >= n {
break;
}
self.fkey[i + j] = self.fkey[i + j - nk] ^ self.fkey[i + j - 1];
}
}
j += nk;
k += 1;
}
/* now for the expanded decrypt key in reverse order */
for j in 0..4 {
self.rkey[j + n - 4] = self.fkey[j]
}
let mut i = 4;
while i < n - 4 {
let k = n - 4 - i;
for j in 0..4 {
self.rkey[k + j] = invmixcol(self.fkey[i + j])
}
i += 4;
}
for j in n - 4..n {
self.rkey[j + 4 - n] = self.fkey[j]
}
true
}
pub fn getreg(&mut self) -> [u8; 16] {
let mut ir: [u8; 16] = [0; 16];
for i in 0..16 {
ir[i] = self.f[i]
}
ir
}
/* Encrypt a single block */
pub fn ecb_encrypt(&mut self, buff: &mut [u8; 16]) {
let mut b: [u8; 4] = [0; 4];
let mut p: [u32; 4] = [0; 4];
let mut q: [u32; 4] = [0; 4];
let mut j = 0;
for i in 0..4 {
for k in 0..4 {
b[k] = buff[j + k]
}
p[i] = pack(b);
p[i] ^= self.fkey[i];
j += 4;
}
let mut k = 4;
/* State alternates between p and q */
for _ in 1..self.nr {
q[0] = self.fkey[k]
^ FTABLE[(p[0] & 0xff) as usize]
^ rotl8(FTABLE[((p[1] >> 8) & 0xff) as usize])
^ rotl16(FTABLE[((p[2] >> 16) & 0xff) as usize])
^ rotl24(FTABLE[((p[3] >> 24) & 0xff) as usize]);
q[1] = self.fkey[k + 1]
^ FTABLE[(p[1] & 0xff) as usize]
^ rotl8(FTABLE[((p[2] >> 8) & 0xff) as usize])
^ rotl16(FTABLE[((p[3] >> 16) & 0xff) as usize])
^ rotl24(FTABLE[((p[0] >> 24) & 0xff) as usize]);
q[2] = self.fkey[k + 2]
^ FTABLE[(p[2] & 0xff) as usize]
^ rotl8(FTABLE[((p[3] >> 8) & 0xff) as usize])
^ rotl16(FTABLE[((p[0] >> 16) & 0xff) as usize])
^ rotl24(FTABLE[((p[1] >> 24) & 0xff) as usize]);
q[3] = self.fkey[k + 3]
^ FTABLE[(p[3] & 0xff) as usize]
^ rotl8(FTABLE[((p[0] >> 8) & 0xff) as usize])
^ rotl16(FTABLE[((p[1] >> 16) & 0xff) as usize])
^ rotl24(FTABLE[((p[2] >> 24) & 0xff) as usize]);
k += 4;
for j in 0..4 {
let t = p[j];
p[j] = q[j];
q[j] = t;
}
}
/* Last Round */
q[0] = self.fkey[k]
^ (FBSUB[(p[0] & 0xff) as usize] as u32)
^ rotl8((FBSUB[((p[1] >> 8) & 0xff) as usize]) as u32)
^ rotl16((FBSUB[((p[2] >> 16) & 0xff) as usize]) as u32)
^ rotl24((FBSUB[((p[3] >> 24) & 0xff) as usize]) as u32);
q[1] = self.fkey[k + 1]
^ (FBSUB[(p[1] & 0xff) as usize] as u32)
^ rotl8((FBSUB[((p[2] >> 8) & 0xff) as usize]) as u32)
^ rotl16((FBSUB[((p[3] >> 16) & 0xff) as usize]) as u32)
^ rotl24((FBSUB[((p[0] >> 24) & 0xff) as usize]) as u32);
q[2] = self.fkey[k + 2]
^ (FBSUB[(p[2] & 0xff) as usize] as u32)
^ rotl8((FBSUB[((p[3] >> 8) & 0xff) as usize]) as u32)
^ rotl16((FBSUB[((p[0] >> 16) & 0xff) as usize]) as u32)
^ rotl24((FBSUB[((p[1] >> 24) & 0xff) as usize]) as u32);
q[3] = self.fkey[k + 3]
^ (FBSUB[(p[3] & 0xff) as usize] as u32)
^ rotl8((FBSUB[((p[0] >> 8) & 0xff) as usize]) as u32)
^ rotl16((FBSUB[((p[1] >> 16) & 0xff) as usize]) as u32)
^ rotl24((FBSUB[((p[2] >> 24) & 0xff) as usize]) as u32);
j = 0;
for i in 0..4 {
b = unpack(q[i]);
for k in 0..4 {
buff[j + k] = b[k]
}
j += 4;
}
}
/* Decrypt a single block */
pub fn ecb_decrypt(&mut self, buff: &mut [u8; 16]) {
let mut b: [u8; 4] = [0; 4];
let mut p: [u32; 4] = [0; 4];
let mut q: [u32; 4] = [0; 4];
let mut j = 0;
for i in 0..4 {
for k in 0..4 {
b[k] = buff[j + k]
}
p[i] = pack(b);
p[i] ^= self.rkey[i];
j += 4;
}
let mut k = 4;
/* State alternates between p and q */
for _ in 1..self.nr {
q[0] = self.rkey[k]
^ RTABLE[(p[0] & 0xff) as usize]
^ rotl8(RTABLE[((p[3] >> 8) & 0xff) as usize])
^ rotl16(RTABLE[((p[2] >> 16) & 0xff) as usize])
^ rotl24(RTABLE[((p[1] >> 24) & 0xff) as usize]);
q[1] = self.rkey[k + 1]
^ RTABLE[(p[1] & 0xff) as usize]
^ rotl8(RTABLE[((p[0] >> 8) & 0xff) as usize])
^ rotl16(RTABLE[((p[3] >> 16) & 0xff) as usize])
^ rotl24(RTABLE[((p[2] >> 24) & 0xff) as usize]);
q[2] = self.rkey[k + 2]
^ RTABLE[(p[2] & 0xff) as usize]
^ rotl8(RTABLE[((p[1] >> 8) & 0xff) as usize])
^ rotl16(RTABLE[((p[0] >> 16) & 0xff) as usize])
^ rotl24(RTABLE[((p[3] >> 24) & 0xff) as usize]);
q[3] = self.rkey[k + 3]
^ RTABLE[(p[3] & 0xff) as usize]
^ rotl8(RTABLE[((p[2] >> 8) & 0xff) as usize])
^ rotl16(RTABLE[((p[1] >> 16) & 0xff) as usize])
^ rotl24(RTABLE[((p[0] >> 24) & 0xff) as usize]);
k += 4;
for j in 0..4 {
let t = p[j];
p[j] = q[j];
q[j] = t;
}
}
/* Last Round */
q[0] = self.rkey[k]
^ (RBSUB[(p[0] & 0xff) as usize] as u32)
^ rotl8((RBSUB[((p[3] >> 8) & 0xff) as usize]) as u32)
^ rotl16((RBSUB[((p[2] >> 16) & 0xff) as usize]) as u32)
^ rotl24((RBSUB[((p[1] >> 24) & 0xff) as usize]) as u32);
q[1] = self.rkey[k + 1]
^ (RBSUB[(p[1] & 0xff) as usize] as u32)
^ rotl8((RBSUB[((p[0] >> 8) & 0xff) as usize]) as u32)
^ rotl16((RBSUB[((p[3] >> 16) & 0xff) as usize]) as u32)
^ rotl24((RBSUB[((p[2] >> 24) & 0xff) as usize]) as u32);
q[2] = self.rkey[k + 2]
^ (RBSUB[(p[2] & 0xff) as usize] as u32)
^ rotl8((RBSUB[((p[1] >> 8) & 0xff) as usize]) as u32)
^ rotl16((RBSUB[((p[0] >> 16) & 0xff) as usize]) as u32)
^ rotl24((RBSUB[((p[3] >> 24) & 0xff) as usize]) as u32);
q[3] = self.rkey[k + 3]
^ (RBSUB[((p[3]) & 0xff) as usize] as u32)
^ rotl8((RBSUB[((p[2] >> 8) & 0xff) as usize]) as u32)
^ rotl16((RBSUB[((p[1] >> 16) & 0xff) as usize]) as u32)
^ rotl24((RBSUB[((p[0] >> 24) & 0xff) as usize]) as u32);
j = 0;
for i in 0..4 {
b = unpack(q[i]);
for k in 0..4 {
buff[j + k] = b[k]
}
j += 4;
}
}
/* Encrypt using selected mode of operation */
pub fn encrypt(&mut self, buff: &mut [u8; 16]) -> u32 {
let mut st: [u8; 16] = [0; 16];
// Supported Modes of Operation
let mut fell_off: u32 = 0;
match self.mode {
ECB => {
self.ecb_encrypt(buff);
0
}
CBC => {
for j in 0..16 {
buff[j] ^= self.f[j]
}
self.ecb_encrypt(buff);
for j in 0..16 {
self.f[j] = buff[j]
}
0
}
CFB1 | CFB2 | CFB4 => {
let bytes = self.mode - CFB1 + 1;
for j in 0..bytes {
fell_off = (fell_off << 8) | (self.f[j] as u32)
}
for j in 0..16 {
st[j] = self.f[j]
}
for j in bytes..16 {
self.f[j - bytes] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
buff[j] ^= st[j];
self.f[16 - bytes + j] = buff[j];
}
fell_off
}
OFB1 | OFB2 | OFB4 | OFB8 | OFB16 => {
let bytes = self.mode - OFB1 + 1;
for j in 0..16 {
st[j] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
buff[j] ^= st[j]
}
for j in 0..16 {
self.f[j] = st[j]
}
//self.ecb_encrypt(&mut (self.f));
//for j in 0..bytes {buff[j]^=self.f[j]}
0
}
CTR1 | CTR2 | CTR4 | CTR8 | CTR16 => {
let bytes = self.mode - CTR1 + 1;
for j in 0..16 {
st[j] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
buff[j] ^= st[j]
}
increment(&mut (self.f));
0
}
_ => {
0
}
}
}
/* Decrypt using selected mode of operation */
pub fn decrypt(&mut self, buff: &mut [u8; 16]) -> u32 {
let mut st: [u8; 16] = [0; 16];
// Supported Modes of Operation
let mut fell_off: u32 = 0;
match self.mode {
ECB => {
self.ecb_decrypt(buff);
0
}
CBC => {
for j in 0..16 {
st[j] = self.f[j];
self.f[j] = buff[j];
}
self.ecb_decrypt(buff);
for j in 0..16 {
buff[j] ^= st[j];
st[j] = 0;
}
0
}
CFB1 | CFB2 | CFB4 => {
let bytes = self.mode - CFB1 + 1;
for j in 0..bytes {
fell_off = (fell_off << 8) | (self.f[j] as u32)
}
for j in 0..16 {
st[j] = self.f[j]
}
for j in bytes..16 {
self.f[j - bytes] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
self.f[16 - bytes + j] = buff[j];
buff[j] ^= st[j];
}
fell_off
}
OFB1 | OFB2 | OFB4 | OFB8 | OFB16 => {
let bytes = self.mode - OFB1 + 1;
for j in 0..16 {
st[j] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
buff[j] ^= st[j]
}
for j in 0..16 {
self.f[j] = st[j]
}
// self.ecb_encrypt(A.f[:]);
// for j in 0..bytes {buff[j]^=self.f[j]}
0
}
CTR1 | CTR2 | CTR4 | CTR8 | CTR16 => {
let bytes = self.mode - CTR1 + 1;
for j in 0..16 {
st[j] = self.f[j]
}
self.ecb_encrypt(&mut st);
for j in 0..bytes {
buff[j] ^= st[j]
}
increment(&mut (self.f));
0
}
_ => {
0
}
}
}
/* Clean up and delete left-overs */
pub fn end(&mut self) {
// clean up
for i in 0..4 * (self.nr + 1) {
self.fkey[i] = 0;
self.rkey[i] = 0
}
for i in 0..16 {
self.f[i] = 0
}
}
}
/* AES encryption/decryption. Encrypt byte array m using key k and return the ciphertext in c */
pub fn cbc_iv0_encrypt(k: &[u8], m: &[u8], c: &mut [u8]) -> usize {
/* AES CBC encryption, with Null IV and key K */
/* Input is from an octet string m, output is to an octet string c */
/* Input is padded as necessary to make up a full final block */
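/* For example (an illustrative note): a 30-byte message gets two padding bytes
   of value 0x02 appended, while a message that is already a multiple of 16
   bytes gets a full extra block of sixteen 0x10 bytes, so the padding can
   always be removed unambiguously on decryption */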
let mut a = AES::new();
let mut fin = false;
let mut buff: [u8; 16] = [0; 16];
a.init(CBC, k.len(), k, None);
let mut ipt = 0;
let mut opt = 0;
let mut i;
loop {
i = 0;
while i < 16 {
if ipt < m.len() {
buff[i] = m[ipt];
i += 1;
ipt += 1;
} else {
fin = true;
break;
}
}
if fin {
break;
}
a.encrypt(&mut buff);
for j in 0..16 {
if opt < c.len() {
c[opt]=buff[j]; opt+=1;
}
}
}
/* last block, filled up to i-th index */
let padlen = 16 - i;
for j in i..16 {
buff[j] = padlen as u8
}
a.encrypt(&mut buff);
for j in 0..16 {
if opt<c.len() {
c[opt]=buff[j]; opt+=1;
}
}
a.end();
opt
}
/* returns the plaintext length if the padding is consistent, else returns 0 */
pub fn cbc_iv0_decrypt(k: &[u8], c: &[u8], m: &mut [u8]) -> usize {
/* padding is removed */
let mut a = AES::new();
let mut fin = false;
let mut buff: [u8; 16] = [0; 16];
a.init(CBC, k.len(), k, None);
let mut ipt = 0;
let mut opt = 0;
let mut i;
if c.is_empty() {
return 0;
}
let mut ch = c[ipt];
ipt += 1;
loop {
i = 0;
while i < 16 {
buff[i] = ch;
if ipt >= c.len() {
fin = true;
break;
} else {
ch = c[ipt];
ipt += 1
}
i += 1;
}
a.decrypt(&mut buff);
if fin {
break;
}
for j in 0..16 {
if opt<m.len() {
m[opt]=buff[j]; opt+=1;
}
}
}
a.end();
let mut bad = false;
let padlen = buff[15] as usize;
if i != 15 || padlen < 1 || padlen > 16 {
bad = true
}
if padlen >= 2 && padlen <= 16 {
for j in 16 - padlen..16 {
if buff[j] != padlen as u8 {
bad = true
}
}
}
if !bad {
for i in 0..16 - padlen {
if opt<m.len() {
m[opt]=buff[i]; opt+=1;
}
}
}
if bad {
0
} else {
opt
}
}
/*
fn main()
{
let mut key:[u8;32]=[0;32];
let mut block:[u8;16]=[0;16];
let mut iv: [u8;16] = [0;16];
for i in 0..32 {key[i]=0}
key[0]=1;
for i in 0..16 {iv[i]=i as u8}
for i in 0..16 {block[i]=i as u8}
let mut aes=AES::new();
aes.init(CTR16,32,&key,Some(iv));
println!("Plain= ");
for i in 0..16 {print!("{:02x} ",block[i])}
println!("");
aes.encrypt(&mut block);
println!("Encrypt= ");
for i in 0..16 {print!("{:02x} ",block[i])}
println!("");
aes.reset(CTR16,Some(iv));
aes.decrypt(&mut block);
println!("Decrypt= ");
for i in 0..16 {print!("{:02x} ",block[i])}
println!("");
aes.end();
}
*/
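/*
A hedged round-trip sketch for the one-shot CBC helpers above (key and message
values are illustrative; the ciphertext buffer just needs room for the padded
input):

fn cbc_roundtrip()
{
    let key: [u8; 32] = [0x01; 32];
    let msg = b"hello, world";
    let mut ct: [u8; 32] = [0; 32];      // a 12-byte message pads to one 16-byte block
    let mut pt: [u8; 32] = [0; 32];
    let clen = cbc_iv0_encrypt(&key, msg, &mut ct);
    let mlen = cbc_iv0_decrypt(&key, &ct[0..clen], &mut pt);
    assert_eq!(&pt[0..mlen], msg);
}
*/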

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub type Chunk = i64;
pub type DChunk = i128;
pub const CHUNK: usize = 64;

249
crates/bls48581/src/bls.rs Normal file
View File

@ -0,0 +1,249 @@
use std::sync::Once;
use std::mem::MaybeUninit;
use std::collections::HashMap;
use serde_json;
use hex;
use crate::bls48581::big;
use crate::bls48581::ecp;
use crate::bls48581::ecp8;
use crate::bls48581::bls256;
pub struct SingletonKZGSetup {
pub RootOfUnityBLS48581: HashMap<u64, big::BIG>,
pub RootsOfUnityBLS48581: HashMap<u64, Vec<big::BIG>>,
pub ReverseRootsOfUnityBLS48581: HashMap<u64, Vec<big::BIG>>,
pub CeremonyBLS48581G1: Vec<ecp::ECP>,
pub CeremonyBLS48581G2: Vec<ecp8::ECP8>,
pub FFTBLS48581: HashMap<u64, Vec<ecp::ECP>>,
}
pub fn singleton() -> &'static SingletonKZGSetup {
static mut SINGLETON: MaybeUninit<SingletonKZGSetup> = MaybeUninit::uninit();
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {
bls256::init();
let bytes = include_bytes!("optimized_ceremony.json");
let v: serde_json::Value = serde_json::from_slice(bytes).unwrap();
let mut blsg1 = Vec::<ecp::ECP>::new();
let mut blsg2 = Vec::<ecp8::ECP8>::new();
let mut rootOfUnity = HashMap::<u64, big::BIG>::new();
let mut rootsOfUnity = HashMap::<u64, Vec<big::BIG>>::new();
let mut reverseRootsOfUnity = HashMap::<u64, Vec<big::BIG>>::new();
let mut ffts = HashMap::<u64, Vec<ecp::ECP>>::new();
for power in v["powersOfTau"]["G1Powers"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
blsg1.push(ecp::ECP::frombytes(&p));
}
for power in v["powersOfTau"]["G2Powers"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
blsg2.push(ecp8::ECP8::frombytes(&p));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity16"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(16, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity32"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(32, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity64"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(64, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity128"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(128, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity256"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(256, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity512"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(512, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity1024"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(1024, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity2048"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(2048, big::BIG::frombytes(&r));
}
{
let root = v["sized"]["rootOfUnity"]["rootOfUnity65536"].clone();
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootOfUnity.insert(65536, big::BIG::frombytes(&r));
}
let mut rootsOfUnity16 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity16"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity16.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(16, rootsOfUnity16.clone());
reverseRootsOfUnity.insert(16, rootsOfUnity16.into_iter().rev().collect());
let mut rootsOfUnity32 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity32"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity32.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(32, rootsOfUnity32.clone());
reverseRootsOfUnity.insert(32, rootsOfUnity32.into_iter().rev().collect());
let mut rootsOfUnity64 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity64"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity64.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(64, rootsOfUnity64.clone());
reverseRootsOfUnity.insert(64, rootsOfUnity64.into_iter().rev().collect());
let mut rootsOfUnity128 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity128"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity128.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(128, rootsOfUnity128.clone());
reverseRootsOfUnity.insert(128, rootsOfUnity128.into_iter().rev().collect());
let mut rootsOfUnity256 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity256"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity256.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(256, rootsOfUnity256.clone());
reverseRootsOfUnity.insert(256, rootsOfUnity256.into_iter().rev().collect());
let mut rootsOfUnity512 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity512"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity512.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(512, rootsOfUnity512.clone());
reverseRootsOfUnity.insert(512, rootsOfUnity512.into_iter().rev().collect());
let mut rootsOfUnity1024 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity1024"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity1024.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(1024, rootsOfUnity1024.clone());
reverseRootsOfUnity.insert(1024, rootsOfUnity1024.into_iter().rev().collect());
let mut rootsOfUnity2048 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity2048"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity2048.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(2048, rootsOfUnity2048.clone());
reverseRootsOfUnity.insert(2048, rootsOfUnity2048.into_iter().rev().collect());
let mut rootsOfUnity65536 = Vec::<big::BIG>::new();
for root in v["sized"]["rootsOfUnity"]["rootsOfUnity65536"].as_array().unwrap() {
let r = hex::decode(root.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
rootsOfUnity65536.push(big::BIG::frombytes(&r));
}
rootsOfUnity.insert(65536, rootsOfUnity65536.clone());
reverseRootsOfUnity.insert(65536, rootsOfUnity65536.into_iter().rev().collect());
let mut f16 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT16"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f16.push(ecp::ECP::frombytes(&p));
}
ffts.insert(16, f16);
let mut f32 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT32"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f32.push(ecp::ECP::frombytes(&p));
}
ffts.insert(32, f32);
let mut f64 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT64"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f64.push(ecp::ECP::frombytes(&p));
}
ffts.insert(64, f64);
let mut f128 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT128"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f128.push(ecp::ECP::frombytes(&p));
}
ffts.insert(128, f128);
let mut f256 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT256"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f256.push(ecp::ECP::frombytes(&p));
}
ffts.insert(256, f256);
let mut f512 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT512"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f512.push(ecp::ECP::frombytes(&p));
}
ffts.insert(512, f512);
let mut f1024 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT1024"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f1024.push(ecp::ECP::frombytes(&p));
}
ffts.insert(1024, f1024);
let mut f2048 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT2048"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f2048.push(ecp::ECP::frombytes(&p));
}
ffts.insert(2048, f2048);
let mut f65536 = Vec::<ecp::ECP>::new();
for power in v["sized"]["G1FFT"]["G1FFT65536"].as_array().unwrap() {
let p = hex::decode(power.as_str().unwrap().strip_prefix("0x").unwrap()).unwrap();
f65536.push(ecp::ECP::frombytes(&p));
}
ffts.insert(65536, f65536);
let singleton = SingletonKZGSetup {
RootOfUnityBLS48581: rootOfUnity,
RootsOfUnityBLS48581: rootsOfUnity,
ReverseRootsOfUnityBLS48581: reverseRootsOfUnity,
CeremonyBLS48581G1: blsg1,
CeremonyBLS48581G2: blsg2,
FFTBLS48581: ffts,
};
SINGLETON.write(singleton);
});
SINGLETON.assume_init_ref()
}
}
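/*
A hedged usage sketch (the module path `crate::bls` is an assumption based on
this file's location; the field names match the struct above). The first call
performs the one-time load of optimized_ceremony.json; later calls return the
cached reference.

    let setup = crate::bls::singleton();
    let g1_points = setup.CeremonyBLS48581G1.len();
    let root_16 = setup.RootOfUnityBLS48581.get(&16u64).unwrap();
    let fft_64 = setup.FFTBLS48581.get(&64u64).unwrap();
*/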

File diff suppressed because it is too large

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::fp::FP;
use crate::bls48581::ecp;
use crate::bls48581::ecp::ECP;
use crate::bls48581::dbig::DBIG;
use crate::bls48581::ecp8::ECP8;
use crate::bls48581::fp16::FP16;
use crate::bls48581::pair8;
use crate::bls48581::rom;
use crate::hmac;
/* Boneh-Lynn-Shacham signature 256-bit API Functions */
pub const BFS: usize = big::MODBYTES as usize;
pub const BGS: usize = big::MODBYTES as usize;
pub const BLS_OK: isize = 0;
pub const BLS_FAIL: isize = -1;
// NOTE this must be accessed in unsafe mode.
// But it is just written to once at start-up, so actually safe.
static mut G2_TAB: [FP16; ecp::G2_TABLE] = [FP16::new(); ecp::G2_TABLE];
fn ceil(a: usize,b: usize) -> usize {
(a-1)/b+1
}
/* output u \in F_p */
fn hash_to_field(hash: usize,hlen: usize ,u: &mut [FP], dst: &[u8],m: &[u8],ctr: usize) {
let q = BIG::new_ints(&rom::MODULUS);
let nbq=q.nbits();
let el = ceil(nbq+ecp::AESKEY*8,8);
let mut okm: [u8;256]=[0;256];
let mut fd: [u8;128]=[0;128];
hmac::xmd_expand(hash,hlen,&mut okm,el*ctr,&dst,&m);
for i in 0..ctr {
for j in 0..el {
fd[j]=okm[el*i+j];
}
u[i]=FP::new_big(&DBIG::frombytes(&fd[0 .. el]).ctdmod(&q,8*el-nbq));
}
}
/* hash a message to an ECP point, using SHA2, random oracle method */
#[allow(non_snake_case)]
pub fn bls_hash_to_point(m: &[u8]) -> ECP {
//let dst= String::from("BLS_SIG_ZZZG1_XMD:SHA-512_SVDW_RO_NUL_".to_ascii_uppercase());
let dst= "BLS_SIG_ZZZG1_XMD:SHA-512_SVDW_RO_NUL_";
let mut u: [FP; 2] = [
FP::new(),
FP::new(),
];
hash_to_field(hmac::MC_SHA2,ecp::HASH_TYPE,&mut u,dst.as_bytes(),m,2);
let mut P=ECP::map2point(&u[0]);
let P1=ECP::map2point(&u[1]);
P.add(&P1);
P.cfp();
P.affine();
P
}
pub fn init() -> isize {
let g = ECP8::generator();
if g.is_infinity() {
return BLS_FAIL;
}
unsafe {
pair8::precomp(&mut G2_TAB, &g);
}
BLS_OK
}
/* generate key pair, private key s, public key w */
pub fn key_pair_generate(ikm: &[u8], s: &mut [u8], w: &mut [u8]) -> isize {
let r = BIG::new_ints(&rom::CURVE_ORDER);
let nbr=r.nbits();
let el = ceil(3*ceil(nbr,8),2);
let g = ECP8::generator();
let mut len: [u8; 2] = [0; 2];
hmac::inttobytes(el,&mut len);
let salt="BLS-SIG-KEYGEN-SALT-";
let mut prk: [u8;64]=[0;64];
let mut okm: [u8;128]=[0;128];
let mut aikm: [u8;65]=[0;65];
let likm=ikm.len();
for i in 0..likm {
aikm[i]=ikm[i];
}
aikm[likm]=0;
let hlen=ecp::HASH_TYPE;
hmac::hkdf_extract(hmac::MC_SHA2,hlen,&mut prk,Some(&salt.as_bytes()),&aikm[0 .. likm+1]);
hmac::hkdf_expand(hmac::MC_SHA2,hlen,&mut okm,el,&prk[0 .. hlen],&len);
let mut dx = DBIG::frombytes(&okm[0 .. el]);
let sc = dx.ctdmod(&r,8*el-nbr);
sc.tobytes(s);
// SkToPk
pair8::g2mul(&g, &sc).tobytes(w,true); // true for public key compression
BLS_OK
}
/* Sign message m using private key s to produce signature sig */
pub fn core_sign(sig: &mut [u8], m: &[u8], s: &[u8]) -> isize {
let d = bls_hash_to_point(m);
let sc = BIG::frombytes(&s);
pair8::g1mul(&d, &sc).tobytes(sig, true);
BLS_OK
}
/* Verify signature given message m, the signature sig, and the public key w */
pub fn core_verify(sig: &[u8], m: &[u8], w: &[u8]) -> isize {
let hm = bls_hash_to_point(m);
let mut d = ECP::frombytes(&sig);
if !pair8::g1member(&d) {
return BLS_FAIL;
}
d.neg();
let pk = ECP8::frombytes(&w);
if !pair8::g2member(&pk) {
return BLS_FAIL;
}
// Use new multi-pairing mechanism
let mut r = pair8::initmp();
// pair8::another(&mut r,&g,&d);
unsafe {
pair8::another_pc(&mut r, &G2_TAB, &d);
}
pair8::another(&mut r, &pk, &hm);
let mut v = pair8::miller(&mut r);
//.. or alternatively
// let g = ECP8::generator();
// let mut v = pair8::ate2(&g, &d, &pk, &hm);
v = pair8::fexp(&v);
if v.isunity() {
return BLS_OK;
}
BLS_FAIL
}
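/*
A hedged usage sketch of the API above (the IKM shown is illustrative only; use
real secret randomness). Buffer sizes follow the compressed encodings used
here: G1 points are BFS+1 bytes, G2 points are 8*BFS+1 bytes.

    let ikm: [u8; 32] = [0x2a; 32];
    let mut sk: [u8; BGS] = [0; BGS];
    let mut pk: [u8; 8 * BFS + 1] = [0; 8 * BFS + 1];
    let mut sig: [u8; BFS + 1] = [0; BFS + 1];

    init();                                          // precompute the G2 table once
    key_pair_generate(&ikm, &mut sk, &mut pk);
    core_sign(&mut sig, b"message", &sk);
    assert_eq!(core_verify(&sig, b"message", &pk), BLS_OK);
*/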

View File

@ -0,0 +1,314 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::arch;
use crate::arch::Chunk;
use crate::bls48581::big;
use crate::bls48581::big::BIG;
pub struct DBIG {
pub w: [Chunk; big::DNLEN],
}
impl std::fmt::Debug for DBIG {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for DBIG {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl DBIG {
pub fn new() -> DBIG {
DBIG {
w: [0; big::DNLEN as usize],
}
}
pub fn new_copy(y: &DBIG) -> DBIG {
let mut s = DBIG::new();
for i in 0..big::DNLEN {
s.w[i] = y.w[i]
}
s
}
pub fn new_scopy(x: &BIG) -> DBIG {
let mut b = DBIG::new();
for i in 0..big::NLEN {
b.w[i] = x.w[i];
}
b.w[big::NLEN - 1] = x.get(big::NLEN - 1) & big::BMASK; /* top word normalized */
b.w[big::NLEN] = x.get(big::NLEN - 1) >> big::BASEBITS;
for i in big::NLEN + 1..big::DNLEN {
b.w[i] = 0
}
b
}
/* split DBIG at position n, return higher half, keep lower half */
pub fn split(&mut self, n: usize) -> BIG {
let mut t = BIG::new();
let m = n % big::BASEBITS;
let mut carry = self.w[big::DNLEN - 1] << (big::BASEBITS - m);
for i in (big::NLEN - 1..big::DNLEN - 1).rev() {
let nw = (self.w[i] >> m) | carry;
carry = (self.w[i] << (big::BASEBITS - m)) & big::BMASK;
t.set(i + 1 - big::NLEN, nw);
}
self.w[big::NLEN - 1] &= ((1 as Chunk) << m) - 1;
t
}
/* general shift left */
pub fn shl(&mut self, k: usize) {
let n = k % big::BASEBITS;
let m = k / big::BASEBITS;
self.w[big::DNLEN - 1] =
(self.w[big::DNLEN - 1 - m] << n) | (self.w[big::DNLEN - m - 2] >> (big::BASEBITS - n));
for i in (m + 1..big::DNLEN - 1).rev() {
self.w[i] =
((self.w[i - m] << n) & big::BMASK) | (self.w[i - m - 1] >> (big::BASEBITS - n));
}
self.w[m] = (self.w[0] << n) & big::BMASK;
for i in 0..m {
self.w[i] = 0
}
}
/* general shift right */
pub fn shr(&mut self, k: usize) {
let n = k % big::BASEBITS;
let m = k / big::BASEBITS;
for i in 0..big::DNLEN - m - 1 {
self.w[i] =
(self.w[m + i] >> n) | ((self.w[m + i + 1] << (big::BASEBITS - n)) & big::BMASK);
}
self.w[big::DNLEN - m - 1] = self.w[big::DNLEN - 1] >> n;
for i in big::DNLEN - m..big::DNLEN {
self.w[i] = 0
}
}
/* Copy from another DBIG */
pub fn copy(&mut self, x: &DBIG) {
for i in 0..big::DNLEN {
self.w[i] = x.w[i];
}
}
pub fn ucopy(&mut self, x: &BIG) {
for i in 0..big::NLEN {
self.w[i] = 0;
}
for i in big::NLEN..big::DNLEN {
self.w[i] = x.w[i - big::NLEN];
}
}
pub fn cmove(&mut self, g: &DBIG, d: isize) -> Chunk {
let b = -d as Chunk;
let mut w=0 as Chunk;
let r=self.w[0]^g.w[1];
let mut ra=r.wrapping_add(r); ra >>= 1;
for i in 0..big::DNLEN {
let mut t = b & (self.w[i] ^ g.w[i]);
t^=r;
let e=self.w[i]^t; w^=e;
self.w[i]=e^ra;
}
return w;
}
/* self+=x */
pub fn add(&mut self, x: &DBIG) {
for i in 0..big::DNLEN {
self.w[i] += x.w[i];
}
}
/* self-=x */
pub fn sub(&mut self, x: &DBIG) {
for i in 0..big::DNLEN {
self.w[i] -= x.w[i];
}
}
/* self=x-self */
pub fn rsub(&mut self, x: &DBIG) {
for i in 0..big::DNLEN {
self.w[i] = x.w[i] - self.w[i];
}
}
/* Compare a and b, return 0 if a==b, -1 if a<b, +1 if a>b. Inputs must be normalised */
pub fn comp(a: &DBIG, b: &DBIG) -> isize {
let mut gt = 0 as Chunk;
let mut eq = 1 as Chunk;
for i in (0..big::DNLEN).rev() {
gt |= ((b.w[i]-a.w[i]) >> big::BASEBITS) & eq;
eq &= ((b.w[i]^a.w[i])-1) >> big::BASEBITS;
}
(gt+gt+eq-1) as isize
}
/* convert from byte array to DBIG */
pub fn frombytes(b: &[u8]) -> DBIG {
let mut m = DBIG::new();
for i in 0..(b.len()) {
m.shl(8);
m.w[0] += b[i] as Chunk;
}
m
}
/* normalise DBIG - force all digits < 2^big::BASEBITS */
pub fn norm(&mut self) {
let mut carry = self.w[0]>>big::BASEBITS;
self.w[0] &= big::BMASK;
for i in 1..big::DNLEN - 1 {
let d = self.w[i] + carry;
self.w[i] = d & big::BMASK;
carry = d >> big::BASEBITS;
}
self.w[big::DNLEN - 1] += carry
}
// Set self=self mod m in constant time (if bd is known at compile time)
// bd is the maximum number of bits in self minus the actual number of bits in m
pub fn ctdmod(&mut self, m: &BIG, bd: usize) -> BIG {
let mut k=bd;
self.norm();
let mut c = DBIG::new_scopy(m);
let mut dr = DBIG::new();
c.shl(k);
loop {
dr.copy(self);
dr.sub(&c);
dr.norm();
self.cmove(&dr,(1 - ((dr.w[big::DNLEN - 1] >> (arch::CHUNK - 1)) & 1)) as isize);
if k==0 {break;}
c.shr(1);
k -= 1;
}
BIG::new_dcopy(self)
}
/* reduces self DBIG mod a BIG, and returns the BIG */
pub fn dmod(&mut self, m: &BIG) -> BIG {
let ss=self.nbits() as isize;
let ms=m.nbits() as isize;
let mut k=(ss-ms) as usize;
if ss<ms {k=0;}
self.ctdmod(m,k)
}
// self=self/m in constant time (if bd is known at compile time)
// bd is the maximum number of bits in self minus the actual number of bits in m
pub fn ctdiv(&mut self, m: &BIG, bd:usize) -> BIG {
let mut k=bd;
let mut c = DBIG::new_scopy(m);
let mut a = BIG::new();
let mut e = BIG::new_int(1);
let mut dr = DBIG::new();
let mut r = BIG::new();
self.norm();
c.shl(k);
e.shl(k);
loop {
dr.copy(self);
dr.sub(&c);
dr.norm();
let d = (1 - ((dr.w[big::DNLEN - 1] >> (arch::CHUNK - 1)) & 1)) as isize;
self.cmove(&dr, d);
r.copy(&a);
r.add(&e);
r.norm();
a.cmove(&r, d);
if k==0 {break;}
k -= 1;
c.shr(1);
e.shr(1);
}
a
}
/* return this/m */
pub fn div(&mut self, m: &BIG) -> BIG {
let ss=self.nbits() as isize;
let ms=m.nbits() as isize;
let mut k=(ss-ms) as usize;
if ss<ms {k=0;}
self.ctdiv(m,k)
}
/* return number of bits */
pub fn nbits(&self) -> usize {
let mut k = big::DNLEN - 1;
let mut s = DBIG::new_copy(&self);
s.norm();
while (k as isize) >= 0 && s.w[k] == 0 {
k = k.wrapping_sub(1)
}
if (k as isize) < 0 {
return 0;
}
let mut bts = (big::BASEBITS as usize) * k;
let mut c = s.w[k];
while c != 0 {
c /= 2;
bts += 1;
}
bts
}
/* Convert to Hex String */
pub fn tostring(&self) -> String {
let mut s = String::new();
let mut len = self.nbits();
if len % 4 == 0 {
len /= 4;
} else {
len /= 4;
len += 1;
}
for i in (0..len).rev() {
let mut b = DBIG::new_copy(&self);
b.shr(i * 4);
s = s + &format!("{:X}", b.w[0] & 15);
}
s
}
}
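/*
A small illustrative sketch (values are arbitrary): reduce a double-length
integer modulo the curve order, assuming `rom` is in scope as in the other
modules of this crate.

    let wide: [u8; 2 * (big::MODBYTES as usize)] = [0xff; 2 * (big::MODBYTES as usize)];
    let mut d = DBIG::frombytes(&wide);
    let r = BIG::new_ints(&rom::CURVE_ORDER);
    let reduced: BIG = d.dmod(&r);
*/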

View File

@ -0,0 +1,432 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* ECDH/ECIES/ECDSA API Functions */
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::ecp;
use crate::bls48581::ecp::ECP;
use crate::bls48581::rom;
use crate::aes;
use crate::hmac;
use crate::rand::RAND;
pub const INVALID_PUBLIC_KEY: isize = -2;
pub const ERROR: isize = -3;
//pub const INVALID: isize = -4;
pub const EFS: usize = big::MODBYTES as usize;
pub const EGS: usize = big::MODBYTES as usize;
pub fn rfc7748(r: &mut BIG) {
let mut lg=0;
let mut t=BIG::new_int(1);
let mut c=rom::CURVE_COF_I;
while c!=1 {
lg+=1;
c/=2;
}
let n=(8*EGS-lg+1) as usize;
r.mod2m(n);
t.shl(n);
r.add(&t);
c=r.lastbits(lg as usize);
r.dec(c);
}
#[allow(non_snake_case)]
pub fn in_range(s: &[u8]) -> bool {
let r = BIG::new_ints(&rom::CURVE_ORDER);
let sc = BIG::frombytes(&s);
if sc.iszilch() {
return false;
}
if BIG::comp(&sc, &r) >= 0 {
return false;
}
true
}
/* Calculate a public/private EC GF(p) key pair w,s where W=s.G mod EC(p),
* where s is the secret key and W is the public key
* and G is fixed generator.
 * If rng is None then the private key is provided externally in s
* otherwise it is generated randomly internally */
#[allow(non_snake_case)]
pub fn key_pair_generate(rng: Option<&mut RAND>, s: &mut [u8], w: &mut [u8]) -> isize {
let res = 0;
let mut sc: BIG;
let G = ECP::generator();
let r = BIG::new_ints(&rom::CURVE_ORDER);
if let Some(x) = rng {
if ecp::CURVETYPE != ecp::WEIERSTRASS {
sc = BIG::random(x); // from random bytes
} else {
sc = BIG::randomnum(&r, x); // Removes biases
}
} else {
sc = BIG::frombytes(&s);
}
if ecp::CURVETYPE != ecp::WEIERSTRASS {
rfc7748(&mut sc); // For Montgomery or Edwards, apply RFC7748 transformation
}
sc.tobytes(s);
let WP = G.clmul(&sc,&r);
WP.tobytes(w, false); // To use point compression on public keys, change to true
res
}
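/*
A hedged key-agreement sketch (buffer sizes follow EGS/EFS above; the RNG
seeding shown is illustrative only, use real entropy in practice):

    let raw: [u8; 100] = [0; 100];                  // fill from a real entropy source
    let mut rng = RAND::new();
    rng.clean();
    rng.seed(100, &raw);

    let mut s0: [u8; EGS] = [0; EGS]; let mut w0: [u8; 2 * EFS + 1] = [0; 2 * EFS + 1];
    let mut s1: [u8; EGS] = [0; EGS]; let mut w1: [u8; 2 * EFS + 1] = [0; 2 * EFS + 1];
    key_pair_generate(Some(&mut rng), &mut s0, &mut w0);
    key_pair_generate(Some(&mut rng), &mut s1, &mut w1);

    let mut z0: [u8; EFS] = [0; EFS]; let mut z1: [u8; EFS] = [0; EFS];
    ecpsvdp_dh(&s0, &w1, &mut z0, 0);
    ecpsvdp_dh(&s1, &w0, &mut z1, 0);
    assert_eq!(&z0[..], &z1[..]);                   // both sides derive the same secret
*/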
/* validate public key */
#[allow(non_snake_case)]
pub fn public_key_validate(w: &[u8]) -> isize {
let mut WP = ECP::frombytes(w);
let mut res = 0;
let r = BIG::new_ints(&rom::CURVE_ORDER);
if WP.is_infinity() {
res = INVALID_PUBLIC_KEY
}
if res == 0 {
let q = BIG::new_ints(&rom::MODULUS);
let nb = q.nbits();
let mut k = BIG::new();
k.one();
k.shl((nb + 4) / 2);
k.add(&q);
k.div(&r);
while k.parity() == 0 {
k.shr(1);
WP.dbl();
}
if !k.isunity() {
WP = WP.mul(&mut k)
}
if WP.is_infinity() {
res = INVALID_PUBLIC_KEY
}
}
res
}
/* IEEE-1363 Diffie-Hellman online calculation Z=S.WD */
#[allow(non_snake_case)]
pub fn ecpsvdp_dh(s: &[u8], wd: &[u8], z: &mut [u8], typ: isize) -> isize {
let mut res = 0;
let sc = BIG::frombytes(&s);
let mut W = ECP::frombytes(&wd);
if W.is_infinity() {
res = ERROR
}
if res == 0 {
let r = BIG::new_ints(&rom::CURVE_ORDER);
W = W.clmul(&sc,&r);
if W.is_infinity() {
res = ERROR;
} else {
if ecp::CURVETYPE != ecp::MONTGOMERY {
if typ>0 {
if typ==1 {
W.tobytes(z,true);
} else {
W.tobytes(z,false);
}
} else {
W.getx().tobytes(z);
}
return res;
} else {
W.getx().tobytes(z);
}
}
}
res
}
/* IEEE ECDSA Signature, C and D are signature on F using private key S */
#[allow(non_snake_case)]
pub fn ecpsp_dsa(
sha: usize,
rng: &mut RAND,
s: &[u8],
f: &[u8],
c: &mut [u8],
d: &mut [u8],
) -> isize {
let mut t: [u8; EGS] = [0; EGS];
let mut b: [u8; EGS] = [0; EGS];
hmac::GPhashit(hmac::MC_SHA2, sha, &mut b, EGS,0,Some(f), -1, None);
let G = ECP::generator();
let r = BIG::new_ints(&rom::CURVE_ORDER);
let sc = BIG::frombytes(s); /* s or &s? */
let fb = BIG::frombytes(&b);
let mut cb = BIG::new();
let mut db = BIG::new();
let mut tb = BIG::new();
let mut V = ECP::new();
while db.iszilch() {
let mut u = BIG::randomnum(&r, rng);
let w = BIG::randomnum(&r, rng); /* IMPORTANT - side channel masking to protect invmodp() */
V.copy(&G);
V = V.clmul(&u,&r);
let vx = V.getx();
cb.copy(&vx);
cb.rmod(&r);
if cb.iszilch() {
continue;
}
tb.copy(&BIG::modmul(&u, &w, &r));
u.copy(&tb);
u.invmodp(&r);
db.copy(&BIG::modmul(&sc, &cb, &r));
db.copy(&BIG::modadd(&db, &fb, &r));
tb.copy(&BIG::modmul(&db, &w, &r));
db.copy(&tb);
tb.copy(&BIG::modmul(&u, &db, &r));
db.copy(&tb);
}
cb.tobytes(&mut t);
for i in 0..EGS {
c[i] = t[i]
}
db.tobytes(&mut t);
for i in 0..EGS {
d[i] = t[i]
}
0
}
/* IEEE1363 ECDSA Signature Verification. Signature C and D on F is verified using public key W */
#[allow(non_snake_case)]
pub fn ecpvp_dsa(sha: usize, w: &[u8], f: &[u8], c: &[u8], d: &[u8]) -> isize {
let mut res = 0;
let mut b: [u8; EGS] = [0; EGS];
hmac::GPhashit(hmac::MC_SHA2, sha, &mut b, EGS, 0,Some(f), -1, None);
let mut G = ECP::generator();
let r = BIG::new_ints(&rom::CURVE_ORDER);
let mut cb = BIG::frombytes(c); /* c or &c ? */
let mut db = BIG::frombytes(d); /* d or &d ? */
let mut fb = BIG::frombytes(&b);
let mut tb = BIG::new();
if cb.iszilch() || BIG::comp(&cb, &r) >= 0 || db.iszilch() || BIG::comp(&db, &r) >= 0 {
res = ERROR;
}
if res == 0 {
db.invmodp(&r);
tb.copy(&BIG::modmul(&mut fb, &mut db, &r));
fb.copy(&tb);
let h2 = BIG::modmul(&mut cb, &mut db, &r);
let WP = ECP::frombytes(&w);
if WP.is_infinity() {
res = ERROR;
} else {
let mut P = ECP::new();
P.copy(&WP);
P = P.mul2(&h2, &mut G, &fb);
if P.is_infinity() {
res = ERROR;
} else {
db = P.getx();
db.rmod(&r);
if BIG::comp(&db, &cb) != 0 {
res = ERROR
}
}
}
}
res
}
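/*
A hedged signing sketch (reusing a key pair s0/w0 and an rng seeded as in the
key-agreement sketch above; ecp::HASH_TYPE is assumed to be the intended hash
output size):

    let mut c: [u8; EGS] = [0; EGS];
    let mut d: [u8; EGS] = [0; EGS];
    ecpsp_dsa(ecp::HASH_TYPE, &mut rng, &s0, b"sample message", &mut c, &mut d);
    assert_eq!(ecpvp_dsa(ecp::HASH_TYPE, &w0, b"sample message", &c, &d), 0);
*/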
/* IEEE1363 ECIES encryption. Encryption of plaintext M uses public key W and produces ciphertext V,C,T */
// returns length of ciphertext
#[allow(non_snake_case)]
pub fn ecies_encrypt(
sha: usize,
p1: &[u8],
p2: &[u8],
rng: &mut RAND,
w: &[u8],
m: &[u8],
v: &mut [u8],
c: &mut [u8],
t: &mut [u8],
) -> usize {
let mut z: [u8; EFS] = [0; EFS];
let mut k1: [u8; ecp::AESKEY] = [0; ecp::AESKEY];
let mut k2: [u8; ecp::AESKEY] = [0; ecp::AESKEY];
let mut u: [u8; EGS] = [0; EGS];
let mut vz: [u8; 3 * EFS + 1] = [0; 3 * EFS + 1];
let mut k: [u8; 2 * ecp::AESKEY] = [0; 2 * ecp::AESKEY];
if key_pair_generate(Some(rng), &mut u, v) != 0 {
return 0;
}
if ecpsvdp_dh(&u, &w, &mut z, 0) != 0 {
return 0;
}
for i in 0..2 * EFS + 1 {
vz[i] = v[i]
}
for i in 0..EFS {
vz[2 * EFS + 1 + i] = z[i]
}
hmac::kdf2(hmac::MC_SHA2, sha, &vz, Some(p1), 2 * ecp::AESKEY, &mut k);
for i in 0..ecp::AESKEY {
k1[i] = k[i];
k2[i] = k[ecp::AESKEY + i]
}
let clen = aes::cbc_iv0_encrypt(&k1, m, c);
let mut l2: [u8; 8] = [0; 8];
let p2l = p2.len();
hmac::inttobytes(p2l, &mut l2);
let mut opt=clen;
for i in 0..p2l {
c[opt]=p2[i]; opt+=1;
}
for i in 0..8 {
c[opt]=l2[i]; opt+=1;
}
hmac::hmac1(hmac::MC_SHA2, sha, t, t.len(), &k2, &c[0..opt]);
clen
}
/* constant time n-byte compare */
fn ncomp(t1: &[u8], t2: &[u8], n: usize) -> bool {
let mut res = 0;
for i in 0..n {
res |= (t1[i] ^ t2[i]) as isize;
}
if res == 0 {
return true;
}
false
}
/* IEEE1363 ECIES decryption. Decryption of ciphertext V,C,T using private key U outputs plaintext M */
// returns length of plaintext
#[allow(non_snake_case)]
pub fn ecies_decrypt(
sha: usize,
p1: &[u8],
p2: &[u8],
v: &[u8],
c: &mut [u8],
clen: usize,
t: &[u8],
u: &[u8],
m: &mut [u8],
) -> usize {
let mut z: [u8; EFS] = [0; EFS];
let mut k1: [u8; ecp::AESKEY] = [0; ecp::AESKEY];
let mut k2: [u8; ecp::AESKEY] = [0; ecp::AESKEY];
let mut vz: [u8; 3 * EFS + 1] = [0; 3 * EFS + 1];
let mut k: [u8; 2 * ecp::AESKEY] = [0; 2 * ecp::AESKEY];
let mut tag: [u8; 32] = [0; 32]; /* 32 is max length of tag */
for i in 0..t.len() {
tag[i] = t[i]
}
if ecpsvdp_dh(&u, &v, &mut z, 0) != 0 {
return 0;
}
for i in 0..2 * EFS + 1 {
vz[i] = v[i]
}
for i in 0..EFS {
vz[2 * EFS + 1 + i] = z[i]
}
hmac::kdf2(hmac::MC_SHA2, sha, &vz, Some(p1), 2 * ecp::AESKEY, &mut k);
for i in 0..ecp::AESKEY {
k1[i] = k[i];
k2[i] = k[ecp::AESKEY + i]
}
let mlen = aes::cbc_iv0_decrypt(&k1, &c[0..clen], m);
if mlen == 0 {
return 0;
}
let mut l2: [u8; 8] = [0; 8];
let p2l = p2.len();
hmac::inttobytes(p2l, &mut l2);
let mut opt=clen;
for i in 0..p2l {
c[opt]=p2[i]; opt+=1;
}
for i in 0..8 {
c[opt]=l2[i]; opt+=1;
}
let tl=tag.len();
hmac::hmac1(hmac::MC_SHA2, sha, &mut tag, tl, &k2, &c[0..opt]);
if !ncomp(&t, &tag, t.len()) {
return 0;
}
mlen
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,825 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::arch;
use crate::arch::Chunk;
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::dbig::DBIG;
use crate::bls48581::rom;
use crate::rand::RAND;
#[derive(Copy, Clone)]
pub struct FP {
pub x: BIG,
pub xes: i32,
}
impl std::fmt::Debug for FP {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for FP {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
pub const NOT_SPECIAL: usize = 0;
pub const PSEUDO_MERSENNE: usize = 1;
pub const MONTGOMERY_FRIENDLY: usize = 2;
pub const GENERALISED_MERSENNE: usize = 3;
pub const NEGATOWER: usize = 0;
pub const POSITOWER: usize = 1;
pub const MODBITS:usize = 581; /* Number of bits in Modulus */
pub const PM1D2: usize = 1; /* Modulus mod 8 */
pub const RIADZ: isize = 2; /* Z for hash-to-point */
pub const RIADZG2A: isize = 2; /* G2 Z for hash-to-point */
pub const RIADZG2B: isize = 0; /* G2 Z for hash-to-point */
pub const MODTYPE:usize=NOT_SPECIAL;
pub const QNRI:usize=0; /* Fp2 QNR 2^i+sqrt(-1) */
pub const TOWER:usize=POSITOWER; /* Tower type */
pub const FEXCESS:i32 = ((1 as i32)<<19)-1;
pub const OMASK: Chunk = (-1) << (MODBITS % big::BASEBITS);
pub const TBITS: usize = MODBITS % big::BASEBITS; // Number of active bits in top word
pub const TMASK: Chunk = (1 << TBITS) - 1;
pub const BIG_ENDIAN_SIGN: bool = false;
impl FP {
/* Constructors */
pub const fn new() -> FP {
FP {
x: BIG::new(),
xes: 1,
}
}
pub fn new_int(a: isize) -> FP {
let mut f = FP::new();
if a<0 {
let mut m = BIG::new_ints(&rom::MODULUS);
m.inc(a); m.norm();
f.x.copy(&m);
} else {
f.x.inc(a);
}
f.nres();
f
}
pub fn new_copy(y: &FP) -> FP {
let mut f = FP::new();
f.x.copy(&(y.x));
f.xes = y.xes;
f
}
pub fn new_big(y: &BIG) -> FP {
let mut f = FP::new();
f.x.copy(y);
f.nres();
f
}
pub fn new_rand(rng: &mut RAND) -> FP {
let m = BIG::new_ints(&rom::MODULUS);
let w = BIG::randomnum(&m,rng);
FP::new_big(&w)
}
pub fn nres(&mut self) {
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
let r = BIG::new_ints(&rom::R2MODP);
let mut d = BIG::mul(&(self.x), &r);
self.x.copy(&FP::modulo(&mut d));
self.xes = 2;
} else {
let m = BIG::new_ints(&rom::MODULUS);
self.x.rmod(&m);
self.xes = 1;
}
}
/* convert back to regular form */
pub fn redc(&self) -> BIG {
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
let mut d = DBIG::new_scopy(&(self.x));
FP::modulo(&mut d)
} else {
BIG::new_copy(&(self.x))
}
}
/* reduce a DBIG to a BIG using the appropriate form of the modulus */
/* dd */
pub fn modulo(d: &mut DBIG) -> BIG {
if MODTYPE == PSEUDO_MERSENNE {
let mut b = BIG::new();
let mut t = d.split(MODBITS);
b.dcopy(&d);
let v = t.pmul(rom::MCONST as isize);
t.add(&b);
t.norm();
let tw = t.w[big::NLEN - 1];
t.w[big::NLEN - 1] &= TMASK;
t.w[0] += rom::MCONST * ((tw >> TBITS) + (v << (big::BASEBITS - TBITS)));
t.norm();
return t;
}
if MODTYPE == MONTGOMERY_FRIENDLY {
let mut b = BIG::new();
for i in 0..big::NLEN {
let x = d.w[i];
let tuple = BIG::muladd(x, rom::MCONST - 1, x, d.w[big::NLEN + i - 1]);
d.w[big::NLEN + i] += tuple.0;
d.w[big::NLEN + i - 1] = tuple.1;
}
b.zero();
for i in 0..big::NLEN {
b.w[i] = d.w[big::NLEN + i];
}
b.norm();
return b;
}
if MODTYPE == GENERALISED_MERSENNE {
// GoldiLocks Only
let mut b = BIG::new();
let t = d.split(MODBITS);
let rm2 = (MODBITS / 2) as usize;
b.dcopy(&d);
b.add(&t);
let mut dd = DBIG::new_scopy(&t);
dd.shl(rm2);
let mut tt = dd.split(MODBITS);
let lo = BIG::new_dcopy(&dd);
b.add(&tt);
b.add(&lo);
b.norm();
tt.shl(rm2);
b.add(&tt);
let carry = b.w[big::NLEN - 1] >> TBITS;
b.w[big::NLEN - 1] &= TMASK;
b.w[0] += carry;
let ix=(224 / big::BASEBITS) as usize;
b.w[ix] += carry << (224 % big::BASEBITS);
b.norm();
return b;
}
if MODTYPE == NOT_SPECIAL {
let m = BIG::new_ints(&rom::MODULUS);
return BIG::monty(&m, rom::MCONST, d);
}
BIG::new()
}
/* convert to string */
pub fn tostring(&self) -> String {
self.redc().tostring()
}
/* reduce this mod Modulus */
pub fn reduce(&mut self) {
let mut m = BIG::new_ints(&rom::MODULUS);
let mut r = BIG::new_copy(&m);
let mut sb: usize;
self.x.norm();
if self.xes > 16 {
let q = FP::quo(&self.x, &m);
let carry = r.pmul(q);
r.w[big::NLEN - 1] += carry << big::BASEBITS; // correction - put any carry out back in again
self.x.sub(&r);
self.x.norm();
sb = 2;
} else {
sb = FP::logb2((self.xes - 1) as u32);
}
m.fshl(sb);
while sb > 0 {
let sr = BIG::ssn(&mut r, &self.x, &mut m);
self.x.cmove(&r, 1 - sr);
sb -= 1;
}
self.xes = 1;
}
/* test this=0? */
pub fn iszilch(&self) -> bool {
let mut a = FP::new_copy(self);
a.reduce();
a.x.iszilch()
}
pub fn islarger(&self) -> isize {
if self.iszilch() {
return 0;
}
let mut sx = BIG::new_ints(&rom::MODULUS);
let fx=self.redc();
sx.sub(&fx); sx.norm();
BIG::comp(&fx,&sx)
}
pub fn tobytes(&self,b: &mut [u8]) {
self.redc().tobytes(b)
}
pub fn frombytes(b: &[u8]) -> FP {
let t=BIG::frombytes(b);
FP::new_big(&t)
}
/* test this=1? */
pub fn isunity(&self) -> bool {
let mut a = FP::new_copy(self);
a.reduce();
a.redc().isunity()
}
pub fn sign(&self) -> isize {
if BIG_ENDIAN_SIGN {
let mut m = BIG::new_ints(&rom::MODULUS);
m.dec(1);
m.fshr(1);
let mut n = FP::new_copy(self);
n.reduce();
let w=n.redc();
let cp=BIG::comp(&w,&m);
((cp+1)&2)>>1
} else {
let mut a = FP::new_copy(self);
a.reduce();
a.redc().parity()
}
}
/* copy from FP b */
pub fn copy(&mut self, b: &FP) {
self.x.copy(&(b.x));
self.xes = b.xes;
}
/* copy from BIG b */
pub fn bcopy(&mut self, b: &BIG) {
self.x.copy(&b);
self.nres();
}
/* set this=0 */
pub fn zero(&mut self) {
self.x.zero();
self.xes = 1;
}
/* set this=1 */
pub fn one(&mut self) {
self.x.one();
self.nres()
}
/* normalise this */
pub fn norm(&mut self) {
self.x.norm();
}
/* swap FPs depending on d */
pub fn cswap(&mut self, b: &mut FP, d: isize) {
self.x.cswap(&mut (b.x), d);
let mut c = d as i32;
c = !(c - 1);
let t = c & (self.xes ^ b.xes);
self.xes ^= t;
b.xes ^= t;
}
/* copy FPs depending on d */
pub fn cmove(&mut self, b: &FP, d: isize) {
self.x.cmove(&(b.x), d);
let c = d as i32;
self.xes ^= (self.xes ^ b.xes) & (-c);
}
/* this*=b mod Modulus */
pub fn mul(&mut self, b: &FP) {
if (self.xes as i64) * (b.xes as i64) > FEXCESS as i64 {
self.reduce()
}
let mut d = BIG::mul(&(self.x), &(b.x));
self.x.copy(&FP::modulo(&mut d));
self.xes = 2;
}
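// logb2 returns the number of bits needed to represent w, e.g. logb2(5) = 3:
// it first smears the highest set bit downwards (5 -> 0b111) and then counts
// the set bits with a SWAR popcount.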
fn logb2(w: u32) -> usize {
let mut v = w;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v = v - ((v >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
((((v + (v >> 4)) & 0xF0F0F0F).wrapping_mul(0x1010101)) >> 24) as usize
}
// find approximation to quotient of a/m
// Out by at most 2.
// Note that MAXXES is bounded to be 2-bits less than half a word
fn quo(n: &BIG, m: &BIG) -> isize {
let hb = arch::CHUNK / 2;
if TBITS < hb {
let sh = hb - TBITS;
let num = (n.w[big::NLEN - 1] << sh) | (n.w[big::NLEN - 2] >> (big::BASEBITS - sh));
let den = (m.w[big::NLEN - 1] << sh) | (m.w[big::NLEN - 2] >> (big::BASEBITS - sh));
(num / (den + 1)) as isize
} else {
let num = n.w[big::NLEN - 1];
let den = m.w[big::NLEN - 1];
(num / (den + 1)) as isize
}
}
/* this = -this mod Modulus */
pub fn neg(&mut self) {
let mut p = BIG::new_ints(&rom::MODULUS);
let sb = FP::logb2((self.xes - 1) as u32);
p.fshl(sb);
self.x.rsub(&p);
self.xes = 1 << ((sb as i32) + 1);
if self.xes > FEXCESS {
self.reduce()
}
}
/* this*=c mod Modulus, where c is a small int */
pub fn imul(&mut self, c: isize) {
let mut cc = c;
let mut s = false;
if cc < 0 {
cc = -cc;
s = true;
}
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
let mut d = self.x.pxmul(cc);
self.x.copy(&FP::modulo(&mut d));
self.xes = 2
} else if self.xes * (cc as i32) <= FEXCESS {
self.x.pmul(cc);
self.xes *= cc as i32;
} else {
let n = FP::new_int(cc);
self.mul(&n);
}
if s {
self.neg();
self.norm();
}
}
/* self*=self mod Modulus */
pub fn sqr(&mut self) {
if (self.xes as i64) * (self.xes as i64) > FEXCESS as i64 {
self.reduce()
}
let mut d = BIG::sqr(&(self.x));
self.x.copy(&FP::modulo(&mut d));
self.xes = 2
}
/* self+=b */
pub fn add(&mut self, b: &FP) {
self.x.add(&(b.x));
self.xes += b.xes;
if self.xes > FEXCESS {
self.reduce()
}
}
/* self+=self */
pub fn dbl(&mut self) {
self.x.dbl();
self.xes += self.xes;
if self.xes > FEXCESS {
self.reduce()
}
}
/* self-=b */
pub fn sub(&mut self, b: &FP) {
let mut n = FP::new_copy(b);
n.neg();
self.add(&n);
}
/* self=b-self */
pub fn rsub(&mut self, b: &FP) {
self.neg();
self.add(&b);
}
/* self/=2 mod Modulus */
pub fn div2(&mut self) {
let p = BIG::new_ints(&rom::MODULUS);
let pr = self.x.parity();
let mut w = BIG::new_copy(&self.x);
self.x.fshr(1);
w.add(&p); w.norm();
w.fshr(1);
self.x.cmove(&w,pr);
}
/* return jacobi symbol (this/Modulus) */
pub fn jacobi(&mut self) -> isize {
let p = BIG::new_ints(&rom::MODULUS);
let mut w = self.redc();
w.jacobi(&p)
}
/* return TRUE if self==a */
pub fn equals(&self, a: &FP) -> bool {
let mut f = FP::new_copy(self);
let mut s = FP::new_copy(a);
f.reduce();
s.reduce();
BIG::comp(&(f.x), &(s.x)) == 0
}
/* return self^e mod Modulus */
// Could leak size of e
// but it is not used here with a secret exponent e
pub fn pow(&self, e: &BIG) -> FP {
let mut tb: [FP; 16] = [
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
];
const CT: usize = 1 + (big::NLEN * (big::BASEBITS as usize) + 3) / 4;
let mut w: [i8; CT] = [0; CT];
let mut s = FP::new_copy(&self);
s.norm();
let mut t = BIG::new_copy(e);
t.norm();
let nb = 1 + (t.nbits() + 3) / 4;
for i in 0..nb {
let lsbs = t.lastbits(4);
t.dec(lsbs);
t.norm();
w[i] = lsbs as i8;
t.fshr(4);
}
tb[0].one();
tb[1].copy(&s);
let mut c = FP::new();
for i in 2..16 {
c.copy(&tb[i - 1]);
tb[i].copy(&c);
tb[i].mul(&s);
}
let mut r = FP::new_copy(&tb[w[nb - 1] as usize]);
for i in (0..nb - 1).rev() {
r.sqr();
r.sqr();
r.sqr();
r.sqr();
r.mul(&tb[w[i] as usize])
}
r.reduce();
r
}
// See eprint paper https://eprint.iacr.org/2018/1038
// return this^(p-3)/4 or this^(p-5)/8
pub fn fpow(&self) -> FP {
let ac: [isize; 11] = [1, 2, 3, 6, 12, 15, 30, 60, 120, 240, 255];
let mut xp: [FP; 11] = [
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
FP::new(),
];
// phase 1
let mut t = FP::new();
xp[0].copy(&self); // 1
xp[1].copy(&self);
xp[1].sqr(); // 2
t.copy(&xp[1]);
xp[2].copy(&t);
xp[2].mul(&self); // 3
t.copy(&xp[2]);
xp[3].copy(&t);
xp[3].sqr(); // 6
t.copy(&xp[3]);
xp[4].copy(&t);
xp[4].sqr(); // 12
t.copy(&xp[4]);
t.mul(&xp[2]);
xp[5].copy(&t); // 15
t.copy(&xp[5]);
xp[6].copy(&t);
xp[6].sqr(); // 30
t.copy(&xp[6]);
xp[7].copy(&t);
xp[7].sqr(); // 60
t.copy(&xp[7]);
xp[8].copy(&t);
xp[8].sqr(); // 120
t.copy(&xp[8]);
xp[9].copy(&t);
xp[9].sqr(); // 240
t.copy(&xp[9]);
t.mul(&xp[5]);
xp[10].copy(&t); // 255
let mut n = MODBITS as isize;
let mut c: isize;
if MODTYPE == GENERALISED_MERSENNE {
// Goldilocks ONLY
n /= 2;
}
let e = PM1D2 as isize;
n-=e+1;
c=((rom::MCONST as isize)+(1<<e)+1)/(1<<(e+1));
let mut nd=0;
while c%2==0 {
c/=2;
n-=1;
nd+=1;
}
let mut bw = 0;
let mut w = 1;
while w < c {
w *= 2;
bw += 1;
}
let mut k = w - c;
let mut i = 10;
let mut key = FP::new();
if k != 0 {
while ac[i] > k {
i -= 1;
}
key.copy(&xp[i]);
k -= ac[i];
}
while k != 0 {
i -= 1;
if ac[i] > k {
continue;
}
key.mul(&xp[i]);
k -= ac[i];
}
// phase 2
t.copy(&xp[2]);
xp[1].copy(&t);
t.copy(&xp[5]);
xp[2].copy(&t);
t.copy(&xp[10]);
xp[3].copy(&t);
let mut j = 3;
let mut m = 8;
let nw = n - bw;
let mut r = FP::new();
while 2 * m < nw {
t.copy(&xp[j]);
j += 1;
for _ in 0..m {
t.sqr();
}
r.copy(&xp[j - 1]);
r.mul(&t);
xp[j].copy(&r);
m *= 2;
}
let mut lo = nw - m;
r.copy(&xp[j]);
while lo != 0 {
m /= 2;
j -= 1;
if lo < m {
continue;
}
lo -= m;
t.copy(&r);
for _ in 0..m {
t.sqr();
}
r.copy(&t);
r.mul(&xp[j]);
}
// phase 3
if bw != 0 {
for _ in 0..bw {
r.sqr();
}
r.mul(&key);
}
if MODTYPE == GENERALISED_MERSENNE {
// Goldilocks ONLY
key.copy(&r);
r.sqr();
r.mul(&self);
for _ in 0..n + 1 {
r.sqr();
}
r.mul(&key);
}
while nd>0 {
r.sqr();
nd-=1;
}
r
}
/* Pseudo_inverse square root */
pub fn progen(&mut self) {
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
self.copy(&self.fpow());
return;
}
let e=PM1D2 as usize;
let mut m = BIG::new_ints(&rom::MODULUS);
m.dec(1);
m.shr(e);
m.dec(1);
m.fshr(1);
self.copy(&self.pow(&m));
}
/* self=1/self mod Modulus */
pub fn inverse(&mut self,take_hint: Option<&FP>) {
let e=PM1D2 as isize;
self.norm();
let mut s=FP::new_copy(self);
for _ in 0..e-1 {
s.sqr();
s.mul(self);
}
if let Some(hint) = take_hint {
self.copy(&hint);
} else {
self.progen();
}
for _ in 0..=e {
self.sqr();
}
self.mul(&s);
self.reduce();
}
/* Test for Quadratic Residue */
pub fn qr(&self,give_hint: Option<&mut FP>) -> isize {
let e=PM1D2 as isize;
let mut r=FP::new_copy(self);
r.progen();
if let Some(hint) = give_hint {
hint.copy(&r);
}
r.sqr();
r.mul(self);
for _ in 0..e-1 {
r.sqr();
}
r.isunity() as isize
}
pub fn invsqrt(&self,i: &mut FP,s: &mut FP) -> isize {
let mut h=FP::new();
let qr=self.qr(Some(&mut h));
s.copy(&self.sqrt(Some(&h)));
i.copy(self);
i.inverse(Some(&h));
qr
}
// Two for the price of One - See Hamburg https://eprint.iacr.org/2012/309.pdf
// Calculate inverse of i and square root of s, return QR
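// Sketch of the algebra (explanatory note): with inputs i0 and s0, form
// w = s0*i0 and t = s0*i0^2; invsqrt(t) yields 1/t and sqrt(t), so afterwards
// i = w/t = 1/i0 and s = sqrt(t)/i0 = sqrt(s0) (when s0 is a QR).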
pub fn tpo(mut i: &mut FP,mut s: &mut FP) -> isize {
let mut w = FP::new_copy(s);
let mut t = FP::new_copy(i);
w.mul(&i);
t.mul(&w);
let qr=t.invsqrt(&mut i,&mut s);
i.mul(&w);
s.mul(&i);
qr
}
/* return sqrt(this) mod Modulus */
pub fn sqrt(&self,take_hint: Option<&FP>) -> FP {
let e=PM1D2 as isize;
let mut g=FP::new_copy(self);
if let Some(hint) = take_hint {
g.copy(&hint);
} else {
g.progen();
}
let m = BIG::new_ints(&rom::ROI);
let mut v=FP::new_big(&m);
let mut t=FP::new_copy(&g);
t.sqr();
t.mul(self);
let mut r=FP::new_copy(self);
r.mul(&g);
let mut b=FP::new_copy(&t);
for k in (2..=e).rev() //(int k=e;k>1;k--)
{
for _ in 1..k-1 {
b.sqr();
}
let u=!b.isunity() as isize;
g.copy(&r); g.mul(&v);
r.cmove(&g,u);
v.sqr();
g.copy(&t); g.mul(&v);
t.cmove(&g,u);
b.copy(&t);
}
let sgn=r.sign();
let mut nr=FP::new_copy(&r);
nr.neg(); nr.norm();
r.cmove(&nr,sgn);
r
}
}

View File

@ -0,0 +1,651 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::fp::FP;
use crate::bls48581::fp2::FP2;
use crate::bls48581::fp8::FP8;
#[derive(Copy, Clone)]
pub struct FP16 {
a: FP8,
b: FP8,
}
impl std::fmt::Debug for FP16 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for FP16 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl FP16 {
pub const fn new() -> FP16 {
FP16 {
a: FP8::new(),
b: FP8::new(),
}
}
pub fn new_int(a: isize) -> FP16 {
let mut f = FP16::new();
f.a.copy(&FP8::new_int(a));
f.b.zero();
f
}
pub fn new_copy(x: &FP16) -> FP16 {
let mut f = FP16::new();
f.a.copy(&x.a);
f.b.copy(&x.b);
f
}
pub fn new_fp8s(c: &FP8, d: &FP8) -> FP16 {
let mut f = FP16::new();
f.a.copy(c);
f.b.copy(d);
f
}
pub fn new_fp8(c: &FP8) -> FP16 {
let mut f = FP16::new();
f.a.copy(c);
f.b.zero();
f
}
pub fn set_fp8s(&mut self, c: &FP8, d: &FP8) {
self.a.copy(&c);
self.b.copy(&d);
}
pub fn set_fp8(&mut self, c: &FP8) {
self.a.copy(&c);
self.b.zero();
}
pub fn set_fp8h(&mut self, c: &FP8) {
self.b.copy(&c);
self.a.zero();
}
/* reduce components mod Modulus */
pub fn reduce(&mut self) {
self.a.reduce();
self.b.reduce();
}
/* normalise components of w */
pub fn norm(&mut self) {
self.a.norm();
self.b.norm();
}
pub fn cmove(&mut self, g: &FP16, d: isize) {
self.a.cmove(&g.a, d);
self.b.cmove(&g.b, d);
}
/* test self=0 ? */
pub fn iszilch(&self) -> bool {
self.a.iszilch() && self.b.iszilch()
}
pub fn tobytes(&self,bf: &mut [u8]) {
const MB:usize = 4*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
self.b.tobytes(&mut t);
for i in 0..MB {
bf[i]=t[i];
}
self.a.tobytes(&mut t);
for i in 0..MB {
bf[i+MB]=t[i];
}
}
pub fn frombytes(bf: &[u8]) -> FP16 {
const MB:usize = 8*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
for i in 0..MB {
t[i]=bf[i];
}
let tb=FP8::frombytes(&t);
for i in 0..MB {
t[i]=bf[i+MB];
}
let ta=FP8::frombytes(&t);
FP16::new_fp8s(&ta,&tb)
}
/* test self=1 ? */
pub fn isunity(&self) -> bool {
let one = FP8::new_int(1);
self.a.equals(&one) && self.b.iszilch()
}
/* test if w is real? That is, in a+ib, test whether b is zero */
pub fn isreal(&mut self) -> bool {
self.b.iszilch()
}
/* extract real part a */
pub fn real(&self) -> FP8 {
FP8::new_copy(&self.a)
}
pub fn geta(&self) -> FP8 {
FP8::new_copy(&self.a)
}
/* extract imaginary part b */
pub fn getb(&self) -> FP8 {
FP8::new_copy(&self.b)
}
/* test self=x */
pub fn equals(&self, x: &FP16) -> bool {
self.a.equals(&x.a) && self.b.equals(&x.b)
}
/* copy self=x */
pub fn copy(&mut self, x: &FP16) {
self.a.copy(&x.a);
self.b.copy(&x.b);
}
/* set self=0 */
pub fn zero(&mut self) {
self.a.zero();
self.b.zero();
}
/* set self=1 */
pub fn one(&mut self) {
self.a.one();
self.b.zero();
}
/* negate self mod Modulus */
pub fn neg(&mut self) {
self.norm();
let mut m = FP8::new_copy(&self.a);
let mut t = FP8::new();
m.add(&self.b);
m.neg();
t.copy(&m);
t.add(&self.b);
self.b.copy(&m);
self.b.add(&self.a);
self.a.copy(&t);
self.norm();
}
/* set to a-ib */
pub fn conj(&mut self) {
self.b.neg();
self.norm();
}
/* self=-conjugate(self) */
pub fn nconj(&mut self) {
self.a.neg();
self.norm();
}
/* self+=a */
pub fn add(&mut self, x: &FP16) {
self.a.add(&x.a);
self.b.add(&x.b);
}
pub fn padd(&mut self, x: &FP8) {
self.a.add(x);
}
pub fn dbl(&mut self) {
self.a.dbl();
self.b.dbl();
}
/* self-=a */
pub fn sub(&mut self, x: &FP16) {
let mut m = FP16::new_copy(x);
m.neg();
self.add(&m);
}
/* self=x-self */
pub fn rsub(&mut self, x: &FP16) {
self.neg();
self.add(x);
}
/* self*=s, where s is an FP8 */
pub fn pmul(&mut self, s: &FP8) {
self.a.mul(s);
self.b.mul(s);
}
/* self*=s, where s is an FP2 */
pub fn qmul(&mut self, s: &FP2) {
self.a.qmul(s);
self.b.qmul(s);
}
/* self*=s, where s is an FP */
pub fn tmul(&mut self, s: &FP) {
self.a.tmul(s);
self.b.tmul(s);
}
/* self*=i, where i is an int */
pub fn imul(&mut self, c: isize) {
self.a.imul(c);
self.b.imul(c);
}
/* self*=self */
pub fn sqr(&mut self) {
let mut t1 = FP8::new_copy(&self.a);
let mut t2 = FP8::new_copy(&self.b);
let mut t3 = FP8::new_copy(&self.a);
t3.mul(&self.b);
t1.add(&self.b);
t2.times_i();
t2.add(&self.a);
t1.norm();
t2.norm();
self.a.copy(&t1);
self.a.mul(&t2);
t2.copy(&t3);
t2.times_i();
t2.add(&t3);
t2.norm();
t2.neg();
self.a.add(&t2);
t3.dbl();
self.b.copy(&t3);
self.norm();
}
/* self*=y */
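// Karatsuba-style multiplication over the quadratic extension: only three FP8
// products are formed (t1 = a*y.a, t2 = b*y.b and t4 = (a+b)*(y.a+y.b)); the
// cross term is recovered as t4 - t1 - t2 and the constant term as t1 + i*t2,
// with i (applied via times_i) the FP8 element adjoined to build FP16.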
pub fn mul(&mut self, y: &FP16) {
let mut t1 = FP8::new_copy(&self.a);
let mut t2 = FP8::new_copy(&self.b);
let mut t3 = FP8::new();
let mut t4 = FP8::new_copy(&self.b);
t1.mul(&y.a);
t2.mul(&y.b);
t3.copy(&y.b);
t3.add(&y.a);
t4.add(&self.a);
t3.norm();
t4.norm();
t4.mul(&t3);
t3.copy(&t1);
t3.neg();
t4.add(&t3);
t4.norm();
t3.copy(&t2);
t3.neg();
self.b.copy(&t4);
self.b.add(&t3);
t2.times_i();
self.a.copy(&t2);
self.a.add(&t1);
self.norm();
}
/* output to hex string */
pub fn tostring(&self) -> String {
format!("[{},{}]", self.a.tostring(), self.b.tostring())
}
/* self=1/self */
pub fn inverse(&mut self) {
let mut t1 = FP8::new_copy(&self.a);
let mut t2 = FP8::new_copy(&self.b);
t1.sqr();
t2.sqr();
t2.times_i();
t2.norm();
t1.sub(&t2);
t1.norm();
t1.inverse(None);
self.a.mul(&t1);
t1.neg();
t1.norm();
self.b.mul(&t1);
}
/* self*=i where i = sqrt(-1+sqrt(-1)) */
pub fn times_i(&mut self) {
let mut s = FP8::new_copy(&self.b);
let t = FP8::new_copy(&self.a);
s.times_i();
self.a.copy(&s);
self.b.copy(&t);
self.norm();
}
pub fn times_i2(&mut self) {
self.a.times_i();
self.b.times_i();
}
pub fn times_i4(&mut self) {
self.a.times_i2();
self.b.times_i2();
}
/* self=self^p using Frobenius */
pub fn frob(&mut self, f: &FP2) {
let mut ff = FP2::new_copy(f);
ff.sqr();
ff.norm();
self.a.frob(&ff);
self.b.frob(&ff);
self.b.qmul(f);
self.b.times_i();
}
/* return this^e */
pub fn pow(&self, e: &BIG) -> FP16 {
let mut w = FP16::new_copy(self);
w.norm();
let mut z = BIG::new_copy(&e);
let mut r = FP16::new_int(1);
z.norm();
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
r
}
/* XTR xtr_a function */
/*
pub fn xtr_a(&mut self, w: &FP16, y: &FP16, z: &FP16) {
let mut r = FP16::new_copy(w);
let mut t = FP16::new_copy(w);
r.sub(y);
r.norm();
r.pmul(&self.a);
t.add(y);
t.norm();
t.pmul(&self.b);
t.times_i();
self.copy(&r);
self.add(&t);
self.add(z);
self.norm();
}
*/
/* XTR xtr_d function */
/*
pub fn xtr_d(&mut self) {
let mut w = FP16::new_copy(self);
self.sqr();
w.conj();
w.dbl();
w.norm();
self.sub(&w);
self.reduce();
}
*/
/* r=x^n using XTR method on traces of FP48s */
/*
pub fn xtr_pow(&self, n: &BIG) -> FP16 {
let mut sf = FP16::new_copy(self);
sf.norm();
let mut a = FP16::new_int(3);
let mut b = FP16::new_copy(&sf);
let mut c = FP16::new_copy(&b);
c.xtr_d();
let mut t = FP16::new();
let mut r = FP16::new();
let par = n.parity();
let mut v = BIG::new_copy(n);
v.norm();
v.fshr(1);
if par == 0 {
v.dec(1);
v.norm();
}
let nb = v.nbits();
for i in (0..nb).rev() {
if v.bit(i) != 1 {
t.copy(&b);
sf.conj();
c.conj();
b.xtr_a(&a, &sf, &c);
sf.conj();
c.copy(&t);
c.xtr_d();
a.xtr_d();
} else {
t.copy(&a);
t.conj();
a.copy(&b);
a.xtr_d();
b.xtr_a(&c, &sf, &t);
c.xtr_d();
}
}
if par == 0 {
r.copy(&c)
} else {
r.copy(&b)
}
r.reduce();
return r;
}
*/
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP48s. See Stam thesis. */
/*
pub fn xtr_pow2(&mut self, ck: &FP16, ckml: &FP16, ckm2l: &FP16, a: &BIG, b: &BIG) -> FP16 {
let mut e = BIG::new_copy(a);
let mut d = BIG::new_copy(b);
let mut w = BIG::new();
d.norm();
e.norm();
let mut cu = FP16::new_copy(ck); // can probably be passed in w/o copying
let mut cv = FP16::new_copy(self);
let mut cumv = FP16::new_copy(ckml);
let mut cum2v = FP16::new_copy(ckm2l);
let mut r = FP16::new();
let mut t = FP16::new();
let mut f2: usize = 0;
while d.parity() == 0 && e.parity() == 0 {
d.fshr(1);
e.fshr(1);
f2 += 1;
}
while BIG::comp(&d, &e) != 0 {
if BIG::comp(&d, &e) > 0 {
w.copy(&e);
w.imul(4);
w.norm();
if BIG::comp(&d, &w) <= 0 {
w.copy(&d);
d.copy(&e);
e.rsub(&w);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cum2v.conj();
cumv.copy(&cv);
cv.copy(&cu);
cu.copy(&t);
} else {
if d.parity() == 0 {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
} else {
if e.parity() == 1 {
d.sub(&e);
d.norm();
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cu.xtr_d();
cum2v.copy(&cv);
cum2v.xtr_d();
cum2v.conj();
cv.copy(&t);
} else {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
}
}
}
}
if BIG::comp(&d, &e) < 0 {
w.copy(&d);
w.imul(4);
w.norm();
if BIG::comp(&e, &w) <= 0 {
e.sub(&d);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cumv.copy(&cu);
cu.copy(&t);
} else {
if e.parity() == 0 {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
} else {
if d.parity() == 1 {
w.copy(&e);
e.copy(&d);
w.sub(&d);
w.norm();
d.copy(&w);
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cumv.conj();
cum2v.copy(&cu);
cum2v.xtr_d();
cum2v.conj();
cu.copy(&cv);
cu.xtr_d();
cv.copy(&t);
} else {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
}
}
}
}
}
r.copy(&cv);
r.xtr_a(&cu, &cumv, &cum2v);
for _ in 0..f2 {
r.xtr_d()
}
r = r.xtr_pow(&mut d);
return r;
}
*/
}

View File

@ -0,0 +1,516 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::dbig::DBIG;
use crate::bls48581::fp;
use crate::bls48581::fp::FP;
use crate::bls48581::rom;
use crate::rand::RAND;
#[derive(Copy, Clone)]
pub struct FP2 {
a: FP,
b: FP,
}
impl std::fmt::Debug for FP2 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for FP2 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl FP2 {
pub const fn new() -> FP2 {
FP2 {
a: FP::new(),
b: FP::new(),
}
}
pub fn new_int(a: isize) -> FP2 {
let mut f = FP2::new();
f.a.copy(&FP::new_int(a));
f.b.zero();
f
}
pub fn new_ints(a: isize, b: isize) -> FP2 {
let mut f = FP2::new();
f.a.copy(&FP::new_int(a));
f.b.copy(&FP::new_int(b));
f
}
pub fn new_copy(x: &FP2) -> FP2 {
let mut f = FP2::new();
f.a.copy(&x.a);
f.b.copy(&x.b);
f
}
pub fn new_fps(c: &FP, d: &FP) -> FP2 {
let mut f = FP2::new();
f.a.copy(c);
f.b.copy(d);
f
}
pub fn new_bigs(c: &BIG, d: &BIG) -> FP2 {
let mut f = FP2::new();
f.a.copy(&FP::new_big(c));
f.b.copy(&FP::new_big(d));
f
}
pub fn new_fp(c: &FP) -> FP2 {
let mut f = FP2::new();
f.a.copy(c);
f.b.zero();
f
}
pub fn new_big(c: &BIG) -> FP2 {
let mut f = FP2::new();
f.a.copy(&FP::new_big(c));
f.b.zero();
f
}
pub fn new_rand(rng: &mut RAND) -> FP2 {
FP2::new_fps(&FP::new_rand(rng),&FP::new_rand(rng))
}
/* reduce components mod Modulus */
pub fn reduce(&mut self) {
self.a.reduce();
self.b.reduce();
}
/* normalise components of w */
pub fn norm(&mut self) {
self.a.norm();
self.b.norm();
}
/* test self=0 ? */
pub fn iszilch(&self) -> bool {
self.a.iszilch() && self.b.iszilch()
}
pub fn islarger(&self) -> isize {
if self.iszilch() {
return 0;
}
let cmp=self.b.islarger();
if cmp!=0 {
return cmp;
}
self.a.islarger()
}
pub fn tobytes(&self,bf: &mut [u8]) {
const MB:usize = big::MODBYTES as usize;
let mut t: [u8; MB] = [0; MB];
self.b.tobytes(&mut t);
for i in 0..MB {
bf[i]=t[i];
}
self.a.tobytes(&mut t);
for i in 0..MB {
bf[i+MB]=t[i];
}
}
pub fn frombytes(bf: &[u8]) -> FP2 {
const MB:usize = big::MODBYTES as usize;
let mut t: [u8; MB] = [0; MB];
for i in 0..MB {
t[i]=bf[i];
}
let tb=FP::frombytes(&t);
for i in 0..MB {
t[i]=bf[i+MB];
}
let ta=FP::frombytes(&t);
FP2::new_fps(&ta,&tb)
}
pub fn cmove(&mut self, g: &FP2, d: isize) {
self.a.cmove(&g.a, d);
self.b.cmove(&g.b, d);
}
/* test self=1 ? */
pub fn isunity(&self) -> bool {
let one = FP::new_int(1);
self.a.equals(&one) && self.b.iszilch()
}
/* test self=x */
pub fn equals(&self, x: &FP2) -> bool {
self.a.equals(&x.a) && self.b.equals(&x.b)
}
/* extract a */
#[allow(non_snake_case)]
pub fn getA(&mut self) -> FP {
self.a
}
/* extract b */
#[allow(non_snake_case)]
pub fn getB(&mut self) -> FP {
self.b
}
/* extract a */
pub fn geta(&mut self) -> BIG {
self.a.redc()
}
/* extract b */
pub fn getb(&mut self) -> BIG {
self.b.redc()
}
/* copy self=x */
pub fn copy(&mut self, x: &FP2) {
self.a.copy(&x.a);
self.b.copy(&x.b);
}
pub fn set_fp(&mut self, x: &FP) {
self.a.copy(x);
self.b.zero();
}
/* set self=0 */
pub fn zero(&mut self) {
self.a.zero();
self.b.zero();
}
/* set self=1 */
pub fn one(&mut self) {
self.a.one();
self.b.zero();
}
pub fn sign(&self) -> isize {
let mut p1=self.a.sign();
let mut p2=self.b.sign();
if fp::BIG_ENDIAN_SIGN {
let u=self.b.iszilch() as isize;
p2^=(p1^p2)&u;
p2
} else {
let u=self.a.iszilch() as isize;
p1^=(p1^p2)&u;
p1
}
}
/* negate self mod Modulus */
pub fn neg(&mut self) {
let mut m = FP::new_copy(&self.a);
let mut t = FP::new();
m.add(&self.b);
m.neg();
t.copy(&m);
t.add(&self.b);
self.b.copy(&m);
self.b.add(&self.a);
self.a.copy(&t);
}
/* set to a-ib */
pub fn conj(&mut self) {
self.b.neg();
self.b.norm();
}
/* self+=a */
pub fn add(&mut self, x: &FP2) {
self.a.add(&x.a);
self.b.add(&x.b);
}
pub fn dbl(&mut self) {
self.a.dbl();
self.b.dbl();
}
/* self-=a */
pub fn sub(&mut self, x: &FP2) {
let mut m = FP2::new_copy(x);
m.neg();
self.add(&m);
}
/* self=a-self */
pub fn rsub(&mut self, x: &FP2) {
self.neg();
self.add(x);
}
/* self*=s, where s is an FP */
pub fn pmul(&mut self, s: &FP) {
self.a.mul(s);
self.b.mul(s);
}
/* self*=i, where i is an int */
pub fn imul(&mut self, c: isize) {
self.a.imul(c);
self.b.imul(c);
}
/* self*=self */
pub fn sqr(&mut self) {
let mut w1 = FP::new_copy(&self.a);
let mut w3 = FP::new_copy(&self.a);
let mut mb = FP::new_copy(&self.b);
w1.add(&self.b);
w3.add(&self.a);
w3.norm();
self.b.mul(&w3);
mb.neg();
self.a.add(&mb);
w1.norm();
self.a.norm();
self.a.mul(&w1);
}
/* this*=y */
pub fn mul(&mut self, y: &FP2) {
if ((self.a.xes + self.b.xes) as i64) * ((y.a.xes + y.b.xes) as i64) > fp::FEXCESS as i64 {
if self.a.xes > 1 {
self.a.reduce()
}
if self.b.xes > 1 {
self.b.reduce()
}
}
let p = BIG::new_ints(&rom::MODULUS);
let mut pr = DBIG::new();
pr.ucopy(&p);
let mut c = BIG::new_copy(&(self.a.x));
let mut d = BIG::new_copy(&(y.a.x));
let mut a = BIG::mul(&self.a.x, &y.a.x);
let mut b = BIG::mul(&self.b.x, &y.b.x);
c.add(&self.b.x);
c.norm();
d.add(&y.b.x);
d.norm();
let mut e = BIG::mul(&c, &d);
let mut f = DBIG::new_copy(&a);
f.add(&b);
b.rsub(&pr);
a.add(&b);
a.norm();
e.sub(&f);
e.norm();
self.a.x.copy(&FP::modulo(&mut a));
self.a.xes = 3;
self.b.x.copy(&FP::modulo(&mut e));
self.b.xes = 2;
}
/*
pub fn pow(&mut self, e: &BIG) {
let mut w = FP2::new_copy(self);
let mut z = BIG::new_copy(&e);
let mut r = FP2::new_int(1);
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
self.copy(&r);
}*/
pub fn qr(&mut self,h:Option<&mut FP>) -> isize {
let mut c=FP2::new_copy(self);
c.conj();
c.mul(self);
c.getA().qr(h)
}
/* sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2)) */
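// Derivation sketch: if (x+iy)^2 = a+ib with i^2 = -1, then x^2 - y^2 = a and
// 2xy = b, giving x^2 = (a + sqrt(a^2+b^2))/2 and y = b/(2x). The code below
// computes w2 = (a + sqrt(a^2+b^2))/2 and w1 = b/2, then uses the hint to share
// one exponentiation between sqrt(w2) and 1/w2, with a constant-time swap when
// w2 turns out not to be a quadratic residue.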
pub fn sqrt(&mut self,h:Option<&FP>) {
if self.iszilch() {
return;
}
let mut w1 = FP::new_copy(&self.b);
let mut w2 = FP::new_copy(&self.a);
let mut w3 = FP::new_copy(&self.a);
let mut w4 = FP::new();
let mut hint = FP::new();
w1.sqr();
w2.sqr();
w1.add(&w2); w1.norm();
w2.copy(&w1.sqrt(h));
w1.copy(&w2);
w2.copy(&self.a);
w2.add(&w1);
w2.norm();
w2.div2();
w1.copy(&self.b); w1.div2();
let qr=w2.qr(Some(&mut hint));
// tweak hint
w3.copy(&hint); w3.neg(); w3.norm();
w4.copy(&w2); w4.neg(); w4.norm();
w2.cmove(&w4,1-qr);
hint.cmove(&w3,1-qr);
self.a.copy(&w2.sqrt(Some(&hint)));
w3.copy(&w2); w3.inverse(Some(&hint));
w3.mul(&self.a);
self.b.copy(&w3); self.b.mul(&w1);
w4.copy(&self.a);
self.a.cmove(&self.b,1-qr);
self.b.cmove(&w4,1-qr);
/*
self.a.copy(&w2.sqrt(Some(&hint)));
w3.copy(&w2); w3.inverse(Some(&hint));
w3.mul(&self.a);
self.b.copy(&w3); self.b.mul(&w1);
hint.neg(); hint.norm();
w2.neg(); w2.norm();
w4.copy(&w2.sqrt(Some(&hint)));
w3.copy(&w2); w3.inverse(Some(&hint));
w3.mul(&w4);
w3.mul(&w1);
self.a.cmove(&w3,1-qr);
self.b.cmove(&w4,1-qr);
*/
let sgn=self.sign();
let mut nr=FP2::new_copy(&self);
nr.neg(); nr.norm();
self.cmove(&nr,sgn);
}
/* output to hex string */
pub fn tostring(&self) -> String {
format!("[{},{}]", self.a.tostring(), self.b.tostring())
}
/* self=1/self */
pub fn inverse(&mut self,h:Option<&FP>) {
self.norm();
let mut w1 = FP::new_copy(&self.a);
let mut w2 = FP::new_copy(&self.b);
w1.sqr();
w2.sqr();
w1.add(&w2);
w1.inverse(h);
self.a.mul(&w1);
w1.neg();
w1.norm();
self.b.mul(&w1);
}
/* self/=2 */
pub fn div2(&mut self) {
self.a.div2();
self.b.div2();
}
/* self*=sqrt(-1) */
pub fn times_i(&mut self) {
let z = FP::new_copy(&self.a);
self.a.copy(&self.b);
self.a.neg();
self.b.copy(&z);
}
/* w*=(1+sqrt(-1)) */
/* where X^2-(1+sqrt(-1)) is irreducible for FP4, assumes p=3 mod 8 */
pub fn mul_ip(&mut self) {
let mut t = FP2::new_copy(self);
let mut i = fp::QNRI;
self.times_i();
while i > 0 {
t.dbl();
t.norm();
i -= 1;
}
self.add(&t);
if fp::TOWER == fp::POSITOWER {
self.norm();
self.neg();
}
}
/* w/=(1+sqrt(-1)) */
pub fn div_ip(&mut self) {
let mut z = FP2::new_ints(1 << fp::QNRI, 1);
z.inverse(None);
self.norm();
self.mul(&z);
if fp::TOWER == fp::POSITOWER {
self.neg();
self.norm();
}
}
}

View File

@ -0,0 +1,784 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::fp;
use crate::bls48581::fp::FP;
use crate::bls48581::fp2::FP2;
use crate::rand::RAND;
#[allow(unused_imports)]
use crate::bls48581::rom;
#[derive(Copy, Clone)]
pub struct FP4 {
a: FP2,
b: FP2,
}
impl std::fmt::Debug for FP4 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for FP4 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl FP4 {
pub const fn new() -> FP4 {
FP4 {
a: FP2::new(),
b: FP2::new(),
}
}
pub fn new_int(a: isize) -> FP4 {
let mut f = FP4::new();
f.a.copy(&FP2::new_int(a));
f.b.zero();
f
}
pub fn new_ints(a: isize,b: isize) -> FP4 {
let mut f = FP4::new();
f.a.copy(&FP2::new_int(a));
f.b.copy(&FP2::new_int(b));
f
}
pub fn new_copy(x: &FP4) -> FP4 {
let mut f = FP4::new();
f.a.copy(&x.a);
f.b.copy(&x.b);
f
}
pub fn new_fp2s(c: &FP2, d: &FP2) -> FP4 {
let mut f = FP4::new();
f.a.copy(c);
f.b.copy(d);
f
}
pub fn new_fp2(c: &FP2) -> FP4 {
let mut f = FP4::new();
f.a.copy(c);
f.b.zero();
f
}
pub fn new_fp(c: &FP) -> FP4 {
let mut f = FP4::new();
f.a.set_fp(c);
f.b.zero();
f
}
pub fn new_rand(rng: &mut RAND) -> FP4 {
FP4::new_fp2s(&FP2::new_rand(rng),&FP2::new_rand(rng))
}
pub fn set_fp2s(&mut self, c: &FP2, d: &FP2) {
self.a.copy(&c);
self.b.copy(&d);
}
pub fn set_fp(&mut self, c: &FP) {
self.a.set_fp(&c);
self.b.zero();
}
pub fn set_fp2(&mut self, c: &FP2) {
self.a.copy(&c);
self.b.zero();
}
pub fn set_fp2h(&mut self, c: &FP2) {
self.b.copy(&c);
self.a.zero();
}
/* reduce components mod Modulus */
pub fn reduce(&mut self) {
self.a.reduce();
self.b.reduce();
}
/* normalise components of w */
pub fn norm(&mut self) {
self.a.norm();
self.b.norm();
}
pub fn cmove(&mut self, g: &FP4, d: isize) {
self.a.cmove(&g.a, d);
self.b.cmove(&g.b, d);
}
/* test self=0 ? */
pub fn iszilch(&self) -> bool {
self.a.iszilch() && self.b.iszilch()
}
pub fn islarger(&self) -> isize {
if self.iszilch() {
0
} else {
let cmp=self.b.islarger();
if cmp!=0 {
cmp
} else {
self.a.islarger()
}
}
}
pub fn tobytes(&self,bf: &mut [u8]) {
const MB:usize = 2*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
self.b.tobytes(&mut t);
for i in 0..MB {
bf[i]=t[i];
}
self.a.tobytes(&mut t);
for i in 0..MB {
bf[i+MB]=t[i];
}
}
pub fn frombytes(bf: &[u8]) -> FP4 {
const MB:usize = 2*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
for i in 0..MB {
t[i]=bf[i];
}
let tb=FP2::frombytes(&t);
for i in 0..MB {
t[i]=bf[i+MB];
}
let ta=FP2::frombytes(&t);
FP4::new_fp2s(&ta,&tb)
}
/* test self=1 ? */
pub fn isunity(&self) -> bool {
let one = FP2::new_int(1);
self.a.equals(&one) && self.b.iszilch()
}
/* test if w is real? That is, in a+ib, test whether b is zero */
pub fn isreal(&mut self) -> bool {
self.b.iszilch()
}
/* extract real part a */
pub fn real(&self) -> FP2 {
FP2::new_copy(&self.a)
}
pub fn geta(&self) -> FP2 {
FP2::new_copy(&self.a)
}
/* extract imaginary part b */
pub fn getb(&self) -> FP2 {
FP2::new_copy(&self.b)
}
/* test self=x */
pub fn equals(&self, x: &FP4) -> bool {
self.a.equals(&x.a) && self.b.equals(&x.b)
}
/* copy self=x */
pub fn copy(&mut self, x: &FP4) {
self.a.copy(&x.a);
self.b.copy(&x.b);
}
/* set self=0 */
pub fn zero(&mut self) {
self.a.zero();
self.b.zero();
}
/* set self=1 */
pub fn one(&mut self) {
self.a.one();
self.b.zero();
}
pub fn sign(&self) -> isize {
let mut p1=self.a.sign();
let mut p2=self.b.sign();
if fp::BIG_ENDIAN_SIGN {
let u=self.b.iszilch() as isize;
p2^=(p1^p2)&u;
p2
} else {
let u=self.a.iszilch() as isize;
p1^=(p1^p2)&u;
p1
}
}
/* negate self mod Modulus */
pub fn neg(&mut self) {
self.norm();
let mut m = FP2::new_copy(&self.a);
let mut t = FP2::new();
m.add(&self.b);
m.neg();
t.copy(&m);
t.add(&self.b);
self.b.copy(&m);
self.b.add(&self.a);
self.a.copy(&t);
self.norm();
}
/* set to a-ib */
pub fn conj(&mut self) {
self.b.neg();
self.norm();
}
/* self=-conjugate(self) */
pub fn nconj(&mut self) {
self.a.neg();
self.norm();
}
/* self+=a */
pub fn add(&mut self, x: &FP4) {
self.a.add(&x.a);
self.b.add(&x.b);
}
pub fn padd(&mut self, x: &FP2) {
self.a.add(x);
}
pub fn dbl(&mut self) {
self.a.dbl();
self.b.dbl();
}
/* self-=a */
pub fn sub(&mut self, x: &FP4) {
let mut m = FP4::new_copy(x);
m.neg();
self.add(&m);
}
/* self=x-self */
pub fn rsub(&mut self, x: &FP4) {
self.neg();
self.add(x);
}
/* self*=s, where s is an FP2 */
pub fn pmul(&mut self, s: &FP2) {
self.a.mul(s);
self.b.mul(s);
}
/* self*=s, where s is an FP */
pub fn qmul(&mut self, s: &FP) {
self.a.pmul(s);
self.b.pmul(s);
}
/* self*=i, where i is an int */
pub fn imul(&mut self, c: isize) {
self.a.imul(c);
self.b.imul(c);
}
/* self*=self */
pub fn sqr(&mut self) {
let mut t1 = FP2::new_copy(&self.a);
let mut t2 = FP2::new_copy(&self.b);
let mut t3 = FP2::new_copy(&self.a);
t3.mul(&self.b);
t1.add(&self.b);
t2.mul_ip();
t2.add(&self.a);
t1.norm();
t2.norm();
self.a.copy(&t1);
self.a.mul(&t2);
t2.copy(&t3);
t2.mul_ip();
t2.add(&t3);
t2.norm();
t2.neg();
self.a.add(&t2);
t3.dbl();
self.b.copy(&t3);
self.norm();
}
/* self*=y */
pub fn mul(&mut self, y: &FP4) {
//self.norm();
let mut t1 = FP2::new_copy(&self.a);
let mut t2 = FP2::new_copy(&self.b);
let mut t3 = FP2::new();
let mut t4 = FP2::new_copy(&self.b);
t1.mul(&y.a);
t2.mul(&y.b);
t3.copy(&y.b);
t3.add(&y.a);
t4.add(&self.a);
t3.norm();
t4.norm();
t4.mul(&t3);
t3.copy(&t1);
t3.neg();
t4.add(&t3);
t4.norm();
t3.copy(&t2);
t3.neg();
self.b.copy(&t4);
self.b.add(&t3);
t2.mul_ip();
self.a.copy(&t2);
self.a.add(&t1);
self.norm();
}
/* output to hex string */
pub fn tostring(&self) -> String {
format!("[{},{}]", self.a.tostring(), self.b.tostring())
}
/* self=1/self */
pub fn inverse(&mut self,h:Option<&FP>) {
//self.norm();
let mut t1 = FP2::new_copy(&self.a);
let mut t2 = FP2::new_copy(&self.b);
t1.sqr();
t2.sqr();
t2.mul_ip();
t2.norm();
t1.sub(&t2);
t1.inverse(h);
self.a.mul(&t1);
t1.neg();
t1.norm();
self.b.mul(&t1);
}
/* self*=i where i = sqrt(-1+sqrt(-1)) */
pub fn times_i(&mut self) {
let mut t = FP2::new_copy(&self.b);
self.b.copy(&self.a);
t.mul_ip();
self.a.copy(&t);
self.norm();
if fp::TOWER == fp::POSITOWER {
self.neg();
self.norm();
}
}
/* self=self^p using Frobenius */
pub fn frob(&mut self, f: &FP2) {
self.a.conj();
self.b.conj();
self.b.mul(f);
}
/* return this^e */
/*
pub fn pow(&self, e: &BIG) -> FP4 {
let mut w = FP4::new_copy(self);
w.norm();
let mut z = BIG::new_copy(&e);
let mut r = FP4::new_int(1);
z.norm();
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
r
}
*/
/* XTR xtr_a function */
pub fn xtr_a(&mut self, w: &FP4, y: &FP4, z: &FP4) {
let mut r = FP4::new_copy(w);
let mut t = FP4::new_copy(w);
r.sub(y);
r.norm();
r.pmul(&self.a);
t.add(y);
t.norm();
t.pmul(&self.b);
t.times_i();
self.copy(&r);
self.add(&t);
self.add(z);
self.norm();
}
/* XTR xtr_d function */
pub fn xtr_d(&mut self) {
let mut w = FP4::new_copy(self);
self.sqr();
w.conj();
w.dbl();
w.norm();
self.sub(&w);
self.reduce();
}
/* r=x^n using XTR method on traces of FP12s */
pub fn xtr_pow(&self, n: &BIG) -> FP4 {
let mut sf = FP4::new_copy(self);
sf.norm();
let mut a = FP4::new_int(3);
let mut b = FP4::new_copy(&sf);
let mut c = FP4::new_copy(&b);
c.xtr_d();
let mut t = FP4::new();
let mut r = FP4::new();
let par = n.parity();
let mut v = BIG::new_copy(n);
v.norm();
v.fshr(1);
if par == 0 {
v.dec(1);
v.norm();
}
let nb = v.nbits();
for i in (0..nb).rev() {
if v.bit(i) != 1 {
t.copy(&b);
sf.conj();
c.conj();
b.xtr_a(&a, &sf, &c);
sf.conj();
c.copy(&t);
c.xtr_d();
a.xtr_d();
} else {
t.copy(&a);
t.conj();
a.copy(&b);
a.xtr_d();
b.xtr_a(&c, &sf, &t);
c.xtr_d();
}
}
if par == 0 {
r.copy(&c)
} else {
r.copy(&b)
}
r.reduce();
r
}
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP12s. See Stam thesis. */
pub fn xtr_pow2(&mut self, ck: &FP4, ckml: &FP4, ckm2l: &FP4, a: &BIG, b: &BIG) -> FP4 {
let mut e = BIG::new_copy(a);
let mut d = BIG::new_copy(b);
let mut w = BIG::new();
e.norm();
d.norm();
let mut cu = FP4::new_copy(ck); // can probably be passed in w/o copying
let mut cv = FP4::new_copy(self);
let mut cumv = FP4::new_copy(ckml);
let mut cum2v = FP4::new_copy(ckm2l);
let mut r = FP4::new();
let mut t = FP4::new();
let mut f2: usize = 0;
while d.parity() == 0 && e.parity() == 0 {
d.fshr(1);
e.fshr(1);
f2 += 1;
}
while BIG::comp(&d, &e) != 0 {
if BIG::comp(&d, &e) > 0 {
w.copy(&e);
w.imul(4);
w.norm();
if BIG::comp(&d, &w) <= 0 {
w.copy(&d);
d.copy(&e);
e.rsub(&w);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cum2v.conj();
cumv.copy(&cv);
cv.copy(&cu);
cu.copy(&t);
} else if d.parity() == 0 {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
} else if e.parity() == 1 {
d.sub(&e);
d.norm();
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cu.xtr_d();
cum2v.copy(&cv);
cum2v.xtr_d();
cum2v.conj();
cv.copy(&t);
} else {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
}
}
if BIG::comp(&d, &e) < 0 {
w.copy(&d);
w.imul(4);
w.norm();
if BIG::comp(&e, &w) <= 0 {
e.sub(&d);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cumv.copy(&cu);
cu.copy(&t);
} else if e.parity() == 0 {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
} else if d.parity() == 1 {
w.copy(&e);
e.copy(&d);
w.sub(&d);
w.norm();
d.copy(&w);
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cumv.conj();
cum2v.copy(&cu);
cum2v.xtr_d();
cum2v.conj();
cu.copy(&cv);
cu.xtr_d();
cv.copy(&t);
} else {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
}
}
}
r.copy(&cv);
r.xtr_a(&cu, &cumv, &cum2v);
for _ in 0..f2 {
r.xtr_d()
}
r = r.xtr_pow(&d);
r
}
/* this/=2 */
pub fn div2(&mut self) {
self.a.div2();
self.b.div2();
}
pub fn div_i(&mut self) {
let mut u = FP2::new_copy(&self.a);
let v = FP2::new_copy(&self.b);
u.div_ip();
self.a.copy(&v);
self.b.copy(&u);
if fp::TOWER == fp::POSITOWER {
self.neg();
self.norm();
}
}
/*
pub fn pow(&mut self, e: &BIG) {
let mut w = FP4::new_copy(self);
let mut z = BIG::new_copy(&e);
let mut r = FP4::new_int(1);
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
self.copy(&r);
}
*/
/* */
pub fn qr(&mut self,h:Option<&mut FP>) -> isize {
let mut c=FP4::new_copy(self);
c.conj();
c.mul(self);
c.geta().qr(h)
}
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
// assumes self is a QR; call qr() first to check
pub fn sqrt(&mut self,h:Option<&FP>) {
if self.iszilch() {
return;
}
let mut a = FP2::new_copy(&self.a);
let mut b = FP2::new_copy(&self.a);
let mut s = FP2::new_copy(&self.b);
let mut t = FP2::new_copy(&self.a);
let mut hint = FP::new();
s.sqr();
a.sqr();
s.mul_ip();
s.norm();
a.sub(&s);
s.copy(&a); s.norm();
s.sqrt(h);
a.copy(&t);
a.add(&s);
a.norm();
a.div2();
b.copy(&self.b); b.div2();
let qr=a.qr(Some(&mut hint));
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
s.copy(&a);
let mut twk = FP::new_big(&BIG::new_ints(&rom::TWK));
twk.mul(&hint);
s.div_ip(); s.norm();
a.cmove(&s,1-qr);
hint.cmove(&twk,1-qr);
self.a.copy(&a); self.a.sqrt(Some(&hint));
s.copy(&a); s.inverse(Some(&hint));
s.mul(&self.a);
self.b.copy(&s); self.b.mul(&b);
t.copy(&self.a);
self.a.cmove(&self.b,1-qr);
self.b.cmove(&t,1-qr);
let sgn=self.sign();
let mut nr=FP4::new_copy(&self);
nr.neg(); nr.norm();
self.cmove(&nr,sgn);
}
/* */
}

File diff suppressed because it is too large

View File

@ -0,0 +1,807 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::fp;
use crate::bls48581::fp::FP;
use crate::bls48581::fp2::FP2;
use crate::bls48581::fp4::FP4;
use crate::rand::RAND;
#[allow(unused_imports)]
use crate::bls48581::big::BIG;
#[allow(unused_imports)]
use crate::bls48581::rom;
#[derive(Copy, Clone)]
pub struct FP8 {
a: FP4,
b: FP4,
}
impl std::fmt::Debug for FP8 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl std::fmt::Display for FP8 {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", self.tostring())
}
}
impl FP8 {
pub const fn new() -> FP8 {
FP8 {
a: FP4::new(),
b: FP4::new(),
}
}
pub fn new_int(a: isize) -> FP8 {
let mut f = FP8::new();
f.a.copy(&FP4::new_int(a));
f.b.zero();
f
}
pub fn new_ints(a: isize,b: isize) -> FP8 {
let mut f = FP8::new();
f.a.copy(&FP4::new_int(a));
f.b.copy(&FP4::new_int(b));
f
}
pub fn new_copy(x: &FP8) -> FP8 {
let mut f = FP8::new();
f.a.copy(&x.a);
f.b.copy(&x.b);
f
}
pub fn new_fp4s(c: &FP4, d: &FP4) -> FP8 {
let mut f = FP8::new();
f.a.copy(c);
f.b.copy(d);
f
}
pub fn new_fp4(c: &FP4) -> FP8 {
let mut f = FP8::new();
f.a.copy(c);
f.b.zero();
f
}
pub fn new_fp(c: &FP) -> FP8 {
let mut f = FP8::new();
f.a.set_fp(c);
f.b.zero();
f
}
pub fn new_rand(rng: &mut RAND) -> FP8 {
FP8::new_fp4s(&FP4::new_rand(rng),&FP4::new_rand(rng))
}
pub fn set_fp4s(&mut self, c: &FP4, d: &FP4) {
self.a.copy(&c);
self.b.copy(&d);
}
pub fn set_fp4(&mut self, c: &FP4) {
self.a.copy(&c);
self.b.zero();
}
pub fn set_fp4h(&mut self, c: &FP4) {
self.b.copy(&c);
self.a.zero();
}
/* reduce components mod Modulus */
pub fn reduce(&mut self) {
self.a.reduce();
self.b.reduce();
}
/* normalise components of w */
pub fn norm(&mut self) {
self.a.norm();
self.b.norm();
}
pub fn cmove(&mut self, g: &FP8, d: isize) {
self.a.cmove(&g.a, d);
self.b.cmove(&g.b, d);
}
/* test self=0 ? */
pub fn iszilch(&self) -> bool {
self.a.iszilch() && self.b.iszilch()
}
pub fn islarger(&self) -> isize {
if self.iszilch() {
return 0;
}
let cmp=self.b.islarger();
if cmp!=0 {
return cmp;
}
self.a.islarger()
}
pub fn tobytes(&self,bf: &mut [u8]) {
const MB:usize = 4*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
self.b.tobytes(&mut t);
for i in 0..MB {
bf[i]=t[i];
}
self.a.tobytes(&mut t);
for i in 0..MB {
bf[i+MB]=t[i];
}
}
pub fn frombytes(bf: &[u8]) -> FP8 {
const MB:usize = 4*(big::MODBYTES as usize);
let mut t: [u8; MB] = [0; MB];
for i in 0..MB {
t[i]=bf[i];
}
let tb=FP4::frombytes(&t);
for i in 0..MB {
t[i]=bf[i+MB];
}
let ta=FP4::frombytes(&t);
FP8::new_fp4s(&ta,&tb)
}
/* test self=1 ? */
pub fn isunity(&self) -> bool {
let one = FP4::new_int(1);
self.a.equals(&one) && self.b.iszilch()
}
/* test if w is real? That is, in a+ib, test whether b is zero */
pub fn isreal(&mut self) -> bool {
self.b.iszilch()
}
/* extract real part a */
pub fn real(&self) -> FP4 {
FP4::new_copy(&self.a)
}
pub fn geta(&self) -> FP4 {
FP4::new_copy(&self.a)
}
/* extract imaginary part b */
pub fn getb(&self) -> FP4 {
FP4::new_copy(&self.b)
}
/* test self=x */
pub fn equals(&self, x: &FP8) -> bool {
self.a.equals(&x.a) && self.b.equals(&x.b)
}
/* copy self=x */
pub fn copy(&mut self, x: &FP8) {
self.a.copy(&x.a);
self.b.copy(&x.b);
}
/* set self=0 */
pub fn zero(&mut self) {
self.a.zero();
self.b.zero();
}
/* set self=1 */
pub fn one(&mut self) {
self.a.one();
self.b.zero();
}
pub fn sign(&self) -> isize {
let mut p1=self.a.sign();
let mut p2=self.b.sign();
if fp::BIG_ENDIAN_SIGN {
let u=self.b.iszilch() as isize;
p2^=(p1^p2)&u;
return p2;
} else {
let u=self.a.iszilch() as isize;
p1^=(p1^p2)&u;
return p1;
}
}
/* negate self mod Modulus */
pub fn neg(&mut self) {
self.norm();
let mut m = FP4::new_copy(&self.a);
let mut t = FP4::new();
m.add(&self.b);
m.neg();
t.copy(&m);
t.add(&self.b);
self.b.copy(&m);
self.b.add(&self.a);
self.a.copy(&t);
self.norm();
}
/* set to a-ib */
pub fn conj(&mut self) {
self.b.neg();
self.norm();
}
/* self=-conjugate(self) */
pub fn nconj(&mut self) {
self.a.neg();
self.norm();
}
/* self+=a */
pub fn add(&mut self, x: &FP8) {
self.a.add(&x.a);
self.b.add(&x.b);
}
pub fn padd(&mut self, x: &FP4) {
self.a.add(x);
}
pub fn dbl(&mut self) {
self.a.dbl();
self.b.dbl();
}
/* self-=a */
pub fn sub(&mut self, x: &FP8) {
let mut m = FP8::new_copy(x);
m.neg();
self.add(&m);
}
/* self=x-self */
pub fn rsub(&mut self, x: &FP8) {
self.neg();
self.add(x);
}
/* self*=s, where s is an FP4 */
pub fn pmul(&mut self, s: &FP4) {
self.a.mul(s);
self.b.mul(s);
}
/* self*=s, where s is an FP2 */
pub fn qmul(&mut self, s: &FP2) {
self.a.pmul(s);
self.b.pmul(s);
}
/* self*=s, where s is an FP */
pub fn tmul(&mut self, s: &FP) {
self.a.qmul(s);
self.b.qmul(s);
}
/* self*=i, where i is an int */
pub fn imul(&mut self, c: isize) {
self.a.imul(c);
self.b.imul(c);
}
/* self*=self */
pub fn sqr(&mut self) {
let mut t1 = FP4::new_copy(&self.a);
let mut t2 = FP4::new_copy(&self.b);
let mut t3 = FP4::new_copy(&self.a);
t3.mul(&self.b);
t1.add(&self.b);
t2.times_i();
t2.add(&self.a);
t1.norm();
t2.norm();
self.a.copy(&t1);
self.a.mul(&t2);
t2.copy(&t3);
t2.times_i();
t2.add(&t3);
t2.norm();
t2.neg();
self.a.add(&t2);
t3.dbl();
self.b.copy(&t3);
self.norm();
}
/* self*=y */
pub fn mul(&mut self, y: &FP8) {
//self.norm();
let mut t1 = FP4::new_copy(&self.a);
let mut t2 = FP4::new_copy(&self.b);
let mut t3 = FP4::new();
let mut t4 = FP4::new_copy(&self.b);
t1.mul(&y.a);
t2.mul(&y.b);
t3.copy(&y.b);
t3.add(&y.a);
t4.add(&self.a);
t3.norm();
t4.norm();
t4.mul(&t3);
t3.copy(&t1);
t3.neg();
t4.add(&t3);
t4.norm();
t3.copy(&t2);
t3.neg();
self.b.copy(&t4);
self.b.add(&t3);
t2.times_i();
self.a.copy(&t2);
self.a.add(&t1);
self.norm();
}
/* output to hex string */
pub fn tostring(&self) -> String {
format!("[{},{}]", self.a.tostring(), self.b.tostring())
}
/* self=1/self */
pub fn inverse(&mut self,h:Option<&FP>) {
//self.norm();
let mut t1 = FP4::new_copy(&self.a);
let mut t2 = FP4::new_copy(&self.b);
t1.sqr();
t2.sqr();
t2.times_i();
t2.norm();
t1.sub(&t2);
t1.norm();
t1.inverse(h);
self.a.mul(&t1);
t1.neg();
t1.norm();
self.b.mul(&t1);
}
/* self*=i */
pub fn times_i(&mut self) {
let mut s = FP4::new_copy(&self.b);
let t = FP4::new_copy(&self.a);
s.times_i();
self.a.copy(&s);
self.b.copy(&t);
self.norm();
if fp::TOWER == fp::POSITOWER {
self.neg();
self.norm();
}
}
pub fn times_i2(&mut self) {
self.a.times_i();
self.b.times_i();
}
/* self=self^p using Frobenius */
pub fn frob(&mut self, f: &FP2) {
let mut ff = FP2::new_copy(f);
ff.sqr();
ff.mul_ip();
ff.norm();
self.a.frob(&ff);
self.b.frob(&ff);
self.b.pmul(f);
self.b.times_i();
}
/* return this^e */
/*
pub fn pow(&self, e: &BIG) -> FP8 {
let mut w = FP8::new_copy(self);
w.norm();
let mut z = BIG::new_copy(&e);
let mut r = FP8::new_int(1);
z.norm();
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
return r;
}
*/
/* XTR xtr_a function */
/*
pub fn xtr_a(&mut self, w: &FP8, y: &FP8, z: &FP8) {
let mut r = FP8::new_copy(w);
let mut t = FP8::new_copy(w);
r.sub(y);
r.norm();
r.pmul(&self.a);
t.add(y);
t.norm();
t.pmul(&self.b);
t.times_i();
self.copy(&r);
self.add(&t);
self.add(z);
self.norm();
}
*/
/* XTR xtr_d function */
/*
pub fn xtr_d(&mut self) {
let mut w = FP8::new_copy(self);
self.sqr();
w.conj();
w.dbl();
w.norm();
self.sub(&w);
self.reduce();
}
*/
/* r=x^n using XTR method on traces of FP24s */
/*
pub fn xtr_pow(&self, n: &BIG) -> FP8 {
let mut sf = FP8::new_copy(self);
sf.norm();
let mut a = FP8::new_int(3);
let mut b = FP8::new_copy(&sf);
let mut c = FP8::new_copy(&b);
c.xtr_d();
let mut t = FP8::new();
let mut r = FP8::new();
let par = n.parity();
let mut v = BIG::new_copy(n);
v.norm();
v.fshr(1);
if par == 0 {
v.dec(1);
v.norm();
}
let nb = v.nbits();
for i in (0..nb).rev() {
if v.bit(i) != 1 {
t.copy(&b);
sf.conj();
c.conj();
b.xtr_a(&a, &sf, &c);
sf.conj();
c.copy(&t);
c.xtr_d();
a.xtr_d();
} else {
t.copy(&a);
t.conj();
a.copy(&b);
a.xtr_d();
b.xtr_a(&c, &sf, &t);
c.xtr_d();
}
}
if par == 0 {
r.copy(&c)
} else {
r.copy(&b)
}
r.reduce();
return r;
}
*/
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP24s. See Stam thesis. */
/*
pub fn xtr_pow2(&mut self, ck: &FP8, ckml: &FP8, ckm2l: &FP8, a: &BIG, b: &BIG) -> FP8 {
let mut e = BIG::new_copy(a);
let mut d = BIG::new_copy(b);
let mut w = BIG::new();
e.norm();
d.norm();
let mut cu = FP8::new_copy(ck); // can probably be passed in w/o copying
let mut cv = FP8::new_copy(self);
let mut cumv = FP8::new_copy(ckml);
let mut cum2v = FP8::new_copy(ckm2l);
let mut r = FP8::new();
let mut t = FP8::new();
let mut f2: usize = 0;
while d.parity() == 0 && e.parity() == 0 {
d.fshr(1);
e.fshr(1);
f2 += 1;
}
while BIG::comp(&d, &e) != 0 {
if BIG::comp(&d, &e) > 0 {
w.copy(&e);
w.imul(4);
w.norm();
if BIG::comp(&d, &w) <= 0 {
w.copy(&d);
d.copy(&e);
e.rsub(&w);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cum2v.conj();
cumv.copy(&cv);
cv.copy(&cu);
cu.copy(&t);
} else {
if d.parity() == 0 {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
} else {
if e.parity() == 1 {
d.sub(&e);
d.norm();
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cu.xtr_d();
cum2v.copy(&cv);
cum2v.xtr_d();
cum2v.conj();
cv.copy(&t);
} else {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
}
}
}
}
if BIG::comp(&d, &e) < 0 {
w.copy(&d);
w.imul(4);
w.norm();
if BIG::comp(&e, &w) <= 0 {
e.sub(&d);
e.norm();
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cum2v.copy(&cumv);
cumv.copy(&cu);
cu.copy(&t);
} else {
if e.parity() == 0 {
w.copy(&d);
d.copy(&e);
d.fshr(1);
e.copy(&w);
t.copy(&cumv);
t.xtr_d();
cumv.copy(&cum2v);
cumv.conj();
cum2v.copy(&t);
cum2v.conj();
t.copy(&cv);
t.xtr_d();
cv.copy(&cu);
cu.copy(&t);
} else {
if d.parity() == 1 {
w.copy(&e);
e.copy(&d);
w.sub(&d);
w.norm();
d.copy(&w);
d.fshr(1);
t.copy(&cv);
t.xtr_a(&cu, &cumv, &cum2v);
cumv.conj();
cum2v.copy(&cu);
cum2v.xtr_d();
cum2v.conj();
cu.copy(&cv);
cu.xtr_d();
cv.copy(&t);
} else {
d.fshr(1);
r.copy(&cum2v);
r.conj();
t.copy(&cumv);
t.xtr_a(&cu, &cv, &r);
cum2v.copy(&cumv);
cum2v.xtr_d();
cumv.copy(&t);
cu.xtr_d();
}
}
}
}
}
r.copy(&cv);
r.xtr_a(&cu, &cumv, &cum2v);
for _ in 0..f2 {
r.xtr_d()
}
r = r.xtr_pow(&mut d);
return r;
}
*/
/* this/=2 */
pub fn div2(&mut self) {
self.a.div2();
self.b.div2();
}
pub fn div_i(&mut self) {
let mut u = FP4::new_copy(&self.a);
let v = FP4::new_copy(&self.b);
u.div_i();
self.a.copy(&v);
self.b.copy(&u);
if fp::TOWER == fp::POSITOWER {
self.neg();
self.norm();
}
}
/*
pub fn pow(&mut self, e: &BIG) {
let mut w = FP8::new_copy(self);
let mut z = BIG::new_copy(&e);
let mut r = FP8::new_int(1);
loop {
let bt = z.parity();
z.fshr(1);
if bt == 1 {
r.mul(&mut w)
};
if z.iszilch() {
break;
}
w.sqr();
}
r.reduce();
self.copy(&r);
}*/
/* */
pub fn qr(&mut self,h:Option<&mut FP>) -> isize {
let mut c=FP8::new_copy(self);
c.conj();
c.mul(self);
return c.geta().qr(h);
}
// sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
// assumes self is a QR; call qr() first to check
pub fn sqrt(&mut self,h:Option<&FP>) {
if self.iszilch() {
return;
}
let mut a = FP4::new_copy(&self.a);
let mut b = FP4::new_copy(&self.a);
let mut s = FP4::new_copy(&self.b);
let mut t = FP4::new_copy(&self.a);
let mut hint = FP::new();
s.sqr();
a.sqr();
s.times_i();
s.norm();
a.sub(&s);
s.copy(&a); s.norm();
s.sqrt(h);
a.copy(&t);
a.add(&s);
a.norm();
a.div2();
b.copy(&self.b); b.div2();
let qr=a.qr(Some(&mut hint));
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
s.copy(&a);
let mut twk = FP::new_big(&BIG::new_ints(&rom::TWK));
twk.mul(&hint);
s.div_i(); s.norm();
a.cmove(&s,1-qr);
hint.cmove(&twk,1-qr);
self.a.copy(&a); self.a.sqrt(Some(&hint));
s.copy(&a); s.inverse(Some(&hint));
s.mul(&self.a);
self.b.copy(&s); self.b.mul(&b);
t.copy(&self.a);
self.a.cmove(&self.b,1-qr);
self.b.cmove(&t,1-qr);
let sgn=self.sign();
let mut nr=FP8::new_copy(&self);
nr.neg(); nr.norm();
self.cmove(&nr,sgn);
}
/* */
}

View File

@ -0,0 +1,386 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::ecp;
use crate::bls48581::ecdh;
use crate::hmac;
use crate::rand::RAND;
const GROUP: usize = ecdh::EGS;
const POINT: usize = 2*ecdh::EFS+1;
const MAX_LABEL: usize = 20; // may need adjustment
#[allow(non_snake_case)]
fn reverse (x: &mut [u8]) {
let lx=x.len();
for i in 0..lx/2 {
let ch=x[i];
x[i]=x[lx-i-1];
x[lx-i-1]=ch;
}
}
/*
fn printbinary(array: &[u8]) {
for i in 0..array.len() {
print!("{:02X}", array[i])
}
println!("")
}
*/
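// labeledExtract/labeledExpand below follow the RFC 9180 constructions:
// LabeledExtract(salt, label, ikm) = Extract(salt, "HPKE-v1" || suite_id || label || ikm)
// LabeledExpand(prk, label, info, L) = Expand(prk, I2OSP(L, 2) || "HPKE-v1" || suite_id || label || info, L)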
#[allow(non_snake_case)]
fn labeledExtract(prk: &mut [u8],salt: Option<&[u8]>,suite_id: &[u8],label: &str,ikm: Option<&[u8]>) {
let rfc="HPKE-v1";
let prefix1=rfc.as_bytes();
let prefix2=label.as_bytes();
let mut likm: [u8; 18+MAX_LABEL+2*POINT] = [0; 18+MAX_LABEL+2*POINT];
let mut k=0;
for i in 0..prefix1.len() {
likm[k]=prefix1[i];
k+=1;
}
for i in 0..suite_id.len() {
likm[k]=suite_id[i];
k+=1;
}
for i in 0..prefix2.len() {
likm[k]=prefix2[i];
k+=1;
}
if let Some(sikm) = ikm {
for i in 0..sikm.len() {
likm[k]=sikm[i];
k+=1;
}
}
hmac::hkdf_extract(hmac::MC_SHA2,ecp::HASH_TYPE,prk,salt,&likm[0..k]);
}
#[allow(non_snake_case)]
fn labeledExpand(okm: &mut [u8],prk: &[u8],suite_id: &[u8],label: &str,info: Option<&[u8]>,el: usize) {
let mut ar: [u8; 2] = [0; 2];
let rfc="HPKE-v1";
let prefix1=rfc.as_bytes();
let prefix2=label.as_bytes();
hmac::inttobytes(el,&mut ar);
let mut linfo: [u8; 20+MAX_LABEL+3*POINT] = [0; 20+MAX_LABEL+3*POINT];
linfo[0]=ar[0];
linfo[1]=ar[1];
let mut k=2;
for i in 0..prefix1.len() {
linfo[k]=prefix1[i];
k+=1;
}
for i in 0..suite_id.len() {
linfo[k]=suite_id[i];
k+=1;
}
for i in 0..prefix2.len() {
linfo[k]=prefix2[i];
k+=1;
}
if let Some(sinfo) = info {
for i in 0..sinfo.len() {
linfo[k]=sinfo[i];
k+=1;
}
}
hmac::hkdf_expand(hmac::MC_SHA2,ecp::HASH_TYPE,okm,el,prk,&linfo[0..k]);
}
#[allow(non_snake_case)]
fn extractAndExpand(config_id: usize,okm: &mut [u8],dh: &[u8],context: &[u8]) {
let kem = config_id&255;
let txt="KEM";
let mut suite_id: [u8;5] = [0;5];
let mut kem_id: [u8; 2] = [0; 2];
let ckem=txt.as_bytes();
hmac::inttobytes(kem,&mut kem_id);
let mut k=0;
for i in 0..ckem.len() {
suite_id[k]=ckem[i];
k+=1;
}
suite_id[k]=kem_id[0]; k+=1;
suite_id[k]=kem_id[1];
let mut prk: [u8;ecp::HASH_TYPE]=[0;ecp::HASH_TYPE];
labeledExtract(&mut prk,None,&suite_id,"eae_prk",Some(dh));
labeledExpand(okm,&prk,&suite_id,"shared_secret",Some(&context),ecp::HASH_TYPE);
}
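// deriveKeyPair below mirrors RFC 9180 DeriveKeyPair: dkp_prk = LabeledExtract("", "dkp_prk", ikm);
// for the X25519/X448 KEMs (ids 32/33) the secret key is a single LabeledExpand of "sk" plus the
// usual scalar clamping, while for the NIST-curve KEMs a "candidate" scalar is expanded repeatedly
// (at most 256 times) until it lies in range, masking the top byte with 0x01 for P-521 (kem 18).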
#[allow(non_snake_case)]
pub fn deriveKeyPair(config_id: usize,mut sk: &mut [u8],mut pk: &mut [u8],seed: &[u8]) -> bool {
let mut counter=0;
let kem = config_id&255;
let txt="KEM";
let mut suite_id: [u8;5] = [0;5];
let mut kem_id: [u8; 2] = [0; 2];
let ckem=txt.as_bytes();
hmac::inttobytes(kem,&mut kem_id);
let mut k=0;
for i in 0..ckem.len() {
suite_id[k]=ckem[i];
k+=1;
}
suite_id[k]=kem_id[0]; k+=1;
suite_id[k]=kem_id[1];
let mut prk: [u8;ecp::HASH_TYPE]=[0;ecp::HASH_TYPE];
labeledExtract(&mut prk,None,&suite_id,"dkp_prk",Some(&seed));
//println!("prk= {:02X?}",prk);
if kem==32 || kem==33 { // RFC7748
labeledExpand(&mut sk,&prk,&suite_id,"sk",None,GROUP);
reverse(&mut sk);
if kem==32 {
sk[GROUP-1]&=248;
sk[0]&=127;
sk[0]|=64;
} else {
sk[GROUP-1]&=252;
sk[0]|=128;
}
} else {
let mut bit_mask=0xff;
if kem==18 {
bit_mask=1;
}
for i in 0..GROUP {
sk[i]=0;
}
while !ecdh::in_range(&sk) && counter<256 {
let mut info: [u8;1]=[0;1];
info[0]=counter as u8;
labeledExpand(sk,&prk,&suite_id,"candidate",Some(&info),GROUP);
sk[0] &= bit_mask as u8;
counter += 1;
}
}
// println!("SK= {:02X?}",sk);
// println!("kem= {}",kem);
//println!("counter= {}",counter);
ecdh::key_pair_generate(None::<&mut RAND>, &mut sk, &mut pk);
if kem==32 || kem==33 {
reverse(&mut pk);
}
counter<256
}
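// encap/decap implement the (unauthenticated) DHKEM Encap/Decap of RFC 9180: the shared
// secret is ExtractAndExpand(DH(skE, pkR), kem_context) with kem_context = pkE || pkR, and
// the byte reversals account for the little-endian encodings used by X25519/X448.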
#[allow(non_snake_case)]
pub fn encap(config_id: usize,skE: &[u8],z: &mut [u8],pkE: &[u8],pkR: &[u8]) {
let pklen=pkE.len();
let mut dh: [u8; ecdh::EFS] = [0; ecdh::EFS];
let mut kemcontext: [u8; 2*POINT] = [0;2*POINT];
let kem = config_id&255;
let mut rev: [u8; POINT]=[0; POINT];
if kem==32 || kem==33 {
for i in 0..pklen {
rev[i]=pkR[i];
}
reverse(&mut rev[0..pklen]);
ecdh::ecpsvdp_dh(&skE, &rev[0..pklen], &mut dh, 0);
reverse(&mut dh[0..pklen]);
} else {
ecdh::ecpsvdp_dh(&skE, &pkR, &mut dh, 0);
}
let mut k=0;
for i in 0..pklen {
kemcontext[k]=pkE[i];
k+=1;
}
for i in 0..pklen {
kemcontext[k]=pkR[i];
k+=1;
}
//print!("e dh= "); printbinary(&dh[0..pklen]);
extractAndExpand(config_id,z,&dh,&kemcontext[0..k]);
}
#[allow(non_snake_case)]
pub fn decap(config_id: usize,skR: &[u8],z: &mut [u8],pkE: &[u8],pkR: &[u8]) {
let pklen=pkE.len();
let mut dh: [u8; ecdh::EFS] = [0; ecdh::EFS];
let mut kemcontext: [u8; 2*POINT] = [0;2*POINT];
let mut rev: [u8; POINT]=[0; POINT];
let kem = config_id&255;
if kem==32 || kem==33 {
for i in 0..pklen {
rev[i]=pkE[i];
}
reverse(&mut rev[0..pklen]);
ecdh::ecpsvdp_dh(&skR, &rev[0..pklen], &mut dh, 0);
reverse(&mut dh[0..pklen]);
} else {
ecdh::ecpsvdp_dh(&skR, &pkE, &mut dh, 0);
}
let mut k=0;
for i in 0..pklen {
kemcontext[k]=pkE[i];
k+=1;
}
for i in 0..pklen { // not a mistake
kemcontext[k]=pkR[i];
k+=1;
}
//print!("d dh= "); printbinary(&dh[0..pklen]);
extractAndExpand(config_id,z,&dh,&kemcontext[0..k]);
}
#[allow(non_snake_case)]
pub fn authencap(config_id: usize,skE: &[u8],skS: &[u8],z: &mut [u8],pkE: &[u8],pkR: &[u8],pkS: &[u8]) {
let mut dh: [u8; 2*ecdh::EFS] = [0; 2*ecdh::EFS];
let mut dh1: [u8; ecdh::EFS] = [0; ecdh::EFS];
let mut kemcontext: [u8; 3*POINT] = [0;3*POINT];
let kem = config_id&255;
let pklen=pkE.len();
let mut rev: [u8; POINT]=[0; POINT];
if kem==32 || kem==33 {
for i in 0..pklen {
rev[i]=pkR[i];
}
reverse(&mut rev[0..pklen]);
ecdh::ecpsvdp_dh(&skE, &rev[0..pklen], &mut dh, 0);
ecdh::ecpsvdp_dh(&skS, &rev[0..pklen], &mut dh1, 0);
reverse(&mut dh[0..pklen]);
reverse(&mut dh1[0..pklen]);
} else {
ecdh::ecpsvdp_dh(&skE, &pkR, &mut dh, 0);
ecdh::ecpsvdp_dh(&skS, &pkR, &mut dh1, 0);
}
for i in 0..ecdh::EFS {
dh[i+ecdh::EFS] = dh1[i];
}
for i in 0..pklen {
kemcontext[i]=pkE[i];
kemcontext[pklen+i]= pkR[i];
kemcontext[2*pklen+i]= pkS[i];
}
//print!("e dh= "); printbinary(&dh[0..pklen]);
//print!("e kemcontext= "); printbinary(&kemcontext[0..3*pklen]);
extractAndExpand(config_id,z,&dh,&kemcontext[0..3*pklen]);
}
#[allow(non_snake_case)]
pub fn authdecap(config_id: usize,skR: &[u8],z: &mut [u8],pkE: &[u8],pkR: &[u8],pkS: &[u8]) {
let mut dh: [u8; 2*ecdh::EFS] = [0; 2*ecdh::EFS];
let mut dh1: [u8; ecdh::EFS] = [0; ecdh::EFS];
let mut kemcontext: [u8; 3*POINT] = [0;3*POINT];
let kem = config_id&255;
let pklen=pkE.len();
let mut rev: [u8; POINT]=[0; POINT];
if kem==32 || kem==33 {
for i in 0..pklen {
rev[i]=pkE[i];
}
reverse(&mut rev[0..pklen]);
ecdh::ecpsvdp_dh(&skR, &rev[0..pklen], &mut dh, 0);
for i in 0..pklen {
rev[i]=pkS[i];
}
reverse(&mut rev[0..pklen]);
ecdh::ecpsvdp_dh(&skR, &rev[0..pklen], &mut dh1, 0);
reverse(&mut dh[0..pklen]);
reverse(&mut dh1[0..pklen]);
} else {
ecdh::ecpsvdp_dh(&skR, &pkE, &mut dh, 0);
ecdh::ecpsvdp_dh(&skR, &pkS, &mut dh1, 0);
}
for i in 0..ecdh::EFS {
dh[i+ecdh::EFS] = dh1[i];
}
for i in 0..pklen {
kemcontext[i]=pkE[i];
kemcontext[pklen+i]= pkR[i];
kemcontext[2*pklen+i]= pkS[i];
}
//print!("d dh= "); printbinary(&dh[0..pklen]);
//print!("d kemcontext= "); printbinary(&kemcontext[0..3*pklen]);
extractAndExpand(config_id,z,&dh,&kemcontext[0..3*pklen]);
}
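// keyschedule follows RFC 9180 KeySchedule: context = mode || psk_id_hash || info_hash,
// secret = LabeledExtract(shared_secret, "secret", psk), and key, base_nonce and the
// exporter secret are then derived from secret via LabeledExpand over that context.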
#[allow(non_snake_case)]
pub fn keyschedule(config_id: usize,key: &mut [u8],nonce: &mut [u8],exp_secret: &mut [u8],mode: usize,z: &mut [u8],info: &[u8],psk: Option<&[u8]>,pskID: Option<&[u8]>) {
let mut context: [u8; 1+2*ecp::HASH_TYPE] = [0; 1+2*ecp::HASH_TYPE];
let kem=config_id&255;
let kdf=(config_id>>8)&3;
let aead=(config_id>>10)&3;
let txt="HPKE";
let ckem=txt.as_bytes();
let mut suite_id: [u8;10] = [0;10];
let mut num: [u8; 2] = [0; 2];
let mut k=0;
for i in 0..ckem.len() {
suite_id[k]=ckem[i];
k+=1;
}
hmac::inttobytes(kem,&mut num);
suite_id[k]=num[0]; k+=1;
suite_id[k]=num[1]; k+=1;
hmac::inttobytes(kdf,&mut num);
suite_id[k]=num[0]; k+=1;
suite_id[k]=num[1]; k+=1;
hmac::inttobytes(aead,&mut num);
suite_id[k]=num[0]; k+=1;
suite_id[k]=num[1];
let mut k=0;
let mut h: [u8; 64] = [0; 64];
let mut secret: [u8; 64] = [0; 64];
context[k]=mode as u8; k+=1;
labeledExtract(&mut h,None,&suite_id,"psk_id_hash",pskID);
for i in 0..ecp::HASH_TYPE {
context[k] = h[i]; k+=1;
}
labeledExtract(&mut h,None,&suite_id,"info_hash",Some(&info));
for i in 0..ecp::HASH_TYPE {
context[k] = h[i]; k+=1;
}
//labeledExtract(&mut h,None,&suite_id,"psk_hash",psk);
//labeledExtract(&mut secret,Some(&h),&suite_id,"secret",Some(z));
labeledExtract(&mut secret,Some(z),&suite_id,"secret",psk);
labeledExpand(key,&secret,&suite_id,"key",Some(&context[0..k]),ecp::AESKEY);
labeledExpand(nonce,&secret,&suite_id,"base_nonce",Some(&context[0..k]),12);
labeledExpand(exp_secret,&secret,&suite_id,"exp",Some(&context[0..k]),ecp::HASH_TYPE);
}

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub mod big;
pub mod bls256;
pub mod dbig;
pub mod ecp;
pub mod ecp8;
pub mod fp;
pub mod fp16;
pub mod fp2;
pub mod fp4;
pub mod fp48;
pub mod fp8;
pub mod mpin256;
pub mod pair8;
pub mod rom;

View File

@ -0,0 +1,227 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::bls48581::big;
use crate::bls48581::big::BIG;
use crate::bls48581::ecp;
use crate::bls48581::ecp::ECP;
use crate::bls48581::ecp8::ECP8;
use crate::bls48581::fp48::FP48;
use crate::bls48581::pair8;
use crate::bls48581::rom;
use crate::bls48581::fp::FP;
use crate::bls48581::dbig::DBIG;
use crate::hmac;
use crate::rand::RAND;
/* MPIN 256-bit API Functions */
/* Configure mode of operation */
pub const EFS: usize = big::MODBYTES as usize;
pub const EGS: usize = big::MODBYTES as usize;
pub const BAD_PARAMS: isize = -11;
pub const INVALID_POINT: isize = -14;
pub const WRONG_ORDER: isize = -18;
pub const BAD_PIN: isize = -19;
pub const SHA256: usize = 32;
pub const SHA384: usize = 48;
pub const SHA512: usize = 64;
/* Configure your PIN here */
pub const MAXPIN: i32 = 10000; /* PIN less than this */
pub const PBLEN: i32 = 14; /* Number of bits in PIN */
fn ceil(a: usize,b: usize) -> usize {
(a-1)/b+1
}
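// A note on encode_to_curve below: it is a nonuniform encode-to-curve in the sense of
// hash-to-curve, i.e. expand_message_xmd produces el bytes, these are reduced mod the field
// modulus, mapped to the curve with a single map2point call, and cofactor-cleared (cfp)
// before the point is serialised.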
#[allow(non_snake_case)]
pub fn encode_to_curve(dst: &[u8],id: &[u8],hcid: &mut [u8]) {
let q = BIG::new_ints(&rom::MODULUS);
let k=q.nbits();
let r = BIG::new_ints(&rom::CURVE_ORDER);
let m=r.nbits();
let el=ceil(k+ceil(m,2),8);
let mut okm: [u8;512]=[0;512];
hmac::xmd_expand(hmac::MC_SHA2,ecp::HASH_TYPE,&mut okm,el,&dst,&id);
let mut fd: [u8;256]=[0;256];
for j in 0..el {
fd[j]=okm[j];
}
let mut dx=DBIG::frombytes(&fd[0..el]);
let u=FP::new_big(&dx.dmod(&q));
let mut P=ECP::map2point(&u);
P.cfp();
P.affine();
P.tobytes(hcid,false);
}
/* create random secret S */
pub fn random_generate(rng: &mut RAND, s: &mut [u8]) -> isize {
let r = BIG::new_ints(&rom::CURVE_ORDER);
let sc = BIG::randtrunc(&r, 16 * ecp::AESKEY, rng);
sc.tobytes(s);
0
}
/* Extract PIN from TOKEN for identity CID */
#[allow(non_snake_case)]
pub fn extract_pin(cid: &[u8], pin: i32, token: &mut [u8]) -> isize {
let mut P = ECP::frombytes(&token);
if P.is_infinity() {
return INVALID_POINT;
}
let mut R = ECP::frombytes(&cid);
if R.is_infinity() {
return INVALID_POINT;
}
R = R.pinmul(pin%MAXPIN, PBLEN);
P.sub(&mut R);
P.tobytes(token, false);
0
}
/* Implement step 2 on client side of MPin protocol */
#[allow(non_snake_case)]
pub fn client_2(x: &[u8], y: &[u8], sec: &mut [u8]) -> isize {
let mut r = BIG::new_ints(&rom::CURVE_ORDER);
let mut P = ECP::frombytes(sec);
if P.is_infinity() {
return INVALID_POINT;
}
let mut px = BIG::frombytes(x);
let py = BIG::frombytes(y);
px.add(&py);
px.rmod(&mut r);
P = pair8::g1mul(&P, &px);
P.neg();
P.tobytes(sec, false);
0
}
/* Client secret CST=S*H(CID) where CID is client ID and S is master secret */
#[allow(non_snake_case)]
pub fn get_client_secret(s: &mut [u8], idhtc: &[u8], cst: &mut [u8]) -> isize {
let sx=BIG::frombytes(s);
let P=ECP::frombytes(idhtc);
if P.is_infinity() {
return INVALID_POINT;
}
pair8::g1mul(&P, &sx).tobytes(cst, false);
0
}
/* Implement step 1 on client side of MPin protocol */
#[allow(non_snake_case)]
pub fn client_1(
cid: &[u8],
rng: Option<&mut RAND>,
x: &mut [u8],
pin: usize,
token: &[u8],
sec: &mut [u8],
xid: &mut [u8]
) -> isize {
let r = BIG::new_ints(&rom::CURVE_ORDER);
let sx: BIG;
if let Some(rd) = rng {
sx = BIG::randtrunc(&r, 16 * ecp::AESKEY, rd);
sx.tobytes(x);
} else {
sx = BIG::frombytes(x);
}
let mut P=ECP::frombytes(cid);
if P.is_infinity() {
return INVALID_POINT;
}
let mut T = ECP::frombytes(&token);
if T.is_infinity() {
return INVALID_POINT;
}
let mut W = P.pinmul((pin as i32) % MAXPIN, PBLEN);
T.add(&mut W);
P = pair8::g1mul(&P, &sx);
P.tobytes(xid, false);
T.tobytes(sec, false);
0
}
/* Extract Server Secret SST=S*Q where Q is fixed generator in G2 and S is master secret */
#[allow(non_snake_case)]
pub fn get_server_secret(s: &[u8], sst: &mut [u8]) -> isize {
let mut Q = ECP8::generator();
let sc = BIG::frombytes(s);
Q = pair8::g2mul(&Q, &sc);
Q.tobytes(sst,false);
0
}
/* Implement step 2 of MPin protocol on server side */
#[allow(non_snake_case)]
pub fn server(
hid: &[u8],
y: &[u8],
sst: &[u8],
xid: &[u8],
msec: &[u8],
) -> isize {
let Q = ECP8::generator();
let sQ = ECP8::frombytes(&sst);
if sQ.is_infinity() {
return INVALID_POINT;
}
let mut R = ECP::frombytes(&xid);
if R.is_infinity() {
return INVALID_POINT;
}
let sy = BIG::frombytes(&y);
let mut P = ECP::frombytes(&hid);
if P.is_infinity() {
return INVALID_POINT;
}
P = pair8::g1mul(&P, &sy);
P.add(&mut R);
R = ECP::frombytes(&msec);
if R.is_infinity() {
return INVALID_POINT;
}
let mut g: FP48;
g = pair8::ate2(&Q, &R, &sQ, &P);
g = pair8::fexp(&g);
if !g.isunity() {
return BAD_PIN;
}
0
}
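
The functions above compose into the usual MPin enrolment flow: hash the identity into G1, multiply by the master secret to obtain the client token, strip a short PIN out of the token, and recombine the two halves at authentication time. The sketch below (editorial, not part of the diff) walks that flow end to end; the module paths, the DST string, the identity, and the PIN value are assumptions for illustration, and the buffer sizes follow the EFS/EGS constants defined above.

```rust
// Hypothetical usage sketch, written as if it lived inside this crate.
use crate::bls48581::mpin256;
use crate::rand::RAND;

const EGS: usize = mpin256::EGS;          // bytes in a group-order scalar
const G1S: usize = 2 * mpin256::EFS + 1;  // bytes in an uncompressed G1 point

// Returns true when client_1 succeeds, i.e. token + pin*H(ID) recombines cleanly.
fn pin_roundtrip_sketch(rng: &mut RAND) -> bool {
    let dst = b"MPIN-DEMO-DST";            // illustrative domain separation tag
    let id = b"alice@example.com";         // illustrative identity

    let mut s = [0u8; EGS];                // master secret
    let mut hcid = [0u8; G1S];             // H(ID) mapped to G1
    let mut token = [0u8; G1S];            // client secret, later PIN-stripped

    mpin256::random_generate(rng, &mut s);
    mpin256::encode_to_curve(dst, id, &mut hcid);
    mpin256::get_client_secret(&mut s, &hcid, &mut token);

    // Remove the PIN from the token; the client must re-supply it later.
    let pin = 1234;
    mpin256::extract_pin(&hcid, pin, &mut token);

    // Step 1 on the client: recombine token + pin*H(ID) and commit to x*H(ID).
    let mut x = [0u8; EGS];
    let mut sec = [0u8; G1S];
    let mut xid = [0u8; G1S];
    mpin256::client_1(&hcid, Some(rng), &mut x, pin as usize, &token, &mut sec, &mut xid) == 0
}
```

The remaining protocol steps (client_2 and the pairing check in server) additionally need the y challenge supplied by the verifier, so they are not exercised here.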

File diff suppressed because it is too large


@@ -0,0 +1,72 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::arch::Chunk;
use crate::bls48581::big::NLEN;
// Base Bits= 60
pub const MODULUS:[Chunk;NLEN]=[0xEDC154E6565912B,0x8FDF721A4A48AC3,0x7A5513170EE0A57,0x394F4736DAF6836,0xAF6E082ACD9CD30,0xF3975444A48AE43,0x22131BB3BE6C0F1,0x12A0056E84F8D1,0x76F313824E31D47,0x1280F73FF34];
pub const ROI:[Chunk;NLEN]=[0xEDC154E6565912A,0x8FDF721A4A48AC3,0x7A5513170EE0A57,0x394F4736DAF6836,0xAF6E082ACD9CD30,0xF3975444A48AE43,0x22131BB3BE6C0F1,0x12A0056E84F8D1,0x76F313824E31D47,0x1280F73FF34];
pub const R2MODP:[Chunk;NLEN]=[0x79868479F1B5833,0xFB6EBA8FCB82D07,0x9CC8A7F1FD84C7F,0x402C51CF5CC3CBB,0x3F3114F078502C,0xFC90829BDC8336E,0xC7BE91DE9CA8EED,0xD4D273BB17BFADB,0x6EC7C9A81E792CA,0x1DC317A6E4];
pub const MCONST:Chunk=0x148B81FC39D5A7D;
pub const SQRTM3:[Chunk;NLEN]=[0x51EDFC2A1D65A0A,0xD62DAA292D8CDBF,0x24112478269D616,0x6C25D3CABF8AD71,0xC8E9B16B5D3E4CD,0xF50A03B738960EE,0x1A664376FED4343,0xBFFD8FB8925AE06,0x600908C6A28DEAA,0x1280F73F9A7];
pub const FRA:[Chunk;NLEN]=[0x62EB6CFE42AEB25,0xDB41942760AD3F9,0xA7DF2570715ECE4,0x90377B51208AC0F,0x6848493E1C8C418,0xF496307E298187E,0x58740E3CAFD6B62,0xF6067D047983E78,0x49FA75CD7E73E55,0xFD30DB501];
pub const FRB:[Chunk;NLEN]=[0x62EB6CFE42AEB25,0xDB41942760AD3F9,0xA7DF2570715ECE4,0x90377B51208AC0F,0x6848493E1C8C418,0xF496307E298187E,0x58740E3CAFD6B62,0xF6067D047983E78,0x49FA75CD7E73E55,0xFD30DB501];
pub const TWK:[Chunk;NLEN]=[0x7B433D25F426953,0xACE45923B9863D,0xC28BBDFA2D37E16,0x62FFCC8AFB4BC18,0x661B4392F002C4F,0x2ED27E951A14781,0x670A6683B853246,0xAEB8C9BA138A075,0xC10075769CDDD9E,0x3A65A537B];
//*** rom curve parameters *****
// Base Bits= 60
// Ate Bits= 33
// G2 Table size= 36
pub const CURVE_COF_I:isize = 0;
pub const CURVE_COF:[Chunk;NLEN]=[0x140000382,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0];
pub const CURVE_B_I:isize = 1;
pub const CURVE_B:[Chunk;NLEN]=[0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0];
pub const CURVE_ORDER:[Chunk;NLEN]=[0x8A5FE6FCD671C01,0xBE599467C24DA11,0xC7CD0562303C4CC,0x9D34C4C92016A85,0xBC972C2E6E74196,0x3F0B3CBE003FAD6,0x615C0D6C635387A,0xE2885E233A9CCC1,0x2386F8A925,0x0];
pub const CURVE_GX:[Chunk;NLEN]=[0xBCE8732315AF640,0x74DA5D3A1E6D8C3,0x57DB368B11786CB,0x665D859236EBDBC,0x46A9DF6F9645847,0xEDFFB9F75445505,0xE86868CF61ABDBA,0x93F860DE3F257E0,0x40F2BAF2B73DF1E,0x2AF59B7AC3];
pub const CURVE_GY:[Chunk;NLEN]=[0xDBB5DE3E2587A70,0xF37AEF7B926B576,0xF77C2876D1B2E35,0x78584C3EF22F487,0xFFB98AEE53E80F6,0xD41B720EF7BB7BE,0xFEB8A52E991279D,0xB398A488A553C9E,0x31F91F86B3A2D1F,0xCEFDA44F65];
pub const CURVE_HTPC:[Chunk;NLEN]=[0x393F0BE031193EC,0xC28896440758243,0xDBE4AA8E70D4620,0x6B27BD55EFD560E,0x24A9624BEECD070,0xE2626AD7C53B361,0xDD845A98030C755,0x29389B4E6A62C2D,0x5AF94F05D8A9FD4,0x92348CD5DC];
pub const CURVE_BNX:[Chunk;NLEN]=[0x140000381,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0];
pub const CRU:[Chunk;NLEN]=[0x4DE9AC5E1C79B90,0x5CD8E3F88E5DE82,0xAB21F74F7421A20,0x6694B9B60DB5D62,0x73422B5FB82F431,0xFF46A846B5FA6AA,0x83D66C1E5FCBED6,0x2096384F2AFA565,0x8B75055DD5D1F4E,0x2C6];
pub const CURVE_PXAAA:[Chunk;NLEN]=[0x34FD0B4ACE8BFAB,0xB79766322154DEC,0x4D80491F510317,0x3CA0612F4005030,0xBAAD1A8C42281A6,0x3A2EF156C46FF79,0x344DBCCB7DE64DB,0x2775DEBABBEFC70,0x71E4A38237FA45A,0x5D615D9A78];
pub const CURVE_PXAAB:[Chunk;NLEN]=[0x669B36676B47C57,0x5556A01AFA143F1,0x7630D979630FFD7,0x6AFFA62504F0C3C,0xABFEDF16214A7,0x12307F4E1C3943A,0xE1623E9526F6DA,0xBC07E8B22BB6D98,0x258512069B0E86A,0x7C4973ECE2];
pub const CURVE_PXABA:[Chunk;NLEN]=[0x488156CA55A3E6A,0xEF4CDED6B3F0B46,0xCBDFBB879D5FEA8,0x66F0D2A6D55F028,0xC1DBD19242FFAE7,0xCCBAB5AB6860161,0xAE237CA7A6D6957,0xAD83BC73A8A6CA9,0xF1334E1B2EA1853,0x1FCCC70198];
pub const CURVE_PXABB:[Chunk;NLEN]=[0x9A7033CBB7FEAFE,0x10B8CB4E80BC3F0,0x1C5257C200CA523,0x43B1B279B9468C3,0x5F63E1C776E6EC1,0x393F8BE0CC218A9,0x62F3E5821B7B92A,0x54D4BFE8F5985AC,0xEB6185C78D80129,0xBE2218C25C];
pub const CURVE_PXBAA:[Chunk;NLEN]=[0x39C3A1C53F8CCE5,0x5B5F746C9D4CBB7,0xD55FC1889AA80C6,0xEF492AE589274FA,0x9E48199D5AC10B2,0xC5805386699981F,0xB1642B5675FF0E7,0xA9DD63007C675D0,0x35913A3C598E4CA,0x38B91C600B];
pub const CURVE_PXBAB:[Chunk;NLEN]=[0x2004D914A3C093A,0x7960910FCE3370F,0xA9F177612F097FC,0x40B9C0B15DD7595,0x3835D28997EB57B,0x7BB037418181DF6,0xEF0977A3D1A5867,0xCDA088F7B8F35DC,0x738603F1311E4E,0xC96C7797EB];
pub const CURVE_PXBBA:[Chunk;NLEN]=[0x41607E60750E057,0x4B5B0E205C3354E,0xCBE4324C22D6333,0xAA5EFCF3432AAD1,0xF293B13CED0FD0C,0xA2C0B7A449CEF11,0x9D13852B6DB908B,0x8AEE660DEA41B3,0x61EE3F0197A4989,0xB9B7951C60];
pub const CURVE_PXBBB:[Chunk;NLEN]=[0xE19DA00FBC6AE34,0x6AF2FC9E97C3F84,0x9BD6AEBF9FC44E5,0x90B7E2B0D458547,0xA93F29CFF364A71,0x719728A7F9F8CFC,0xFAF47B5211CF741,0x4AAA2B1E5D7A9DE,0x2BDEC5282624C4F,0x827D5C22FB];
pub const CURVE_PYAAA:[Chunk;NLEN]=[0x3EDD3FE4D2D7971,0x45012AB12C0FF32,0x9ABF77EEA6D6590,0x336D8AE5163C159,0x35AFA27748D90F7,0xBFC435FAAB09062,0x59A577E6F3B39E,0x2F3024B918B4238,0x75B5DFA49721645,0xEB53356C3];
pub const CURVE_PYAAB:[Chunk;NLEN]=[0x1471DB936CD5665,0x8B423525FFC7B11,0x2FA097D760E2E58,0xD1892AB24E1DD21,0x6B243B1F192C5C3,0x64732FCBF3AFB09,0xA325E6FBA01D729,0x5FCADC2B75A422B,0xE0FF144DA653181,0x284DC75979];
pub const CURVE_PYABA:[Chunk;NLEN]=[0x8332A526A2A8474,0xBC7C46FC3B8FDE6,0x1D35D51A652269C,0x36CA3295E5E2F0C,0xC99D0E904115155,0xD370514475F7D5,0x216D5B119D3A48,0x67669EF2C2FC503,0x8523E421EFB703,0xB36A201DD0];
pub const CURVE_PYABB:[Chunk;NLEN]=[0x6213DA92841589D,0xB3D8B8A1E533731,0x7BDA503EE5E578F,0x817742770BA10D6,0x224333FA40DCED2,0x10E122D2742C89B,0x60DCEE23DD8B0E7,0x78762B1C2CDED33,0xEDC0688223FBBD4,0xAEC25A4621];
pub const CURVE_PYBAA:[Chunk;NLEN]=[0x47831F982E50137,0x857FDDDFCF7A43F,0x30135945D137B08,0xCA4E512B64F59F4,0x7FA238CDCE8A1E2,0x5F1129857ED85C7,0xB43DD93B5A95980,0x88325A2554DC541,0xA9C46916503FA5A,0xD209D5A223];
pub const CURVE_PYBAB:[Chunk;NLEN]=[0x4EEDC58CF90BEE4,0xA59ED8226CF3A59,0xFC198CAA72B679D,0xF47C180D139E3AA,0xE8C270841F6824,0x55AB7504FA8342,0xB16722B589D82E2,0xD537B90421AD66E,0x36B7A513D339D5A,0x7D0D037457];
pub const CURVE_PYBBA:[Chunk;NLEN]=[0xD41FAEAFEB23986,0xE884017D9AA62B3,0x40FA639F53DCCC9,0xAB8C74B2618B5BB,0x5AE3A2864F22C1F,0xE4C819A6DF98F42,0xC0841B064155F14,0xD17AF8A006F364F,0xE65EA25C2D05DFD,0x896767811B];
pub const CURVE_PYBBB:[Chunk;NLEN]=[0x667FFCB732718B6,0x5AC66E84069C55D,0xD8C4AB33F748E,0x333EC7192054173,0x8E69C31E97E1AD0,0xEF8ECA9A9533A3F,0x6BE8E50C87549B6,0x4F981B5E068F140,0x9029D393A5C07E8,0x35E2524FF8];
//pub const CURVE_W:[[Chunk;NLEN];2]=[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]];
//pub const CURVE_SB:[[[Chunk;NLEN];2];2]=[[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]]];
//pub const CURVE_WB:[[Chunk;NLEN];4]=[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]];
//pub const CURVE_BB:[[[Chunk;NLEN];4];4]=[[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]]];
pub const USE_GLV: bool = true;
pub const USE_GS_G2: bool = true;
pub const USE_GS_GT: bool = true;
pub const GT_STRONG: bool = false;

File diff suppressed because it is too large

508 crates/bls48581/src/gcm.rs Normal file

@@ -0,0 +1,508 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const GCM_NB: usize = 4;
const GCM_ACCEPTING_HEADER: usize = 0;
const GCM_ACCEPTING_CIPHER: usize = 1;
const GCM_NOT_ACCEPTING_MORE: usize = 2;
const GCM_FINISHED: usize = 3;
//const GCM_ENCRYPTING: usize = 0;
//const GCM_DECRYPTING: usize = 1;
use crate::aes;
use crate::aes::AES;
pub struct GCM {
table: [[u32; 4]; 128],
statex: [u8; 16],
y_0: [u8; 16],
// counter: usize,
lena: [u32; 2],
lenc: [u32; 2],
status: usize,
a: AES,
}
impl GCM {
fn pack(b: [u8; 4]) -> u32 {
/* pack bytes into a 32-bit Word */
((b[0] as u32) << 24)
| ((b[1] as u32) << 16)
| ((b[2] as u32) << 8)
| (b[3] as u32)
}
fn unpack(a: u32) -> [u8; 4] {
/* unpack bytes from a word */
[
((a >> 24) & 0xff) as u8,
((a >> 16) & 0xff) as u8,
((a >> 8) & 0xff) as u8,
(a & 0xff) as u8,
]
}
fn precompute(&mut self, h: &[u8]) {
let mut b: [u8; 4] = [0; 4];
let mut j = 0;
for i in 0..GCM_NB {
b[0] = h[j];
b[1] = h[j + 1];
b[2] = h[j + 2];
b[3] = h[j + 3];
self.table[0][i] = GCM::pack(b);
j += 4;
}
for i in 1..128 {
let mut c: u32 = 0;
for j in 0..GCM_NB {
self.table[i][j] = c | (self.table[i - 1][j]) >> 1;
c = self.table[i - 1][j] << 31;
}
if c != 0 {
self.table[i][0] ^= 0xE1000000
} /* irreducible polynomial */
}
}
fn gf2mul(&mut self) {
/* gf2m mul - Z=H*X mod 2^128 */
let mut p: [u32; 4] = [0; 4];
for i in 0..4 {
p[i] = 0
}
let mut j: usize = 8;
let mut m = 0;
for i in 0..128 {
j -= 1;
let mut c = ((self.statex[m] >> j) & 1) as u32;
c = (!c).wrapping_add(1); // + 1;
for k in 0..GCM_NB {
p[k] ^= self.table[i][k] & c
}
if j == 0 {
j = 8;
m += 1;
if m == 16 {
break;
}
}
}
j = 0;
for i in 0..GCM_NB {
let b = GCM::unpack(p[i]);
self.statex[j] = b[0];
self.statex[j + 1] = b[1];
self.statex[j + 2] = b[2];
self.statex[j + 3] = b[3];
j += 4;
}
}
fn wrap(&mut self) {
/* Finish off GHASH */
let mut f: [u32; 4] = [0; 4];
let mut el: [u8; 16] = [0; 16];
/* convert lengths from bytes to bits */
f[0] = (self.lena[0] << 3) | (self.lena[1] & 0xE0000000) >> 29;
f[1] = self.lena[1] << 3;
f[2] = (self.lenc[0] << 3) | (self.lenc[1] & 0xE0000000) >> 29;
f[3] = self.lenc[1] << 3;
let mut j = 0;
for i in 0..GCM_NB {
let b = GCM::unpack(f[i]);
el[j] = b[0];
el[j + 1] = b[1];
el[j + 2] = b[2];
el[j + 3] = b[3];
j += 4;
}
for i in 0..16 {
self.statex[i] ^= el[i]
}
self.gf2mul();
}
fn ghash(&mut self, plain: &[u8], len: usize) -> bool {
if self.status == GCM_ACCEPTING_HEADER {
self.status = GCM_ACCEPTING_CIPHER
}
if self.status != GCM_ACCEPTING_CIPHER {
return false;
}
let mut j = 0;
while j < len {
for i in 0..16 {
if j >= len {
break;
}
self.statex[i] ^= plain[j];
j += 1;
self.lenc[1] += 1;
if self.lenc[1] == 0 {
self.lenc[0] += 1
}
}
self.gf2mul();
}
if len % 16 != 0 {
self.status = GCM_NOT_ACCEPTING_MORE
}
true
}
/* Initialize GCM mode */
pub fn init(&mut self, nk: usize, key: &[u8], niv: usize, iv: &[u8]) {
/* iv size niv is usually 12 bytes (96 bits). AES key size nk can be 16,24 or 32 bytes */
let mut h: [u8; 16] = [0; 16];
for i in 0..16 {
h[i] = 0;
self.statex[i] = 0
}
self.a = AES::new();
self.a.init(aes::ECB, nk, key, None);
self.a.ecb_encrypt(&mut h); /* E(K,0) */
self.precompute(&h);
self.lena[0] = 0;
self.lenc[0] = 0;
self.lena[1] = 0;
self.lenc[1] = 0;
if niv == 12 {
for i in 0..12 {
self.a.f[i] = iv[i]
}
let b = GCM::unpack(1);
self.a.f[12] = b[0];
self.a.f[13] = b[1];
self.a.f[14] = b[2];
self.a.f[15] = b[3]; /* initialise IV */
for i in 0..16 {
self.y_0[i] = self.a.f[i]
}
} else {
self.status = GCM_ACCEPTING_CIPHER;
self.ghash(iv, niv); /* GHASH(H,0,IV) */
self.wrap();
for i in 0..16 {
self.a.f[i] = self.statex[i];
self.y_0[i] = self.a.f[i];
self.statex[i] = 0
}
self.lena[0] = 0;
self.lenc[0] = 0;
self.lena[1] = 0;
self.lenc[1] = 0;
}
self.status = GCM_ACCEPTING_HEADER;
}
pub fn new() -> GCM {
GCM {
table: [[0; 4]; 128],
statex: [0; 16],
y_0: [0; 16],
//counter:0,
lena: [0; 2],
lenc: [0; 2],
status: 0,
a: AES::new(),
}
}
/* Add Header data - included but not encrypted */
pub fn add_header(&mut self, header: &[u8], len: usize) -> bool {
/* Add some header. Won't be encrypted, but will be authenticated. len is length of header */
if self.status != GCM_ACCEPTING_HEADER {
return false;
}
let mut j = 0;
while j < len {
for i in 0..16 {
if j >= len {
break;
}
self.statex[i] ^= header[j];
j += 1;
self.lena[1] += 1;
if self.lena[1] == 0 {
self.lena[0] += 1
}
}
self.gf2mul();
}
if len % 16 != 0 {
self.status = GCM_ACCEPTING_CIPHER
}
true
}
/* Add Plaintext - included and encrypted */
/* if plain is None - encrypts cipher in place */
pub fn add_plain(&mut self, cipher: &mut [u8], plain: Option<&[u8]>, len: usize) -> bool {
let mut cb: [u8; 16] = [0; 16];
let mut b: [u8; 4] = [0; 4];
let mut counter: u32;
if self.status == GCM_ACCEPTING_HEADER {
self.status = GCM_ACCEPTING_CIPHER
}
if self.status != GCM_ACCEPTING_CIPHER {
return false;
}
let mut j = 0;
while j < len {
b[0] = self.a.f[12];
b[1] = self.a.f[13];
b[2] = self.a.f[14];
b[3] = self.a.f[15];
counter = GCM::pack(b);
counter += 1;
b = GCM::unpack(counter);
self.a.f[12] = b[0];
self.a.f[13] = b[1];
self.a.f[14] = b[2];
self.a.f[15] = b[3]; /* increment counter */
for i in 0..16 {
cb[i] = self.a.f[i]
}
self.a.ecb_encrypt(&mut cb); /* encrypt it */
for i in 0..16 {
if j >= len {
break;
}
if let Some(sp) = plain {
cipher[j] = sp[j] ^ cb[i];
} else {
cipher[j] ^=cb[i];
}
self.statex[i] ^= cipher[j];
j += 1;
self.lenc[1] += 1;
if self.lenc[1] == 0 {
self.lenc[0] += 1
}
}
self.gf2mul()
}
if len % 16 != 0 {
self.status = GCM_NOT_ACCEPTING_MORE
}
true
}
/* Add Ciphertext - decrypts to plaintext */
/* if cipher is None - decrypts plain in place */
pub fn add_cipher(&mut self, plain: &mut [u8], cipher: Option<&[u8]>, len: usize) -> bool {
let mut cb: [u8; 16] = [0; 16];
let mut b: [u8; 4] = [0; 4];
let mut counter: u32;
if self.status == GCM_ACCEPTING_HEADER {
self.status = GCM_ACCEPTING_CIPHER
}
if self.status != GCM_ACCEPTING_CIPHER {
return false;
}
let mut j = 0;
while j < len {
b[0] = self.a.f[12];
b[1] = self.a.f[13];
b[2] = self.a.f[14];
b[3] = self.a.f[15];
counter = GCM::pack(b);
counter += 1;
b = GCM::unpack(counter);
self.a.f[12] = b[0];
self.a.f[13] = b[1];
self.a.f[14] = b[2];
self.a.f[15] = b[3]; /* increment counter */
for i in 0..16 {
cb[i] = self.a.f[i]
}
self.a.ecb_encrypt(&mut cb); /* encrypt it */
for i in 0..16 {
if j >= len {
break;
}
let oc:u8;
if let Some(sc) = cipher {
oc = sc[j];
} else {
oc = plain[j];
}
plain[j] = oc ^ cb[i];
self.statex[i] ^= oc;
j += 1;
self.lenc[1] += 1;
if self.lenc[1] == 0 {
self.lenc[0] += 1
}
}
self.gf2mul()
}
if len % 16 != 0 {
self.status = GCM_NOT_ACCEPTING_MORE
}
true
}
/* Finish and extract Tag */
pub fn finish(&mut self,tag: &mut [u8], extract: bool) {
/* Finish off GHASH and extract tag (MAC) */
self.wrap();
/* extract tag */
if extract {
self.a.ecb_encrypt(&mut (self.y_0)); /* E(K,Y0) */
for i in 0..16 {
self.y_0[i] ^= self.statex[i]
}
for i in 0..16 {
tag[i] = self.y_0[i];
self.y_0[i] = 0;
self.statex[i] = 0
}
}
self.status = GCM_FINISHED;
self.a.end();
}
pub fn hex2bytes(hex: &[u8], bin: &mut [u8]) {
let len = hex.len();
for i in 0..len / 2 {
let mut v: u8;
let mut c = hex[2 * i];
if c >= b'0' && c <= b'9' {
v = c - b'0';
} else if c >= b'A' && c <= b'F' {
v = c - b'A' + 10;
} else if c >= b'a' && c <= b'f' {
v = c - b'a' + 10;
} else {
v = 0;
}
v <<= 4;
c = hex[2 * i + 1];
if c >= b'0' && c <= b'9' {
v += c - b'0';
} else if c >= b'A' && c <= b'F' {
v += c - b'A' + 10;
} else if c >= b'a' && c <= b'f' {
v += c - b'a' + 10;
} else {
v = 0;
}
bin[i] = v;
}
}
}
pub fn encrypt(c: &mut [u8],t: &mut [u8],k: &[u8],iv: &[u8],h: &[u8],p: &[u8]) {
let mut g=GCM::new();
g.init(k.len(),k,iv.len(),iv);
g.add_header(h,h.len());
g.add_plain(c,Some(p),p.len());
g.finish(t,true)
}
pub fn decrypt(p: &mut [u8],t: &mut [u8],k: &[u8],iv: &[u8],h: &[u8],c: &[u8]) {
let mut g=GCM::new();
g.init(k.len(),k,iv.len(),iv);
g.add_header(h,h.len());
g.add_cipher(p,Some(c),c.len());
g.finish(t,true);
}
/*
fn main()
{
let kt=b"feffe9928665731c6d6a8f9467308308";
let mt=b"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39";
let ht=b"feedfacedeadbeeffeedfacedeadbeefabaddad2";
let nt=b"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b";
// Tag should be 619cc5aefffe0bfa462af43c1699d050
let mut gcm=GCM::new();
let len=mt.len()/2;
let lenh=ht.len()/2;
let lenk=kt.len()/2;
let leniv=nt.len()/2;
//let mut t:[u8;16]=[0;16]; // Tag
let mut k:[u8;16]=[0;16]; // AES Key
let mut h:[u8;64]=[0;64]; // Header - to be included in Authentication, but not encrypted
let mut n:[u8;100]=[0;100]; // IV - Initialisation vector
let mut m:[u8;100]=[0;100]; // Plaintext to be encrypted/authenticated
let mut c:[u8;100]=[0;100]; // Ciphertext
let mut p:[u8;100]=[0;100]; // Recovered Plaintext
GCM::hex2bytes(mt,&mut m);
GCM::hex2bytes(ht,&mut h);
GCM::hex2bytes(kt,&mut k);
GCM::hex2bytes(nt,&mut n);
println!("Plaintext=");
for i in 0..len {print!("{:02x}",m[i])}
println!("");
gcm.init(lenk,&k,leniv,&n);
gcm.add_header(&h,lenh);
gcm.add_plain(&mut c,&m,len);
let mut t=gcm.finish(true);
println!("Ciphertext=");
for i in 0..len {print!("{:02x}",c[i])}
println!("");
println!("Tag=");
for i in 0..16 {print!("{:02x}",t[i])}
println!("");
gcm.init(lenk,&k,leniv,&n);
gcm.add_header(&h,lenh);
gcm.add_cipher(&mut p,&c,len);
t=gcm.finish(true);
println!("Plaintext=");
for i in 0..len {print!("{:02x}",p[i])}
println!("");
println!("Tag=");
for i in 0..16 {print!("{:02x}",t[i])}
println!("");
}
*/
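
The commented-out test above predates the current method signatures (add_plain and add_cipher now take an Option for the second buffer, and finish writes the tag into a caller-supplied array). A minimal round-trip sketch against the module-level encrypt and decrypt helpers, reusing the same vectors, is given below; it is editorial, not part of the diff, and assumes it sits inside this module so that GCM, encrypt, and decrypt are in scope.

```rust
// Hypothetical usage sketch: AES-GCM round trip with the vectors from the test above.
fn gcm_roundtrip_sketch() {
    let kt = b"feffe9928665731c6d6a8f9467308308";
    let mt = b"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39";
    let ht = b"feedfacedeadbeeffeedfacedeadbeefabaddad2";
    let nt = b"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b";

    let mut k = [0u8; 16];   // AES key
    let mut iv = [0u8; 60];  // non-96-bit IV, so init() takes the GHASH path
    let mut h = [0u8; 20];   // authenticated-only header
    let mut m = [0u8; 60];   // plaintext
    GCM::hex2bytes(kt, &mut k);
    GCM::hex2bytes(nt, &mut iv);
    GCM::hex2bytes(ht, &mut h);
    GCM::hex2bytes(mt, &mut m);

    let mut c = [0u8; 60];
    let mut p = [0u8; 60];
    let mut t1 = [0u8; 16];
    let mut t2 = [0u8; 16];

    // Encrypt then decrypt; the tag for this vector is quoted above as 619c...d050.
    encrypt(&mut c, &mut t1, &k, &iv, &h, &m);
    decrypt(&mut p, &mut t2, &k, &iv, &h, &c);
    assert_eq!(&p[..], &m[..]);
    assert_eq!(t1, t2);
}
```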


@@ -0,0 +1,288 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const HASH256_H0: u32 = 0x6A09E667;
const HASH256_H1: u32 = 0xBB67AE85;
const HASH256_H2: u32 = 0x3C6EF372;
const HASH256_H3: u32 = 0xA54FF53A;
const HASH256_H4: u32 = 0x510E527F;
const HASH256_H5: u32 = 0x9B05688C;
const HASH256_H6: u32 = 0x1F83D9AB;
const HASH256_H7: u32 = 0x5BE0CD19;
const HASH256_K: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
];
pub struct HASH256 {
length: [u32; 2],
h: [u32; 8],
w: [u32; 64],
}
impl HASH256 {
fn s(n: u32, x: u32) -> u32 {
((x) >> n) | ((x) << (32 - n))
}
fn r(n: u32, x: u32) -> u32 {
(x) >> n
}
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!(x) & z)
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
fn sig0(x: u32) -> u32 {
HASH256::s(2, x) ^ HASH256::s(13, x) ^ HASH256::s(22, x)
}
fn sig1(x: u32) -> u32 {
HASH256::s(6, x) ^ HASH256::s(11, x) ^ HASH256::s(25, x)
}
fn theta0(x: u32) -> u32 {
HASH256::s(7, x) ^ HASH256::s(18, x) ^ HASH256::r(3, x)
}
fn theta1(x: u32) -> u32 {
HASH256::s(17, x) ^ HASH256::s(19, x) ^ HASH256::r(10, x)
}
pub fn as_bytes(&self,array: &mut [u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=self.length[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..8 {
let mut t=self.h[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..64 {
let mut t=self.w[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
}
pub fn from_bytes(&mut self,array: &[u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=array[ptr+3] as u32;
t=256*t+(array[ptr+2] as u32);
t=256*t+(array[ptr+1] as u32);
t=256*t+(array[ptr] as u32);
self.length[i]=t; ptr+=4;
}
for i in 0..8 {
let mut t=array[ptr+3] as u32;
t=256*t+(array[ptr+2] as u32);
t=256*t+(array[ptr+1] as u32);
t=256*t+(array[ptr] as u32);
self.h[i]=t; ptr+=4;
}
for i in 0..64 {
let mut t=array[ptr+3] as u32;
t=256*t+(array[ptr+2] as u32);
t=256*t+(array[ptr+1] as u32);
t=256*t+(array[ptr] as u32);
self.w[i]=t; ptr+=4;
}
}
fn transform(&mut self) {
/* basic transformation step */
for j in 16..64 {
self.w[j] = HASH256::theta1(self.w[j - 2])
.wrapping_add(self.w[j - 7])
.wrapping_add(HASH256::theta0(self.w[j - 15]))
.wrapping_add(self.w[j - 16]);
}
let mut a = self.h[0];
let mut b = self.h[1];
let mut c = self.h[2];
let mut d = self.h[3];
let mut e = self.h[4];
let mut f = self.h[5];
let mut g = self.h[6];
let mut hh = self.h[7];
for j in 0..64 {
/* 64 times - mush it up */
let t1 = hh
.wrapping_add(HASH256::sig1(e))
.wrapping_add(HASH256::ch(e, f, g))
.wrapping_add(HASH256_K[j])
.wrapping_add(self.w[j]);
let t2 = HASH256::sig0(a).wrapping_add(HASH256::maj(a, b, c));
hh = g;
g = f;
f = e;
e = d.wrapping_add(t1);
d = c;
c = b;
b = a;
a = t1.wrapping_add(t2);
}
self.h[0] = self.h[0].wrapping_add(a);
self.h[1] = self.h[1].wrapping_add(b);
self.h[2] = self.h[2].wrapping_add(c);
self.h[3] = self.h[3].wrapping_add(d);
self.h[4] = self.h[4].wrapping_add(e);
self.h[5] = self.h[5].wrapping_add(f);
self.h[6] = self.h[6].wrapping_add(g);
self.h[7] = self.h[7].wrapping_add(hh);
}
/* Initialise Hash function */
pub fn init(&mut self) {
/* initialise */
for i in 0..64 {
self.w[i] = 0
}
self.length[0] = 0;
self.length[1] = 0;
self.h[0] = HASH256_H0;
self.h[1] = HASH256_H1;
self.h[2] = HASH256_H2;
self.h[3] = HASH256_H3;
self.h[4] = HASH256_H4;
self.h[5] = HASH256_H5;
self.h[6] = HASH256_H6;
self.h[7] = HASH256_H7;
}
pub fn new() -> HASH256 {
let mut nh = HASH256 {
length: [0; 2],
h: [0; 8],
w: [0; 64],
};
nh.init();
nh
}
pub fn new_copy(hh: &HASH256) -> HASH256 {
let mut nh = HASH256 {
length: [0; 2],
h: [0; 8],
w: [0; 64],
};
nh.length[0]=hh.length[0];
nh.length[1]=hh.length[1];
for i in 0..64 {
nh.w[i] = hh.w[i];
}
for i in 0..8 {
nh.h[i] = hh.h[i];
}
nh
}
/* process a single byte */
pub fn process(&mut self, byt: u8) {
/* process the next message byte */
let cnt = ((self.length[0] / 32) % 16) as usize;
self.w[cnt] <<= 8;
self.w[cnt] |= byt as u32;
self.length[0] += 8;
if self.length[0] == 0 {
self.length[1] += 1;
self.length[0] = 0
}
if (self.length[0] % 512) == 0 {
self.transform()
}
}
/* process an array of bytes */
pub fn process_array(&mut self, b: &[u8]) {
for i in 0..b.len() {
self.process(b[i])
}
}
/* process a 32-bit integer */
pub fn process_num(&mut self, n: i32) {
self.process(((n >> 24) & 0xff) as u8);
self.process(((n >> 16) & 0xff) as u8);
self.process(((n >> 8) & 0xff) as u8);
self.process((n & 0xff) as u8);
}
/* Generate 32-byte Hash */
pub fn hash(&mut self) -> [u8; 32] {
/* pad message and finish - supply digest */
let mut digest: [u8; 32] = [0; 32];
let len0 = self.length[0];
let len1 = self.length[1];
self.process(0x80);
while (self.length[0] % 512) != 448 {
self.process(0)
}
self.w[14] = len1;
self.w[15] = len0;
self.transform();
for i in 0..32 {
/* convert to bytes */
digest[i] = ((self.h[i / 4] >> (8 * (3 - i % 4))) & 0xff) as u8;
}
self.init();
digest
}
pub fn continuing_hash(&self) -> [u8; 32] {
let mut sh=HASH256::new_copy(self);
sh.hash()
}
}
//248d6a61 d20638b8 e5c02693 0c3e6039 a33ce459 64ff2167 f6ecedd4 19db06c1
/*
fn main() {
let s = String::from("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq");
let test = s.into_bytes();
let mut sh=HASH256::new();
for i in 0..test.len(){
sh.process(test[i]);
}
let digest=sh.hash();
for i in 0..32 {print!("{:02x}",digest[i])}
}
*/
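
A minimal usage sketch (editorial, not part of the diff): a one-shot SHA-256 of "abc" plus a mid-stream snapshot via continuing_hash. The crate::hash256 import path mirrors the imports used elsewhere in this diff.

```rust
use crate::hash256::HASH256;

// SHA-256 of "abc"; the well-known digest starts ba7816bf 8f01cfea 414140de ...
fn sha256_abc_sketch() -> [u8; 32] {
    let mut sh = HASH256::new();
    sh.process_array(b"abc");
    // continuing_hash() finalises a copy, so the running state is untouched
    // and more bytes could still be fed into `sh` afterwards.
    let _snapshot: [u8; 32] = sh.continuing_hash();
    sh.hash()
}
```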


@@ -0,0 +1,383 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const HASH384_H0: u64 = 0xcbbb9d5dc1059ed8;
const HASH384_H1: u64 = 0x629a292a367cd507;
const HASH384_H2: u64 = 0x9159015a3070dd17;
const HASH384_H3: u64 = 0x152fecd8f70e5939;
const HASH384_H4: u64 = 0x67332667ffc00b31;
const HASH384_H5: u64 = 0x8eb44a8768581511;
const HASH384_H6: u64 = 0xdb0c2e0d64f98fa7;
const HASH384_H7: u64 = 0x47b5481dbefa4fa4;
const HASH384_K: [u64; 80] = [
0x428a2f98d728ae22,
0x7137449123ef65cd,
0xb5c0fbcfec4d3b2f,
0xe9b5dba58189dbbc,
0x3956c25bf348b538,
0x59f111f1b605d019,
0x923f82a4af194f9b,
0xab1c5ed5da6d8118,
0xd807aa98a3030242,
0x12835b0145706fbe,
0x243185be4ee4b28c,
0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f,
0x80deb1fe3b1696b1,
0x9bdc06a725c71235,
0xc19bf174cf692694,
0xe49b69c19ef14ad2,
0xefbe4786384f25e3,
0x0fc19dc68b8cd5b5,
0x240ca1cc77ac9c65,
0x2de92c6f592b0275,
0x4a7484aa6ea6e483,
0x5cb0a9dcbd41fbd4,
0x76f988da831153b5,
0x983e5152ee66dfab,
0xa831c66d2db43210,
0xb00327c898fb213f,
0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2,
0xd5a79147930aa725,
0x06ca6351e003826f,
0x142929670a0e6e70,
0x27b70a8546d22ffc,
0x2e1b21385c26c926,
0x4d2c6dfc5ac42aed,
0x53380d139d95b3df,
0x650a73548baf63de,
0x766a0abb3c77b2a8,
0x81c2c92e47edaee6,
0x92722c851482353b,
0xa2bfe8a14cf10364,
0xa81a664bbc423001,
0xc24b8b70d0f89791,
0xc76c51a30654be30,
0xd192e819d6ef5218,
0xd69906245565a910,
0xf40e35855771202a,
0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8,
0x1e376c085141ab53,
0x2748774cdf8eeb99,
0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63,
0x4ed8aa4ae3418acb,
0x5b9cca4f7763e373,
0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc,
0x78a5636f43172f60,
0x84c87814a1f0ab72,
0x8cc702081a6439ec,
0x90befffa23631e28,
0xa4506cebde82bde9,
0xbef9a3f7b2c67915,
0xc67178f2e372532b,
0xca273eceea26619c,
0xd186b8c721c0c207,
0xeada7dd6cde0eb1e,
0xf57d4f7fee6ed178,
0x06f067aa72176fba,
0x0a637dc5a2c898a6,
0x113f9804bef90dae,
0x1b710b35131c471b,
0x28db77f523047d84,
0x32caab7b40c72493,
0x3c9ebe0a15c9bebc,
0x431d67c49c100d4c,
0x4cc5d4becb3e42b6,
0x597f299cfc657e2a,
0x5fcb6fab3ad6faec,
0x6c44198c4a475817,
];
pub struct HASH384 {
length: [u64; 2],
h: [u64; 8],
w: [u64; 80],
}
impl HASH384 {
fn s(n: u64, x: u64) -> u64 {
((x) >> n) | ((x) << (64 - n))
}
fn r(n: u64, x: u64) -> u64 {
(x) >> n
}
fn ch(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (!(x) & z)
}
fn maj(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (x & z) ^ (y & z)
}
fn sig0(x: u64) -> u64 {
HASH384::s(28, x) ^ HASH384::s(34, x) ^ HASH384::s(39, x)
}
fn sig1(x: u64) -> u64 {
HASH384::s(14, x) ^ HASH384::s(18, x) ^ HASH384::s(41, x)
}
fn theta0(x: u64) -> u64 {
HASH384::s(1, x) ^ HASH384::s(8, x) ^ HASH384::r(7, x)
}
fn theta1(x: u64) -> u64 {
HASH384::s(19, x) ^ HASH384::s(61, x) ^ HASH384::r(6, x)
}
pub fn as_bytes(&self,array: &mut [u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=self.length[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..8 {
let mut t=self.h[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..80 {
let mut t=self.w[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
}
pub fn from_bytes(&mut self,array: &[u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.length[i]=t; ptr+=8;
}
for i in 0..8 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.h[i]=t; ptr+=8;
}
for i in 0..80 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.w[i]=t; ptr+=8;
}
}
fn transform(&mut self) {
/* basic transformation step */
for j in 16..80 {
self.w[j] = HASH384::theta1(self.w[j - 2])
.wrapping_add(self.w[j - 7])
.wrapping_add(HASH384::theta0(self.w[j - 15]))
.wrapping_add(self.w[j - 16]);
}
let mut a = self.h[0];
let mut b = self.h[1];
let mut c = self.h[2];
let mut d = self.h[3];
let mut e = self.h[4];
let mut f = self.h[5];
let mut g = self.h[6];
let mut hh = self.h[7];
for j in 0..80 {
/* 80 times - mush it up */
let t1 = hh
.wrapping_add(HASH384::sig1(e))
.wrapping_add(HASH384::ch(e, f, g))
.wrapping_add(HASH384_K[j])
.wrapping_add(self.w[j]);
let t2 = HASH384::sig0(a).wrapping_add(HASH384::maj(a, b, c));
hh = g;
g = f;
f = e;
e = d.wrapping_add(t1);
d = c;
c = b;
b = a;
a = t1.wrapping_add(t2);
}
self.h[0] = self.h[0].wrapping_add(a);
self.h[1] = self.h[1].wrapping_add(b);
self.h[2] = self.h[2].wrapping_add(c);
self.h[3] = self.h[3].wrapping_add(d);
self.h[4] = self.h[4].wrapping_add(e);
self.h[5] = self.h[5].wrapping_add(f);
self.h[6] = self.h[6].wrapping_add(g);
self.h[7] = self.h[7].wrapping_add(hh);
}
/* Initialise Hash function */
pub fn init(&mut self) {
/* initialise */
for i in 0..64 {
self.w[i] = 0
}
self.length[0] = 0;
self.length[1] = 0;
self.h[0] = HASH384_H0;
self.h[1] = HASH384_H1;
self.h[2] = HASH384_H2;
self.h[3] = HASH384_H3;
self.h[4] = HASH384_H4;
self.h[5] = HASH384_H5;
self.h[6] = HASH384_H6;
self.h[7] = HASH384_H7;
}
pub fn new() -> HASH384 {
let mut nh = HASH384 {
length: [0; 2],
h: [0; 8],
w: [0; 80],
};
nh.init();
nh
}
pub fn new_copy(hh: &HASH384) -> HASH384 {
let mut nh = HASH384 {
length: [0; 2],
h: [0; 8],
w: [0; 80],
};
nh.length[0]=hh.length[0];
nh.length[1]=hh.length[1];
for i in 0..80 {
nh.w[i] = hh.w[i];
}
for i in 0..8 {
nh.h[i] = hh.h[i];
}
nh
}
/* process a single byte */
pub fn process(&mut self, byt: u8) {
/* process the next message byte */
let cnt = ((self.length[0] / 64) % 16) as usize;
self.w[cnt] <<= 8;
self.w[cnt] |= byt as u64;
self.length[0] += 8;
if self.length[0] == 0 {
self.length[1] += 1;
self.length[0] = 0
}
if (self.length[0] % 1024) == 0 {
self.transform()
}
}
/* process an array of bytes */
pub fn process_array(&mut self, b: &[u8]) {
for i in 0..b.len() {
self.process(b[i])
}
}
/* process a 32-bit integer */
pub fn process_num(&mut self, n: i32) {
self.process(((n >> 24) & 0xff) as u8);
self.process(((n >> 16) & 0xff) as u8);
self.process(((n >> 8) & 0xff) as u8);
self.process((n & 0xff) as u8);
}
/* Generate 48-byte Hash */
pub fn hash(&mut self) -> [u8; 48] {
/* pad message and finish - supply digest */
let mut digest: [u8; 48] = [0; 48];
let len0 = self.length[0];
let len1 = self.length[1];
self.process(0x80);
while (self.length[0] % 1024) != 896 {
self.process(0)
}
self.w[14] = len1;
self.w[15] = len0;
self.transform();
for i in 0..48 {
/* convert to bytes */
digest[i] = ((self.h[i / 8] >> (8 * (7 - i % 8))) & 0xff) as u8;
}
self.init();
digest
}
pub fn continuing_hash(&self) -> [u8; 48] {
let mut sh=HASH384::new_copy(self);
sh.hash()
}
}
//09330c33f71147e8 3d192fc782cd1b47 53111b173b3b05d2 2fa08086e3b0f712 fcc7c71a557e2db9 66c3e9fa91746039
/*
fn main() {
let s = String::from("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
let test = s.into_bytes();
let mut sh=HASH384::new();
for i in 0..test.len(){
sh.process(test[i]);
}
let digest=sh.hash();
for i in 0..48 {print!("{:02x}",digest[i])}
} */


@@ -0,0 +1,383 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const HASH512_H0: u64 = 0x6a09e667f3bcc908;
const HASH512_H1: u64 = 0xbb67ae8584caa73b;
const HASH512_H2: u64 = 0x3c6ef372fe94f82b;
const HASH512_H3: u64 = 0xa54ff53a5f1d36f1;
const HASH512_H4: u64 = 0x510e527fade682d1;
const HASH512_H5: u64 = 0x9b05688c2b3e6c1f;
const HASH512_H6: u64 = 0x1f83d9abfb41bd6b;
const HASH512_H7: u64 = 0x5be0cd19137e2179;
const HASH512_K: [u64; 80] = [
0x428a2f98d728ae22,
0x7137449123ef65cd,
0xb5c0fbcfec4d3b2f,
0xe9b5dba58189dbbc,
0x3956c25bf348b538,
0x59f111f1b605d019,
0x923f82a4af194f9b,
0xab1c5ed5da6d8118,
0xd807aa98a3030242,
0x12835b0145706fbe,
0x243185be4ee4b28c,
0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f,
0x80deb1fe3b1696b1,
0x9bdc06a725c71235,
0xc19bf174cf692694,
0xe49b69c19ef14ad2,
0xefbe4786384f25e3,
0x0fc19dc68b8cd5b5,
0x240ca1cc77ac9c65,
0x2de92c6f592b0275,
0x4a7484aa6ea6e483,
0x5cb0a9dcbd41fbd4,
0x76f988da831153b5,
0x983e5152ee66dfab,
0xa831c66d2db43210,
0xb00327c898fb213f,
0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2,
0xd5a79147930aa725,
0x06ca6351e003826f,
0x142929670a0e6e70,
0x27b70a8546d22ffc,
0x2e1b21385c26c926,
0x4d2c6dfc5ac42aed,
0x53380d139d95b3df,
0x650a73548baf63de,
0x766a0abb3c77b2a8,
0x81c2c92e47edaee6,
0x92722c851482353b,
0xa2bfe8a14cf10364,
0xa81a664bbc423001,
0xc24b8b70d0f89791,
0xc76c51a30654be30,
0xd192e819d6ef5218,
0xd69906245565a910,
0xf40e35855771202a,
0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8,
0x1e376c085141ab53,
0x2748774cdf8eeb99,
0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63,
0x4ed8aa4ae3418acb,
0x5b9cca4f7763e373,
0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc,
0x78a5636f43172f60,
0x84c87814a1f0ab72,
0x8cc702081a6439ec,
0x90befffa23631e28,
0xa4506cebde82bde9,
0xbef9a3f7b2c67915,
0xc67178f2e372532b,
0xca273eceea26619c,
0xd186b8c721c0c207,
0xeada7dd6cde0eb1e,
0xf57d4f7fee6ed178,
0x06f067aa72176fba,
0x0a637dc5a2c898a6,
0x113f9804bef90dae,
0x1b710b35131c471b,
0x28db77f523047d84,
0x32caab7b40c72493,
0x3c9ebe0a15c9bebc,
0x431d67c49c100d4c,
0x4cc5d4becb3e42b6,
0x597f299cfc657e2a,
0x5fcb6fab3ad6faec,
0x6c44198c4a475817,
];
pub struct HASH512 {
length: [u64; 2],
h: [u64; 8],
w: [u64; 80],
}
impl HASH512 {
fn s(n: u64, x: u64) -> u64 {
((x) >> n) | ((x) << (64 - n))
}
fn r(n: u64, x: u64) -> u64 {
(x) >> n
}
fn ch(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (!(x) & z)
}
fn maj(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (x & z) ^ (y & z)
}
fn sig0(x: u64) -> u64 {
HASH512::s(28, x) ^ HASH512::s(34, x) ^ HASH512::s(39, x)
}
fn sig1(x: u64) -> u64 {
HASH512::s(14, x) ^ HASH512::s(18, x) ^ HASH512::s(41, x)
}
fn theta0(x: u64) -> u64 {
HASH512::s(1, x) ^ HASH512::s(8, x) ^ HASH512::r(7, x)
}
fn theta1(x: u64) -> u64 {
HASH512::s(19, x) ^ HASH512::s(61, x) ^ HASH512::r(6, x)
}
pub fn as_bytes(&self,array: &mut [u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=self.length[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..8 {
let mut t=self.h[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
for i in 0..80 {
let mut t=self.w[i];
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=(t%256) as u8; t/=256; ptr+=1;
array[ptr]=t as u8; ptr+=1;
}
}
pub fn from_bytes(&mut self,array: &[u8]) {
let mut ptr=0;
for i in 0..2 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.length[i]=t; ptr+=8;
}
for i in 0..8 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.h[i]=t; ptr+=8;
}
for i in 0..80 {
let mut t=array[ptr+7] as u64;
t=256*t+(array[ptr+6] as u64);
t=256*t+(array[ptr+5] as u64);
t=256*t+(array[ptr+4] as u64);
t=256*t+(array[ptr+3] as u64);
t=256*t+(array[ptr+2] as u64);
t=256*t+(array[ptr+1] as u64);
t=256*t+(array[ptr] as u64);
self.w[i]=t; ptr+=8;
}
}
fn transform(&mut self) {
/* basic transformation step */
for j in 16..80 {
self.w[j] = HASH512::theta1(self.w[j - 2])
.wrapping_add(self.w[j - 7])
.wrapping_add(HASH512::theta0(self.w[j - 15]))
.wrapping_add(self.w[j - 16]);
}
let mut a = self.h[0];
let mut b = self.h[1];
let mut c = self.h[2];
let mut d = self.h[3];
let mut e = self.h[4];
let mut f = self.h[5];
let mut g = self.h[6];
let mut hh = self.h[7];
for j in 0..80 {
/* 80 times - mush it up */
let t1 = hh
.wrapping_add(HASH512::sig1(e))
.wrapping_add(HASH512::ch(e, f, g))
.wrapping_add(HASH512_K[j])
.wrapping_add(self.w[j]);
let t2 = HASH512::sig0(a).wrapping_add(HASH512::maj(a, b, c));
hh = g;
g = f;
f = e;
e = d.wrapping_add(t1);
d = c;
c = b;
b = a;
a = t1.wrapping_add(t2);
}
self.h[0] = self.h[0].wrapping_add(a);
self.h[1] = self.h[1].wrapping_add(b);
self.h[2] = self.h[2].wrapping_add(c);
self.h[3] = self.h[3].wrapping_add(d);
self.h[4] = self.h[4].wrapping_add(e);
self.h[5] = self.h[5].wrapping_add(f);
self.h[6] = self.h[6].wrapping_add(g);
self.h[7] = self.h[7].wrapping_add(hh);
}
/* Initialise Hash function */
pub fn init(&mut self) {
/* initialise */
for i in 0..64 {
self.w[i] = 0
}
self.length[0] = 0;
self.length[1] = 0;
self.h[0] = HASH512_H0;
self.h[1] = HASH512_H1;
self.h[2] = HASH512_H2;
self.h[3] = HASH512_H3;
self.h[4] = HASH512_H4;
self.h[5] = HASH512_H5;
self.h[6] = HASH512_H6;
self.h[7] = HASH512_H7;
}
pub fn new() -> HASH512 {
let mut nh = HASH512 {
length: [0; 2],
h: [0; 8],
w: [0; 80],
};
nh.init();
nh
}
pub fn new_copy(hh: &HASH512) -> HASH512 {
let mut nh = HASH512 {
length: [0; 2],
h: [0; 8],
w: [0; 80],
};
nh.length[0]=hh.length[0];
nh.length[1]=hh.length[1];
for i in 0..80 {
nh.w[i] = hh.w[i];
}
for i in 0..8 {
nh.h[i] = hh.h[i];
}
nh
}
/* process a single byte */
pub fn process(&mut self, byt: u8) {
/* process the next message byte */
let cnt = ((self.length[0] / 64) % 16) as usize;
self.w[cnt] <<= 8;
self.w[cnt] |= byt as u64;
self.length[0] += 8;
if self.length[0] == 0 {
self.length[1] += 1;
self.length[0] = 0
}
if (self.length[0] % 1024) == 0 {
self.transform()
}
}
/* process an array of bytes */
pub fn process_array(&mut self, b: &[u8]) {
for i in 0..b.len() {
self.process(b[i])
}
}
/* process a 32-bit integer */
pub fn process_num(&mut self, n: i32) {
self.process(((n >> 24) & 0xff) as u8);
self.process(((n >> 16) & 0xff) as u8);
self.process(((n >> 8) & 0xff) as u8);
self.process((n & 0xff) as u8);
}
/* Generate 64-byte Hash */
pub fn hash(&mut self) -> [u8; 64] {
/* pad message and finish - supply digest */
let mut digest: [u8; 64] = [0; 64];
let len0 = self.length[0];
let len1 = self.length[1];
self.process(0x80);
while (self.length[0] % 1024) != 896 {
self.process(0)
}
self.w[14] = len1;
self.w[15] = len0;
self.transform();
for i in 0..64 {
/* convert to bytes */
digest[i] = ((self.h[i / 8] >> (8 * (7 - i % 8))) & 0xff) as u8;
}
self.init();
digest
}
pub fn continuing_hash(&self) -> [u8; 64] {
let mut sh=HASH512::new_copy(self);
sh.hash()
}
}
//8e959b75dae313da 8cf4f72814fc143f 8f7779c6eb9f7fa1 7299aeadb6889018 501d289e4900f7e4 331b99dec4b5433a c7d329eeb6dd2654 5e96e55b874be909
/*
fn main() {
let s = String::from("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
let test = s.into_bytes();
let mut sh=HASH512::new();
for i in 0..test.len(){
sh.process(test[i]);
}
let digest=sh.hash();
for i in 0..64 {print!("{:02x}",digest[i])}
} */
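
The as_bytes and from_bytes pair above serialises the complete running state (bit lengths, chaining values, and message schedule), so a long hash can be checkpointed and resumed in another object. A minimal sketch of that round trip follows (editorial, not part of the diff); the crate::hash512 import path is assumed to mirror the other modules in this diff.

```rust
use crate::hash512::HASH512;

// Hash `first`, checkpoint, then finish the message in a fresh object.
fn sha512_resume_sketch(first: &[u8], rest: &[u8]) -> [u8; 64] {
    let mut sh = HASH512::new();
    sh.process_array(first);

    // Checkpoint: 2*8 + 8*8 + 80*8 = 720 bytes of serialised state.
    let mut state = [0u8; 720];
    sh.as_bytes(&mut state);

    // Restore into a new object and continue where the first one stopped.
    let mut resumed = HASH512::new();
    resumed.from_bytes(&state);
    resumed.process_array(rest);
    resumed.hash()
}
```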

870 crates/bls48581/src/hmac.rs Normal file

@@ -0,0 +1,870 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::hash256::HASH256;
use crate::hash384::HASH384;
use crate::hash512::HASH512;
use crate::sha3::SHA3;
use crate::rand::RAND;
pub const MC_SHA2: usize = 2;
pub const MC_SHA3: usize = 3;
pub const SHA256: usize = 32;
pub const SHA384: usize = 48;
pub const SHA512: usize = 64;
#[allow(non_snake_case)]
/* General Purpose Hash function */
#[allow(clippy::too_many_arguments)]
pub fn GPhashit(hash: usize, sha: usize,w: &mut [u8],pad: usize,zpad: usize,a: Option<&[u8]>, n: isize, b: Option<&[u8]>) {
let mut r: [u8; 64] = [0; 64];
if hash == MC_SHA2 {
if sha == SHA256 {
let mut h = HASH256::new();
for _ in 0..zpad {
h.process(0);
}
if let Some(x) = a {
h.process_array(x);
}
if n >= 0 {
h.process_num(n as i32)
}
if let Some(x) = b {
h.process_array(x);
}
let hs = h.hash();
for i in 0..sha {
r[i] = hs[i];
}
}
if sha == SHA384 {
let mut h = HASH384::new();
for _ in 0..zpad {
h.process(0);
}
if let Some(x) = a {
h.process_array(x);
}
if n >= 0 {
h.process_num(n as i32)
}
if let Some(x) = b {
h.process_array(x);
}
let hs = h.hash();
for i in 0..sha {
r[i] = hs[i];
}
}
if sha == SHA512 {
let mut h = HASH512::new();
for _ in 0..zpad {
h.process(0);
}
if let Some(x) = a {
h.process_array(x);
}
if n >= 0 {
h.process_num(n as i32)
}
if let Some(x) = b {
h.process_array(x);
}
let hs = h.hash();
for i in 0..sha {
r[i] = hs[i];
}
}
}
if hash == MC_SHA3 {
let mut h = SHA3::new(sha);
for _ in 0..zpad {
h.process(0);
}
if let Some(x) = a {
h.process_array(x);
}
if n >= 0 {
h.process_num(n as i32)
}
if let Some(x) = b {
h.process_array(x);
}
h.hash(&mut r);
}
if pad == 0 {
for i in 0..sha {
w[i] = r[i]
}
} else if pad <= sha {
for i in 0..pad {
w[i] = r[i]
}
} else {
for i in 0..sha {
w[i + pad - sha] = r[i]
}
for i in 0..(pad - sha) {
w[i] = 0
}
}
}
#[allow(non_snake_case)]
pub fn SPhashit(hash: usize, sha: usize,w: &mut [u8],a: Option<&[u8]>) {
GPhashit(hash,sha,w,0,0,a,-1,None);
}
pub fn inttobytes(n: usize, b: &mut [u8]) {
let mut i = b.len();
let mut m = n;
while m > 0 && i > 0 {
i -= 1;
b[i] = (m & 0xff) as u8;
m /= 256;
}
}
pub fn kdf2(hash: usize, sha: usize, z: &[u8], p: Option<&[u8]>, olen: usize, k: &mut [u8]) {
/* NOTE: the parameter olen is the length of the output K in bytes */
let hlen = sha;
let mut lk = 0;
let mut cthreshold = olen / hlen;
if olen % hlen != 0 {
cthreshold += 1
}
for counter in 1..cthreshold + 1 {
let mut b: [u8; 64] = [0; 64];
GPhashit(hash, sha, &mut b,0,0,Some(z), counter as isize, p);
if lk + hlen > olen {
for i in 0..(olen % hlen) {
k[lk] = b[i];
lk += 1
}
} else {
for i in 0..hlen {
k[lk] = b[i];
lk += 1
}
}
}
}
/* Password based Key Derivation Function */
/* Input password p, salt s, and repeat count */
/* Output key of length olen */
pub fn pbkdf2(hash: usize, sha: usize, pass: &[u8], salt: &[u8], rep: usize, olen: usize, k: &mut [u8]) {
let mut d = olen / sha;
if olen % sha != 0 {
d += 1
}
let mut f: [u8; 64] = [0; 64];
let mut u: [u8; 64] = [0; 64];
let mut ku: [u8; 64] = [0; 64];
let mut s: [u8; 36] = [0; 36]; // Maximum salt of 32 bytes + 4
let mut n: [u8; 4] = [0; 4];
let sl = salt.len();
let mut kp = 0;
for i in 0..d {
for j in 0..sl {
s[j] = salt[j]
}
inttobytes(i + 1, &mut n);
for j in 0..4 {
s[sl + j] = n[j]
}
hmac1(hash, sha, &mut f, sha, &s[0..sl + 4], pass);
for j in 0..sha {
u[j] = f[j]
}
for _ in 1..rep {
hmac1(hash, sha, &mut ku, sha, &u, pass);
for m in 0..sha {
u[m] = ku[m];
f[m] ^= u[m]
}
}
for j in 0..sha {
if kp < olen {
k[kp] = f[j]
}
kp += 1
}
}
}
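/* Usage sketch (editorial, not part of the diff): deriving a 32-byte key with
   PBKDF2-HMAC-SHA2-256. The passphrase, salt and iteration count are illustrative
   only; note the internal salt buffer above caps the salt at 32 bytes. */
#[allow(dead_code)]
fn pbkdf2_sketch() -> [u8; 32] {
    let mut key = [0u8; 32];
    pbkdf2(MC_SHA2, SHA256, b"passphrase", b"per-user-salt", 10_000, 32, &mut key);
    key
}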
fn blksize(hash: usize, sha: usize) -> usize {
let mut lb=0;
if hash == MC_SHA2 {
lb=64;
if sha > 32 {
lb=128;
}
}
if hash == MC_SHA3 {
lb=200-2*sha;
}
lb
}
/* Calculate HMAC of m using key k. The output tag has length olen bytes */
pub fn hmac1(hash: usize, sha: usize, tag: &mut [u8], olen: usize, k: &[u8], m: &[u8]) -> bool {
/* Input is from an octet m *
* olen is requested output length in bytes. k is the key *
* The output is the calculated tag */
let mut b: [u8; 64] = [0; 64]; /* fixed 64-byte scratch buffer; assumes sha <= 64 */
let mut k0: [u8; 128] = [0; 128];
let lb=blksize(hash,sha);
if lb == 0 {
return false;
}
for i in 0..lb {
k0[i] = 0
}
if k.len() > lb {
SPhashit(hash,sha,&mut b,Some(k));
//GPhashit(hash, sha, &mut b,0,0,k, 0, None);
for i in 0..sha {
k0[i] = b[i]
}
} else {
for i in 0..k.len() {
k0[i] = k[i]
}
}
for i in 0..lb {
k0[i] ^= 0x36
}
GPhashit(hash, sha, &mut b,0,0,Some(&k0[0..lb]), -1, Some(m));
for i in 0..lb {
k0[i] ^= 0x6a
}
GPhashit(hash, sha, tag,olen,0,Some(&k0[0..lb]), -1, Some(&b[0..sha]));
true
}
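/* Usage sketch (editorial, not part of the diff): HMAC-SHA2-256 via hmac1,
   parameterised as in RFC 4231 test case 1 (key = 20 bytes of 0x0b,
   message = "Hi There"); the expected tag should begin b0344c61d8db3853... */
#[allow(dead_code)]
fn hmac_sha256_sketch() -> [u8; 32] {
    let key = [0x0bu8; 20];
    let mut tag = [0u8; 32];
    let ok = hmac1(MC_SHA2, SHA256, &mut tag, 32, &key, b"Hi There");
    debug_assert!(ok);
    tag
}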
pub fn hkdf_extract(hash: usize, hlen: usize, prk: &mut [u8],salt: Option<&[u8]>,ikm: &[u8]) {
if let Some(x)=salt {
hmac1(hash,hlen,prk,hlen,x,ikm);
} else {
let h: [u8; 64] = [0; 64];
hmac1(hash,hlen,prk,hlen,&h[0..hlen],ikm);
}
}
pub fn hkdf_expand(hash: usize, hlen: usize, okm: &mut [u8], olen: usize, prk: &[u8], info: &[u8]) {
let n=olen/hlen;
let flen=olen%hlen;
let mut t: [u8; 1024] = [0; 1024]; // >= info.length+hlen+1
let mut k: [u8; 64] = [0; 64];
let mut l=0;
let mut m=0;
for i in 1..=n {
for j in 0..info.len() {
t[l]=info[j]; l+=1;
}
t[l]=i as u8; l+=1;
hmac1(hash,hlen,&mut k,hlen,prk,&t[0..l]);
l=0;
for j in 0..hlen {
okm[m]=k[j]; m+=1;
t[l]=k[j]; l+=1;
}
}
if flen>0 {
for j in 0..info.len() {
t[l]=info[j]; l+=1;
}
t[l]=(n+1) as u8; l+=1;
hmac1(hash,hlen,&mut k,flen,prk,&t[0..l]);
for j in 0..flen {
okm[m]=k[j]; m+=1;
}
}
}
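/* Usage sketch (editorial, not part of the diff): HKDF extract-then-expand with
   SHA2-256, parameterised as in RFC 5869 test case 1 (22-byte IKM of 0x0b,
   13-byte salt 00..0c, 10-byte info f0..f9, 42 bytes of output). */
#[allow(dead_code)]
fn hkdf_sha256_sketch() -> [u8; 42] {
    let ikm = [0x0bu8; 22];
    let salt: [u8; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
    let info: [u8; 10] = [0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9];
    let mut prk = [0u8; 32];
    let mut okm = [0u8; 42];
    hkdf_extract(MC_SHA2, SHA256, &mut prk, Some(&salt[..]), &ikm);
    hkdf_expand(MC_SHA2, SHA256, &mut okm, 42, &prk, &info);
    okm
}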
fn ceil(a: usize,b: usize) -> usize {
(a-1)/b+1
}
pub fn xof_expand(hlen: usize,okm: &mut [u8],olen: usize,dst: &[u8],msg: &[u8]) {
let mut h = SHA3::new(hlen);
for i in 0..msg.len() {
h.process(msg[i]);
}
h.process(((olen >> 8) & 0xff) as u8);
h.process((olen & 0xff) as u8);
for i in 0..dst.len() {
h.process(dst[i]);
}
h.process((dst.len() & 0xff) as u8);
h.shake(okm,olen);
}
pub fn xmd_expand(hash: usize,hlen: usize,okm: &mut [u8],olen: usize,dst: &[u8],msg: &[u8]) {
let mut w:[u8; 64]=[0;64];
if dst.len() >= 256 {
GPhashit(hash, hlen, &mut w, 0, 0, Some(b"H2C-OVERSIZE-DST-"), -1, Some(&dst));
xmd_expand_short_dst(hash, hlen, okm, olen, &w[0..hlen], msg);
} else {
xmd_expand_short_dst(hash, hlen, okm, olen, dst, msg);
}
}
// Assumes dst.len() < 256.
fn xmd_expand_short_dst(hash: usize,hlen: usize,okm: &mut [u8],olen: usize,dst: &[u8],msg: &[u8]) {
let mut tmp: [u8; 260] = [0; 260];
let mut h0: [u8; 64]=[0;64];
let mut h1: [u8; 64]=[0;64];
let mut h2: [u8; 64]=[0;64];
let ell=ceil(olen,hlen);
let blk=blksize(hash,hlen);
tmp[0]=((olen >> 8) & 0xff) as u8;
tmp[1]=(olen & 0xff) as u8;
tmp[2]=0;
for j in 0..dst.len() {
tmp[3+j]=dst[j];
}
tmp[3+dst.len()]=(dst.len() & 0xff) as u8;
GPhashit(hash, hlen, &mut h0, 0, blk, Some(msg), -1, Some(&tmp[0..dst.len()+4]));
let mut k=0;
for i in 1..=ell {
for j in 0..hlen {
h1[j]^=h0[j];
h2[j]=h1[j];
}
tmp[0]=i as u8;
for j in 0..dst.len() {
tmp[1+j]=dst[j];
}
tmp[1+dst.len()]=(dst.len() & 0xff) as u8;
GPhashit(hash, hlen, &mut h1, 0, 0, Some(&h2[0..hlen]), -1, Some(&tmp[0..dst.len()+2]));
for j in 0..hlen {
okm[k]=h1[j];
k+=1;
if k==olen {
break;
}
}
}
}
/* Mask Generation Function */
pub fn mgf1(sha: usize, z: &[u8], olen: usize, k: &mut [u8]) {
let hlen = sha;
let mut j = 0;
for i in 0..k.len() {
k[i] = 0
}
let mut cthreshold = olen / hlen;
if olen % hlen != 0 {
cthreshold += 1
}
for counter in 0..cthreshold {
let mut b: [u8; 64] = [0; 64];
GPhashit(MC_SHA2,sha,&mut b,0,0,Some(z),counter as isize,None);
//hashit(sha, Some(z), counter as isize, &mut b);
if j + hlen > olen {
for i in 0..(olen % hlen) {
k[j] = b[i];
j += 1
}
} else {
for i in 0..hlen {
k[j] = b[i];
j += 1
}
}
}
}
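/* Usage sketch (editorial, not part of the diff): MGF1 with SHA2-256 expanding a
   short seed into 40 mask bytes, as the OAEP and PSS routines below do internally. */
#[allow(dead_code)]
fn mgf1_sketch(seed: &[u8]) -> [u8; 40] {
    let mut mask = [0u8; 40];
    mgf1(SHA256, seed, 40, &mut mask);
    mask
}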
pub fn mgf1xor(sha: usize, z: &[u8], olen: usize, k: &mut [u8]) {
let hlen = sha;
let mut j = 0;
let mut cthreshold = olen / hlen;
if olen % hlen != 0 {
cthreshold += 1
}
for counter in 0..cthreshold {
let mut b: [u8; 64] = [0; 64];
GPhashit(MC_SHA2,sha,&mut b,0,0,Some(z),counter as isize,None);
if j + hlen > olen {
for i in 0..(olen % hlen) {
k[j] ^= b[i];
j += 1
}
} else {
for i in 0..hlen {
k[j] ^= b[i];
j += 1
}
}
}
}
// PKCS 1.5
/* SHAXXX identifier strings */
const SHA256ID: [u8; 19] = [
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
0x00, 0x04, 0x20,
];
const SHA384ID: [u8; 19] = [
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05,
0x00, 0x04, 0x30,
];
const SHA512ID: [u8; 19] = [
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
0x00, 0x04, 0x40,
];
pub fn pkcs15(sha: usize, m: &[u8], w: &mut [u8],rfs: usize) -> bool {
let olen = rfs;
let hlen = sha;
let idlen = 19;
let mut b: [u8; 64] = [0; 64]; /* fixed 64-byte scratch buffer; assumes sha <= 64 */
if olen < idlen + hlen + 10 {
return false;
}
SPhashit(MC_SHA2,sha,&mut b,Some(m));
for i in 0..w.len() {
w[i] = 0
}
let mut i = 0;
w[i] = 0;
i += 1;
w[i] = 1;
i += 1;
for _ in 0..olen - idlen - hlen - 3 {
w[i] = 0xff;
i += 1
}
w[i] = 0;
i += 1;
if hlen == SHA256 {
for j in 0..idlen {
w[i] = SHA256ID[j];
i += 1
}
}
if hlen == SHA384 {
for j in 0..idlen {
w[i] = SHA384ID[j];
i += 1
}
}
if hlen == SHA512 {
for j in 0..idlen {
w[i] = SHA512ID[j];
i += 1
}
}
for j in 0..hlen {
w[i] = b[j];
i += 1
}
true
}
// Alternate PKCS 1.5
/* SHAXXX identifier strings */
const SHA256IDB: [u8; 17] = [
0x30, 0x2f, 0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x04, 0x20];
const SHA384IDB: [u8; 17] = [
0x30, 0x3f, 0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x04, 0x30];
const SHA512IDB: [u8; 17] = [
0x30, 0x4f, 0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x04, 0x40];
pub fn pkcs15b(sha: usize, m: &[u8], w: &mut [u8],rfs: usize) -> bool {
let olen = rfs;
let hlen = sha;
let idlen = 17;
let mut b: [u8; 64] = [0; 64]; /* fixed 64-byte scratch buffer; assumes sha <= 64 */
if olen < idlen + hlen + 10 {
return false;
}
SPhashit(MC_SHA2,sha,&mut b,Some(m));
for i in 0..w.len() {
w[i] = 0
}
let mut i = 0;
w[i] = 0;
i += 1;
w[i] = 1;
i += 1;
for _ in 0..olen - idlen - hlen - 3 {
w[i] = 0xff;
i += 1
}
w[i] = 0;
i += 1;
if hlen == SHA256 {
for j in 0..idlen {
w[i] = SHA256IDB[j];
i += 1
}
}
if hlen == SHA384 {
for j in 0..idlen {
w[i] = SHA384IDB[j];
i += 1
}
}
if hlen == SHA512 {
for j in 0..idlen {
w[i] = SHA512IDB[j];
i += 1
}
}
for j in 0..hlen {
w[i] = b[j];
i += 1
}
true
}
pub fn pss_encode(sha: usize, m: &[u8], rng: &mut RAND, f: &mut [u8], rfs: usize) -> bool {
let emlen=rfs;
let embits=8*emlen-1;
let hlen=sha;
let mut h:[u8;64]=[0;64];
let mut salt: [u8; 64] = [0; 64];
let mut md: [u8;136]=[0;136];
for i in 0..hlen {
salt[i] = rng.getbyte()
}
let mask=(0xff as u8)>> (8*emlen-embits);
SPhashit(MC_SHA2,sha,&mut h,Some(m));
if emlen<hlen+hlen+2 {
return false;
}
for i in 0..8 {
md[i]=0;
}
for i in 0..hlen {
md[8+i]=h[i];
}
for i in 0..hlen {
md[8+hlen+i]=salt[i];
}
SPhashit(MC_SHA2,sha,&mut h,Some(&md[0..8+hlen+hlen]));
for i in 0..emlen-hlen-hlen-2 {
f[i]=0;
}
f[emlen-hlen-hlen-2]=0x01;
for i in 0..hlen {
f[emlen+i-hlen-hlen-1]=salt[i];
}
mgf1xor(sha,&h[0..hlen],emlen-hlen-1,f);
f[0]&=mask;
for i in 0..hlen {
f[emlen+i-hlen-1]=h[i];
}
f[emlen-1]=0xbc as u8;
true
}
pub fn pss_verify(sha: usize, m: &[u8],f: &[u8]) -> bool {
let emlen=f.len();
let embits=8*emlen-1;
let hlen=sha;
let mut db:[u8;512]=[0;512];
let mut hmask:[u8;64]=[0;64];
let mut h:[u8;64]=[0;64];
let mut salt: [u8; 64] = [0; 64];
let mut md: [u8;136]=[0;136];
let mask=(0xff as u8)>> (8*emlen-embits);
SPhashit(MC_SHA2,sha,&mut hmask,Some(m));
if emlen<hlen+hlen+2 {
return false;
}
if f[emlen-1]!=0xbc as u8 {
return false
}
if (f[0]&(!mask))!=0 {
return false
}
for i in 0..emlen-hlen-1 {
db[i]=f[i]
}
for i in 0..hlen {
h[i]=f[emlen+i-hlen-1]
}
mgf1xor(sha,&h[0..hlen],emlen-hlen-1,&mut db);
db[0]&=mask;
let mut k=0 as u8;
for i in 0..emlen-hlen-hlen-2 {
k|=db[i]
}
if k!=0 {
return false
}
if db[emlen-hlen-hlen-2]!=0x01 {
return false
}
for i in 0..hlen {
salt[i]=db[emlen+i-hlen-hlen-1]
}
for i in 0..8 {
md[i]=0
}
for i in 0..hlen {
md[8+i]=hmask[i]
}
for i in 0..hlen {
md[8+hlen+i]=salt[i]
}
SPhashit(MC_SHA2,sha,&mut hmask,Some(&md[0..8+hlen+hlen]));
k=0;
for i in 0..hlen {
k|=h[i]-hmask[i];
}
if k!=0 {
return false;
}
true
}
/* OAEP Message Encoding for Encryption */
pub fn oaep_encode(sha: usize, m: &[u8], rng: &mut RAND, p: Option<&[u8]>, f: &mut [u8], rfs: usize) -> bool {
let olen = rfs - 1;
let mlen = m.len();
let hlen = sha;
let mut seed: [u8; 64] = [0; 64];
let seedlen = hlen;
if mlen > olen - hlen - seedlen - 1 {
return false;
}
let mut dbmask: [u8; 512] = [0; 512];
SPhashit(MC_SHA2,sha,f,p);
//hashit(sha, p, -1, f);
let slen = olen - mlen - hlen - seedlen - 1;
for i in 0..slen {
f[hlen + i] = 0
}
f[hlen + slen] = 1;
for i in 0..mlen {
f[hlen + slen + 1 + i] = m[i]
}
for i in 0..seedlen {
seed[i] = rng.getbyte()
}
mgf1(sha, &seed[0..seedlen], olen - seedlen, &mut dbmask);
for i in 0..olen - seedlen {
dbmask[i] ^= f[i]
}
mgf1(sha, &dbmask[0..olen - seedlen], seedlen, f);
for i in 0..seedlen {
f[i] ^= seed[i]
}
for i in 0..olen - seedlen {
f[i + seedlen] = dbmask[i]
}
/* pad to length rfs */
let d = 1;
for i in (d..rfs).rev() {
f[i] = f[i - d];
}
for i in (0..d).rev() {
f[i] = 0;
}
true
}
/* OAEP Message Decoding for Decryption */
pub fn oaep_decode(sha: usize, p: Option<&[u8]>, f: &mut [u8],rfs :usize) -> usize {
let olen = rfs - 1;
let hlen = sha;
let mut seed: [u8; 64] = [0; 64];
let seedlen = hlen;
let mut chash: [u8; 64] = [0; 64];
if olen < seedlen + hlen + 1 {
return 0;
}
let mut dbmask: [u8; 512] = [0; 512];
if f.len() < rfs {
let d = rfs - f.len();
for i in (d..rfs).rev() {
f[i] = f[i - d];
}
for i in (0..d).rev() {
f[i] = 0;
}
}
SPhashit(MC_SHA2,sha,&mut chash,p);
//hashit(sha, p, -1, &mut chash);
let x = f[0];
for i in seedlen..olen {
dbmask[i - seedlen] = f[i + 1];
}
mgf1(sha, &dbmask[0..olen - seedlen], seedlen, &mut seed);
for i in 0..seedlen {
seed[i] ^= f[i + 1]
}
mgf1(sha, &seed[0..seedlen], olen - seedlen, f);
for i in 0..olen - seedlen {
dbmask[i] ^= f[i]
}
let mut comp=0;
for i in 0..hlen {
comp |= (chash[i]^dbmask[i]) as usize;
}
for i in 0..olen - seedlen - hlen {
dbmask[i] = dbmask[i + hlen]
}
for i in 0..hlen {
seed[i] = 0;
chash[i] = 0
}
// find first non-zero t in array
let mut k=0;
let mut t=0;
let m=olen-seedlen-hlen;
for i in 0..m {
if t==0 && dbmask[i]!=0 {
k=i;
t=dbmask[i];
}
}
if comp!=0 || x != 0 || t != 0x01 {
for i in 0..olen - seedlen {
dbmask[i] = 0
}
return 0;
}
for i in 0..m - k - 1 {
f[i] = dbmask[i + k + 1];
}
for i in 0..olen - seedlen {
dbmask[i] = 0
}
m - k - 1
}
/*
use core::sha3;
use core::hmac;
let mut okm: [u8;100]=[0;100];
let msg: &[u8] = b"abc";
let dst: &[u8] = b"P256_XMD:SHA-256_SSWU_RO_TESTGEN";
hmac::xof_expand(sha3::SHAKE128,&mut okm,48,&dst,&msg);
print!("okm= "); printbinary(&okm[0..48]);
hmac::xmd_expand(hmac::MC_SHA2,32,&mut okm,48,&dst,&msg);
print!("okm= "); printbinary(&okm[0..48]);
let mut ikm: [u8;22]=[0;22];
let mut salt: [u8;13]=[0;13];
let mut info: [u8;10]=[0;10];
let mut prk: [u8;32]=[0;32];
let mut okm: [u8;42]=[0;42];
for i in 0..22 {ikm[i]=0x0b;}
for i in 0..13 {salt[i]=i as u8;}
for i in 0..10 {info[i]=(0xf0+i) as u8;}
hmac::hkdf_extract(hmac::MC_SHA2,32,&mut prk,Some(&salt),&ikm);
print!("PRK= ");
for i in 0..32 {
print!("{:02X}",prk[i]);
}
hmac::hkdf_expand(hmac::MC_SHA2,32,&mut okm,42,&prk,&info);
print!("OKM= ");
for i in 0..42 {
print!("{:02X}",okm[i]);
}
*/
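/*
   Example usage sketch (not part of the original file), mirroring the style of the
   example above: derive a 100-byte mask from a short seed with MGF1 over SHA-256
   (the `sha` parameter is the hash length in bytes, 32 for SHA-256).
    let seed: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];   // placeholder seed bytes
    let mut mask: [u8; 100] = [0; 100];
    hmac::mgf1(32, &seed, 100, &mut mask);          // fills `mask` with 100 derived bytes
*/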

View File

@ -0,0 +1,705 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Kyber API high-level functions. Constant time where it matters. Slow (spends nearly all of its time running SHA3) but small.
M.Scott 06/07/2022
*/
use crate::sha3;
use crate::sha3::SHA3;
const LGN: usize = 8;
const DEGREE: usize = 1<<LGN;
const PRIME: i16 = 0xD01;
const ONE: i16 = 0x549; // r mod q
const QINV: i32 = -3327; // -1/q mod 2^16
//const TWO26: i32 = 1<<26; // 2^26
const TWO25: i32 = 1<<25; // 2^25
const BARC: i32 = 20159; // ((TWO26 + PRIME/2)/PRIME)
pub const SECRET_CPA_SIZE_512: usize=2*(DEGREE*3)/2;
pub const PUBLIC_SIZE_512: usize=32+2*(DEGREE*3)/2;
pub const CIPHERTEXT_SIZE_512: usize= (10*2+4)*DEGREE/8;
pub const SECRET_CCA_SIZE_512: usize=SECRET_CPA_SIZE_512+PUBLIC_SIZE_512+64;
pub const SHARED_SECRET_512: usize=32;
pub const SECRET_CPA_SIZE_768: usize=3*(DEGREE*3)/2;
pub const PUBLIC_SIZE_768: usize=32+3*(DEGREE*3)/2;
pub const CIPHERTEXT_SIZE_768: usize= (10*3+4)*DEGREE/8;
pub const SECRET_CCA_SIZE_768: usize=SECRET_CPA_SIZE_768+PUBLIC_SIZE_768+64;
pub const SHARED_SECRET_768: usize=32;
pub const SECRET_CPA_SIZE_1024: usize=4*(DEGREE*3)/2;
pub const PUBLIC_SIZE_1024: usize=32+4*(DEGREE*3)/2;
pub const CIPHERTEXT_SIZE_1024: usize= (11*4+5)*DEGREE/8;
pub const SECRET_CCA_SIZE_1024: usize=SECRET_CPA_SIZE_1024+PUBLIC_SIZE_1024+64;
pub const SHARED_SECRET_1024: usize=32;
pub const MAXK:usize = 4;
// parameters for each security level
// K,eta1,eta2,du,dv,shared secret
const PARAMS_512: [usize;6] = [2,3,2,10,4,32];
const PARAMS_768: [usize;6] = [3,2,2,10,4,32];
const PARAMS_1024: [usize;6] = [4,2,2,11,5,32];
/* Start of public domain reference implementation code - translated from https://github.com/pq-crystals/kyber */
const ZETAS: [i16; 128] = [
-1044, -758, -359, -1517, 1493, 1422, 287, 202,
-171, 622, 1577, 182, 962, -1202, -1474, 1468,
573, -1325, 264, 383, -829, 1458, -1602, -130,
-681, 1017, 732, 608, -1542, 411, -205, -1571,
1223, 652, -552, 1015, -1293, 1491, -282, -1544,
516, -8, -320, -666, -1618, -1162, 126, 1469,
-853, -90, -271, 830, 107, -1421, -247, -951,
-398, 961, -1508, -725, 448, -1065, 677, -1275,
-1103, 430, 555, 843, -1251, 871, 1550, 105,
422, 587, 177, -235, -291, -460, 1574, 1653,
-246, 778, 1159, -147, -777, 1483, -602, 1119,
-1590, 644, -872, 349, 418, 329, -156, -75,
817, 1097, 603, 610, 1322, -1285, -1465, 384,
-1215, -136, 1218, -1335, -874, 220, -1187, -1659,
-1185, -1530, -1278, 794, -1510, -854, -870, 478,
-108, -308, 996, 991, 958, -1460, 1522, 1628
];
/*
fn printbinary(array: &[u8]) {
for i in 0..array.len() {
print!("{:02X}", array[i])
}
println!("")
}
*/
/* Montgomery stuff */
fn montgomery_reduce(a: i32) -> i16 {
let dp=PRIME as i32;
let dt=(((a&0xffff)*QINV)&0xffff) as i16;
let t=((a-((dt as i32)*dp))>>16) as i16;
return t;
}
fn barrett_reduce(a: i16) -> i16 {
let da=a as i32;
let mut t=((BARC*da + TWO25) >> 26) as i16;
t*=PRIME;
return a-t;
}
fn fqmul(a: i16, b: i16) -> i16 {
return montgomery_reduce((a as i32)*(b as i32));
}
fn ntt(r: &mut [i16]) {
let mut k=1;
let mut len=128;
while len>=2 {
let mut start=0;
while start<256 {
let zeta=ZETAS[k]; k+=1;
let mut j=start;
while j<start+len {
let t=fqmul(zeta,r[j+len]);
r[j+len]=r[j]-t;
r[j] += t;
j+=1;
}
start = j+len
}
len >>= 1;
}
}
fn invntt(r: &mut [i16]) {
let f=1441 as i16;
let mut k=127;
let mut len=2;
while len<=128 {
let mut start=0;
while start<256 {
let zeta=ZETAS[k]; k-=1;
let mut j=start;
while j<start+len {
let t=r[j];
r[j]=barrett_reduce(t+r[j+len]); // problem here
r[j+len] -= t;
r[j+len]=fqmul(zeta,r[j+len]);
j+=1;
}
start=j+len;
}
len<<=1;
}
for j in 0..256 {
r[j]=fqmul(r[j],f);
}
}
fn basemul(r: &mut [i16],a: &[i16],b: &[i16],zeta: i16) {
r[0]=fqmul(a[1],b[1]);
r[0]=fqmul(r[0],zeta);
r[0]+=fqmul(a[0],b[0]);
r[1]=fqmul(a[0],b[1]);
r[1]+=fqmul(a[1],b[0]);
}
fn poly_reduce(r: &mut [i16]) {
for i in 0..DEGREE {
r[i]=barrett_reduce(r[i]);
}
}
fn poly_ntt(r: &mut [i16]) {
ntt(r);
poly_reduce(r);
}
fn poly_invntt(r: &mut [i16]) {
invntt(r);
}
// Note r must be distinct from a and b
fn poly_mul(r: &mut [i16],a: &[i16],b: &[i16]) {
for i in 0..DEGREE/4 {
let x=4*i; let y=x+2; let z=x+4;
basemul(&mut r[x..y],&a[x..y],&b[x..y],ZETAS[64+i]);
basemul(&mut r[y..z],&a[y..z],&b[y..z],-ZETAS[64+i]);
}
}
fn poly_tomont(r: &mut [i16]) {
for i in 0..DEGREE {
r[i]=montgomery_reduce((r[i] as i32)*(ONE as i32));
}
}
/* End of public domain reference code use */
fn poly_add(p1: &mut [i16],p2: &[i16],p3: &[i16]) {
for i in 0..DEGREE {
p1[i] = p2[i]+p3[i];
}
}
fn poly_acc(p1: &mut [i16],p3: &[i16]) {
for i in 0..DEGREE {
p1[i] += p3[i];
}
}
fn poly_dec(p1: &mut [i16],p3: &[i16]) {
for i in 0..DEGREE {
p1[i] -= p3[i];
}
}
// Generate a[i][j] from rho
fn expandaij(rho: &[u8],aij: &mut [i16],i:usize,j:usize) {
let mut buff: [u8; 3*DEGREE] = [0; 3*DEGREE];
let mut sh = SHA3::new(sha3::SHAKE128);
for m in 0..32 {
sh.process(rho[m])
}
sh.process(j as u8);
sh.process(i as u8);
sh.shake(&mut buff, 3*DEGREE);
let mut m=0;
let mut n=0;
let dp = PRIME as u32;
while n<DEGREE {
let d1=(buff[m] as u32) + 256*((buff[m+1]&0x0f) as u32);
let d2=((buff[m+1]/16) as u32) + 16*(buff[m+2] as u32);
if d1<dp {
aij[n]=d1 as i16; n+=1;
}
if d2<dp && n<DEGREE {
aij[n]=d2 as i16; n+=1;
}
m+=3;
}
}
fn getbit(b: &[u8],n: usize) -> i16 {
let wd=n/8;
let bt=n%8;
return ((b[wd]>>bt)&1) as i16;
}
fn cbd(bts: &[u8],eta: usize,f: &mut [i16]) {
for i in 0..DEGREE {
let mut a=0 as i16;
let mut b=0 as i16;
for j in 0..eta {
a+=getbit(bts,2*i*eta+j);
b+=getbit(bts,2*i*eta+eta+j);
}
f[i] = a-b;
}
}
// extract ab bits into word from dense byte stream
fn nextword(ab: usize,t: &[u8],ptr: &mut usize,bts: &mut usize) -> i16 {
let mut r=(t[*ptr]>>(*bts)) as i16;
let mask=((1<<ab)-1) as i16;
let mut i=0;
let mut gotbits=8-(*bts); // bits left in current byte
while gotbits<ab {
i+=1;
let w=t[(*ptr)+i] as i16;
r |= w<<gotbits;
gotbits+=8;
}
*bts += ab;
while *bts>=8 {
*bts -= 8;
*ptr += 1;
}
return r&mask;
}
fn nextbyte16(ab: usize,t: &[i16],ptr: &mut usize,bts: &mut usize) -> u8 {
let mut left=ab-(*bts);
let mut i=0;
let mut w=t[*ptr]; w+=(w>>15)&PRIME;
let mut r=w>>(*bts);
while left<8 {
i+=1;
w=t[(*ptr)+i]; w+=(w>>15)&PRIME;
r|=w<<left;
left += ab;
}
*bts += 8;
while *bts>=ab {
*bts -= ab;
*ptr += 1;
}
return (r&0xff) as u8;
}
fn encode(t: &[i16],len: usize,l: usize,pack: &mut [u8]) {
let mut ptr=0;
let mut bts=0;
for n in 0..len*(DEGREE*l)/8 {
pack[n]=nextbyte16(l,t,&mut ptr, &mut bts);
}
}
// return 0 if encoding is unchanged
fn chk_encode(t: &[i16],len: usize,l: usize,pack: &[u8]) -> u8 {
let mut ptr=0;
let mut bts=0;
let mut diff=0 as u8;
for n in 0..len*(DEGREE*l)/8 {
let m=nextbyte16(l,t,&mut ptr, &mut bts);
diff|=m^pack[n];
}
return diff;
}
fn decode(pack: &[u8],l: usize,t: &mut [i16],len: usize) {
let mut ptr=0;
let mut bts=0;
for i in 0..len*DEGREE {
t[i]=nextword(l,pack,&mut ptr,&mut bts);
}
}
fn compress(t: &mut [i16],len:usize,d:usize) {
let twod=(1<<d) as i32;
let dp=PRIME as i32;
for i in 0..len*DEGREE {
t[i]+=(t[i]>>15)&PRIME;
t[i]=(((twod*(t[i] as i32) + dp/2)/dp)&(twod-1)) as i16;
}
}
fn decompress(t: &mut [i16],len:usize,d:usize) {
let twod1=(1<<(d-1)) as i32;
let dp=PRIME as i32;
for i in 0..len*DEGREE {
t[i]=((dp*(t[i] as i32)+twod1)>>d) as i16;
}
}
fn cpa_keypair(params: &[usize],tau: &[u8],sk: &mut [u8],pk: &mut [u8]) {
let mut rho:[u8;32]=[0;32];
let mut sigma:[u8;33]=[0;33];
let mut buff:[u8;256]=[0;256];
let mut r:[i16;DEGREE]=[0;DEGREE];
let mut w:[i16;DEGREE]=[0;DEGREE];
let mut aij:[i16;DEGREE]=[0;DEGREE];
let mut s:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let mut e:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let mut p:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let mut sh = SHA3::new(sha3::HASH512);
let ck=params[0];
let eta1=params[1];
let public_key_size=32+ck*(DEGREE*3)/2;
for i in 0..32 {
sh.process(tau[i]);
}
sh.hash(&mut buff);
for i in 0..32 {
rho[i]=buff[i];
sigma[i]=buff[i+32];
}
sigma[32]=0;
// create s
for i in 0..ck {
sh=SHA3::new(sha3::SHAKE256);
for j in 0..33 {
sh.process(sigma[j]);
}
sh.shake(&mut buff,64*eta1);
cbd(&buff,eta1,&mut s[i*DEGREE..]);
sigma[32] += 1;
}
// create e
for i in 0..ck {
sh=SHA3::new(sha3::SHAKE256);
for j in 0..33 {
sh.process(sigma[j]);
}
sh.shake(&mut buff,64*eta1);
cbd(&buff,eta1,&mut e[i*DEGREE..]);
sigma[32] += 1;
}
for k in 0..ck {
let row=k*DEGREE;
poly_ntt(&mut s[row..]);
poly_ntt(&mut e[row..]);
}
for i in 0..ck {
let row=i*DEGREE;
expandaij(&rho,&mut aij,i,0);
poly_mul(&mut r,&aij,&s);
for j in 1..ck {
expandaij(&rho,&mut aij,i,j);
poly_mul(&mut w,&s[j*DEGREE..],&aij);
poly_acc(&mut r,&w);
}
poly_reduce(&mut r);
poly_tomont(&mut r);
poly_add(&mut p[row..],&r,&e[row..]);
poly_reduce(&mut p[row..]);
}
encode(&s,ck,12,sk);
encode(&p,ck,12,pk);
for i in 0..32 {
pk[public_key_size-32+i]=rho[i];
}
}
fn cpa_base_encrypt(params: &[usize],coins: &[u8],pk: &[u8],ss: &[u8],u: &mut [i16],v: &mut [i16]) {
let mut rho:[u8;32]=[0;32];
let mut sigma:[u8;33]=[0;33];
let mut buff:[u8;256]=[0;256];
let mut r:[i16;DEGREE]=[0;DEGREE];
let mut w:[i16;DEGREE]=[0;DEGREE];
let mut aij:[i16;DEGREE]=[0;DEGREE];
let mut q:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let mut p:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let ck=params[0];
let eta1=params[1];
let eta2=params[2];
let du=params[3];
let dv=params[4];
let public_key_size=32+ck*(DEGREE*3)/2;
for i in 0..32 {
sigma[i]=coins[i];
}
sigma[32]=0;
for i in 0..32 {
rho[i]=pk[i+public_key_size-32];
}
// create q
for i in 0..ck {
let mut sh=SHA3::new(sha3::SHAKE256);
for j in 0..33 {
sh.process(sigma[j]);
}
sh.shake(&mut buff,64*eta1);
cbd(&buff,eta1,&mut q[i*DEGREE..]);
sigma[32] += 1;
}
// create e1
for i in 0..ck {
let mut sh=SHA3::new(sha3::SHAKE256);
for j in 0..33 {
sh.process(sigma[j]);
}
sh.shake(&mut buff,64*eta2);
cbd(&buff,eta1,&mut u[i*DEGREE..]);
sigma[32] += 1;
}
for i in 0..ck {
let row=DEGREE*i;
poly_ntt(&mut q[row..]);
}
for i in 0..ck {
let row=i*DEGREE;
expandaij(&rho,&mut aij,0,i);
poly_mul(&mut r,&aij,&q);
for j in 1..ck {
expandaij(&rho,&mut aij,j,i);
poly_mul(&mut w,&q[j*DEGREE..],&aij);
poly_acc(&mut r,&w);
}
poly_reduce(&mut r);
poly_invntt(&mut r);
poly_acc(&mut u[row..],&r);
poly_reduce(&mut u[row..]);
}
decode(&pk,12,&mut p,ck);
poly_mul(v,&p,&q);
for i in 1..ck {
let row=DEGREE*i;
poly_mul(&mut r,&p[row..],&q[row..]);
poly_acc(v,&r);
}
poly_invntt(v);
let mut sh = SHA3::new(sha3::SHAKE256);
for j in 0..33 {
sh.process(sigma[j]);
}
sh.shake(&mut buff,64*eta2);
cbd(&buff,eta1,&mut w); // e2
poly_acc(v,&w);
decode(&ss,1,&mut r,1);
decompress(&mut r,1,1);
poly_acc(v,&r);
poly_reduce(v);
compress(u,ck,du);
compress(v,1,dv);
}
fn cpa_encrypt(params: &[usize],coins: &[u8],pk: &[u8],ss: &[u8],ct: &mut [u8]) {
let mut v:[i16;DEGREE]=[0;DEGREE];
let mut u:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let ck=params[0];
let du=params[3];
let dv=params[4];
let ciphertext_size=(du*ck+dv)*DEGREE/8;
cpa_base_encrypt(params,coins,pk,ss,&mut u,&mut v);
encode(&u,ck,du,ct);
encode(&v,1,dv,&mut ct[ciphertext_size-(dv*DEGREE/8)..]);
}
// Re-encrypt and check that ct is OK (if so return is zero)
fn cpa_check_encrypt(params: &[usize],coins: &[u8],pk: &[u8],ss: &[u8],ct: &[u8]) -> u8 {
let mut v:[i16;DEGREE]=[0;DEGREE];
let mut u:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let ck=params[0];
let du=params[3];
let dv=params[4];
let ciphertext_size=(du*ck+dv)*DEGREE/8;
cpa_base_encrypt(params,coins,pk,ss,&mut u,&mut v);
let d1=chk_encode(&u,ck,du,ct);
let d2=chk_encode(&v,1,dv,&ct[ciphertext_size-(dv*DEGREE/8)..]);
if (d1|d2)==0 {
return 0;
} else {
return 0xff;
}
}
fn cpa_decrypt(params: &[usize],sk: &[u8],ct: &[u8],ss: &mut [u8]) {
let mut w:[i16;DEGREE]=[0;DEGREE];
let mut v:[i16;DEGREE]=[0;DEGREE];
let mut r:[i16;DEGREE]=[0;DEGREE];
let mut u:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let mut s:[i16;MAXK*DEGREE]=[0;MAXK*DEGREE];
let ck=params[0];
let du=params[3];
let dv=params[4];
decode(ct,du,&mut u,ck);
decode(&ct[(du*ck*DEGREE)/8..],dv,&mut v,1);
decompress(&mut u,ck,du);
decompress(&mut v,1,dv);
decode(sk,12,&mut s,ck);
poly_ntt(&mut u);
poly_mul(&mut w,&u,&s);
for i in 1..ck {
let row=DEGREE*i;
poly_ntt(&mut u[row..]);
poly_mul(&mut r,&u[row..],&s[row..]);
poly_acc(&mut w,&r);
}
poly_reduce(&mut w);
poly_invntt(&mut w);
poly_dec(&mut v,&w);
compress(&mut v,1,1);
encode(&v,1,1,ss);
}
fn cca_keypair(params: &[usize],randbytes64: &[u8],sk: &mut [u8],pk: &mut [u8]) {
let ck=params[0];
let secret_cpa_key_size=ck*(DEGREE*3)/2;
let public_key_size=32+ck*(DEGREE*3)/2;
cpa_keypair(params,randbytes64,sk,pk);
for i in 0..public_key_size {
sk[i+secret_cpa_key_size]=pk[i];
}
let mut sh = SHA3::new(sha3::HASH256);
for i in 0..public_key_size {
sh.process(pk[i]);
}
sh.hash(&mut sk[secret_cpa_key_size+public_key_size..]);
for i in 0..32 {
sk[i+secret_cpa_key_size+public_key_size+32]=randbytes64[i+32];
}
}
fn cca_encrypt(params: &[usize],randbytes32: &[u8],pk: &[u8],ss: &mut [u8],ct: &mut [u8]) {
let mut hm:[u8;32]=[0;32];
let mut h:[u8;32]=[0;32];
let mut g:[u8;64]=[0;64];
let ck=params[0];
let du=params[3];
let dv=params[4];
let public_key_size=32+ck*(DEGREE*3)/2;
let ciphertext_size=(du*ck+dv)*DEGREE/8;
let shared_secret_size=params[5];
let mut sh = SHA3::new(sha3::HASH256);
for i in 0..32 {
sh.process(randbytes32[i]);
}
sh.hash(&mut hm);
sh = SHA3::new(sha3::HASH256);
for i in 0..public_key_size {
sh.process(pk[i]);
}
sh.hash(&mut h);
sh = SHA3::new(sha3::HASH512);
sh.process_array(&hm);
sh.process_array(&h);
sh.hash(&mut g);
cpa_encrypt(params,&g[32..],&pk,&hm,ct);
sh = SHA3::new(sha3::HASH256);
for i in 0..ciphertext_size {
sh.process(ct[i]);
}
sh.hash(&mut h);
sh = SHA3::new(sha3::SHAKE256);
sh.process_array(&g[0..32]);
sh.process_array(&h);
sh.shake(ss,shared_secret_size);
}
fn cca_decrypt(params: &[usize],sk: &[u8],ct: &[u8],ss: &mut [u8]) {
let mut m:[u8;32]=[0;32];
let mut g:[u8;64]=[0;64];
let ck=params[0];
let secret_cpa_key_size=ck*(DEGREE*3)/2;
let public_key_size=32+ck*(DEGREE*3)/2;
let shared_secret_size=params[5];
let pk=&sk[secret_cpa_key_size..secret_cpa_key_size+public_key_size];
let h=&sk[secret_cpa_key_size+public_key_size..secret_cpa_key_size+public_key_size+32];
let z=&sk[secret_cpa_key_size+public_key_size+32..secret_cpa_key_size+public_key_size+64];
cpa_decrypt(params,sk,ct,&mut m);
let mut sh = SHA3::new(sha3::HASH512);
sh.process_array(&m);
sh.process_array(h);
sh.hash(&mut g);
let mask=cpa_check_encrypt(params,&g[32..],pk,&m,ct); // FO check ct is correct
for i in 0..32 {
g[i]^=(g[i]^z[i])&mask;
}
sh = SHA3::new(sha3::HASH256);
sh.process_array(&ct);
sh.hash(&mut m);
sh = SHA3::new(sha3::SHAKE256);
sh.process_array(&g[0..32]);
sh.process_array(&m);
sh.shake(ss,shared_secret_size);
}
// ********************* Kyber API ******************************
pub fn keypair_512(randbytes64: &[u8],sk: &mut [u8],pk: &mut [u8]) {
cca_keypair(&PARAMS_512,randbytes64,sk,pk);
}
pub fn keypair_768(randbytes64: &[u8],sk: &mut [u8],pk: &mut [u8]) {
cca_keypair(&PARAMS_768,randbytes64,sk,pk);
}
pub fn keypair_1024(randbytes64: &[u8],sk: &mut [u8],pk: &mut [u8]) {
cca_keypair(&PARAMS_1024,randbytes64,sk,pk);
}
pub fn encrypt_512(randbytes32: &[u8],pk: &[u8],ss: &mut [u8],ct: &mut [u8]) {
cca_encrypt(&PARAMS_512,randbytes32,pk,ss,ct);
}
pub fn encrypt_768(randbytes32: &[u8],pk: &[u8],ss: &mut [u8],ct: &mut [u8]) {
cca_encrypt(&PARAMS_768,randbytes32,pk,ss,ct);
}
pub fn encrypt_1024(randbytes32: &[u8],pk: &[u8],ss: &mut [u8],ct: &mut [u8]) {
cca_encrypt(&PARAMS_1024,randbytes32,pk,ss,ct);
}
pub fn decrypt_512(sk: &[u8],ct: &[u8],ss: &mut [u8]) {
cca_decrypt(&PARAMS_512,sk,ct,ss);
}
pub fn decrypt_768(sk: &[u8],ct: &[u8],ss: &mut [u8]) {
cca_decrypt(&PARAMS_768,sk,ct,ss);
}
pub fn decrypt_1024(sk: &[u8],ct: &[u8],ss: &mut [u8]) {
cca_decrypt(&PARAMS_1024,sk,ct,ss);
}
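/*
   Example usage sketch (not part of the original file): a Kyber-512 round trip,
   assuming the caller supplies its own entropy (a real deployment would draw
   randbytes64/randbytes32 from a CSPRNG rather than fixed placeholder bytes).
    let randbytes64: [u8; 64] = [0x5a; 64];             // placeholder entropy for key generation
    let randbytes32: [u8; 32] = [0xa5; 32];             // placeholder entropy for encapsulation
    let mut sk: [u8; SECRET_CCA_SIZE_512] = [0; SECRET_CCA_SIZE_512];
    let mut pk: [u8; PUBLIC_SIZE_512] = [0; PUBLIC_SIZE_512];
    let mut ct: [u8; CIPHERTEXT_SIZE_512] = [0; CIPHERTEXT_SIZE_512];
    let mut ss1: [u8; SHARED_SECRET_512] = [0; SHARED_SECRET_512];
    let mut ss2: [u8; SHARED_SECRET_512] = [0; SHARED_SECRET_512];
    keypair_512(&randbytes64, &mut sk, &mut pk);        // generate the CCA keypair
    encrypt_512(&randbytes32, &pk, &mut ss1, &mut ct);  // encapsulate a shared secret
    decrypt_512(&sk, &ct, &mut ss2);                    // decapsulate on the receiving side
    assert_eq!(ss1, ss2);                               // both sides hold the same 32-byte secret
*/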

486
crates/bls48581/src/lib.rs Normal file
View File

@ -0,0 +1,486 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#![allow(clippy::many_single_char_names)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::manual_memcpy)]
#![allow(clippy::new_without_default)]
pub mod bls48581;
pub mod bls;
pub mod arch;
pub mod rand;
pub mod hmac;
pub mod hash256;
pub mod hash384;
pub mod hash512;
pub mod sha3;
use std::error::Error;
use bls48581::big;
use bls48581::ecp;
use bls48581::ecp8;
use bls48581::rom;
use bls48581::pair8;
uniffi::include_scaffolding!("lib");
fn recurse_fft(
values: &[big::BIG],
offset: u64,
stride: u64,
roots_stride: u64,
out: &mut [big::BIG],
fft_width: u64,
inverse: bool,
) {
let roots = if inverse {
&bls::singleton().ReverseRootsOfUnityBLS48581[&fft_width]
} else {
&bls::singleton().RootsOfUnityBLS48581[&fft_width]
};
if out.len() <= 16 {
let l = out.len() as u64;
for i in 0..l {
let mut last = big::BIG::modmul(
&values[offset as usize],
&roots[0],
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
for j in 1..l {
let mid = big::BIG::modmul(
&values[(offset + j * stride) as usize],
&roots[((i * j) % l) as usize * roots_stride as usize],
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
last = big::BIG::modadd(
&last,
&mid,
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
}
out[i as usize] = last;
}
return;
}
let half = (out.len() as u64) >> 1;
// slide to the left
recurse_fft(
values,
offset,
stride << 1,
roots_stride << 1,
&mut out[..half as usize],
fft_width,
inverse,
);
// slide to the right
recurse_fft(
values,
offset + stride,
stride << 1,
roots_stride << 1,
&mut out[half as usize..],
fft_width,
inverse,
);
// cha cha now, y'all
for i in 0..half {
let mul = big::BIG::modmul(
&out[(i + half) as usize],
&roots[(i * roots_stride) as usize],
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
let mul_add = big::BIG::modadd(
&out[i as usize],
&mul,
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
out[(i + half) as usize] = big::BIG::modadd(
&out[i as usize],
&big::BIG::modneg(&mul, &big::BIG::new_ints(&rom::CURVE_ORDER)),
&big::BIG::new_ints(&rom::CURVE_ORDER),
);
out[i as usize] = mul_add;
}
}
fn fft(
values: &[big::BIG],
fft_width: u64,
inverse: bool,
) -> Result<Vec<big::BIG>, String> {
let mut width = values.len() as u64;
if width > fft_width {
return Err("invalid width of values".into());
}
if width & (width - 1) != 0 {
width = nearest_power_of_two(width);
}
// We make a copy so we can mutate it during the work.
let mut working_values = vec![big::BIG::new(); width as usize];
for i in 0..values.len() {
working_values[i] = values[i].clone();
}
for i in values.len()..width as usize {
working_values[i] = big::BIG::new();
}
let mut out = vec![big::BIG::new(); width as usize];
let stride = fft_width / width;
if inverse {
let mut inv_len = big::BIG::new_int(width as isize);
inv_len.invmodp(&big::BIG::new_ints(&rom::CURVE_ORDER));
recurse_fft(&working_values, 0, 1, stride, &mut out, fft_width, inverse);
for i in 0..out.len() {
out[i] = big::BIG::modmul(&out[i], &inv_len, &big::BIG::new_ints(&rom::CURVE_ORDER));
}
Ok(out)
} else {
recurse_fft(&working_values, 0, 1, stride, &mut out, fft_width, inverse);
Ok(out)
}
}
fn recurse_fft_g1(
values: &[ecp::ECP],
offset: u64,
stride: u64,
roots_stride: u64,
out: &mut [ecp::ECP],
fft_width: u64,
inverse: bool,
) {
let roots = if inverse {
&bls::singleton().ReverseRootsOfUnityBLS48581[&fft_width]
} else {
&bls::singleton().RootsOfUnityBLS48581[&fft_width]
};
if out.len() <= 16 {
let l = out.len() as u64;
for i in 0..l {
let mut last = ecp::ECP::mul(&values[offset as usize].clone(), &roots[0]);
for j in 1..l {
let mid = ecp::ECP::mul(
&values[(offset + j * stride) as usize].clone(),
&roots[((i * j) % l) as usize * roots_stride as usize],
);
                last.add(&mid);
}
out[i as usize] = last.clone();
}
return;
}
let half = (out.len() as u64) >> 1;
// slide to the left
recurse_fft_g1(
values,
offset,
stride << 1,
roots_stride << 1,
&mut out[..half as usize],
fft_width,
inverse,
);
// slide to the right
recurse_fft_g1(
values,
offset + stride,
stride << 1,
roots_stride << 1,
&mut out[half as usize..],
fft_width,
inverse,
);
// cha cha now, y'all
for i in 0..half {
let mul = out[(i + half) as usize].clone().mul(&roots[(i * roots_stride) as usize].clone());
let mut mul_add = out[i as usize].clone();
mul_add.add(&mul.clone());
out[(i + half) as usize] = out[i as usize].clone();
out[(i + half) as usize].sub(&mul);
out[i as usize] = mul_add;
}
}
fn fft_g1(
values: &[ecp::ECP],
fft_width: u64,
inverse: bool,
) -> Result<Vec<ecp::ECP>, String> {
let mut width = values.len() as u64;
if width > fft_width {
return Err("invalid width of values".into());
}
if width & (width - 1) != 0 {
width = nearest_power_of_two(width);
}
let mut working_values = vec![ecp::ECP::new(); width as usize];
for i in 0..values.len() {
working_values[i] = values[i].clone();
}
for i in values.len()..width as usize {
working_values[i] = ecp::ECP::generator();
}
let mut out = vec![ecp::ECP::new(); width as usize];
let stride = fft_width / width;
if inverse {
let mut inv_len = big::BIG::new_int(width as isize);
inv_len.invmodp(&big::BIG::new_ints(&rom::CURVE_ORDER));
recurse_fft_g1(&working_values, 0, 1, stride, &mut out, fft_width, inverse);
for i in 0..out.len() {
out[i] = out[i].clone().mul(&inv_len);
}
Ok(out)
} else {
recurse_fft_g1(&working_values, 0, 1, stride, &mut out, fft_width, inverse);
Ok(out)
}
}
fn nearest_power_of_two(number: u64) -> u64 {
let mut power = 1;
while number > power {
power <<= 1;
}
power
}
fn bytes_to_polynomial(
bytes: &[u8],
) -> Vec<big::BIG> {
let size = bytes.len() / 64;
let trunc_last = bytes.len() % 64 > 0;
let mut poly = Vec::new();
for i in 0..size {
let scalar = big::BIG::frombytes(&bytes[i * 64..(i + 1) * 64]);
poly.push(scalar);
}
if trunc_last {
let scalar = big::BIG::frombytes(&bytes[size * 64..]);
poly.push(scalar);
}
return poly;
}
fn point_linear_combination(
points: &Vec<&ecp::ECP>,
scalars: &Vec<big::BIG>,
) -> Result<ecp::ECP, Box<dyn Error>> {
if points.len() != scalars.len() {
return Err(format!(
"length mismatch between arguments, points: {}, scalars: {}",
points.len(),
scalars.len(),
).into());
}
let mut result = ecp::ECP::new();
for (i, point) in points.iter().enumerate() {
let c = point.clone();
let p = c.mul(&scalars[i]);
        result.add(&p);
}
Ok(result)
}
fn verify(
commitment: &ecp::ECP,
z: &big::BIG,
y: &big::BIG,
proof: &ecp::ECP,
) -> bool {
let z2 = ecp8::ECP8::generator().mul(z);
let y1 = ecp::ECP::generator().mul(y);
let mut xz = bls::singleton().CeremonyBLS48581G2[1].clone();
xz.sub(&z2);
let mut cy = commitment.clone();
cy.sub(&y1);
cy.neg();
let mut r = pair8::initmp();
pair8::another(&mut r, &xz, &proof);
pair8::another(&mut r, &ecp8::ECP8::generator(), &cy);
let mut v = pair8::miller(&mut r);
v = pair8::fexp(&v);
return v.isunity();
}
pub fn commit_raw(
data: &[u8],
poly_size: u64,
) -> Vec<u8> {
let mut poly = bytes_to_polynomial(data);
while poly.len() < poly_size as usize {
poly.push(big::BIG::new());
}
match point_linear_combination(
&bls::singleton().FFTBLS48581[&poly_size].iter().collect(),
&poly,
) {
Ok(commit) => {
let mut b = [0u8; 74];
commit.tobytes(&mut b, true);
return b.to_vec();
}
Err(_e) => {
return [].to_vec();
}
}
}
pub fn prove_raw(
data: &[u8],
index: u64,
poly_size: u64,
) -> Vec<u8> {
let mut poly = bytes_to_polynomial(data);
while poly.len() < poly_size as usize {
poly.push(big::BIG::new());
}
let z = bls::singleton().RootsOfUnityBLS48581[&poly_size][index as usize];
match fft(
&poly,
poly_size,
true,
) {
Ok(eval_poly) => {
let mut subz = big::BIG::new_int(0);
subz = big::BIG::modadd(&subz, &big::BIG::modneg(&z, &big::BIG::new_ints(&rom::CURVE_ORDER)), &big::BIG::new_ints(&rom::CURVE_ORDER));
let mut subzinv = subz.clone();
subzinv.invmodp(&big::BIG::new_ints(&rom::CURVE_ORDER));
let mut o = big::BIG::new_int(1);
let mut oinv = o.clone();
oinv.invmodp(&big::BIG::new_ints(&rom::CURVE_ORDER));
let divisors: Vec<big::BIG> = vec![
subz,
o
];
let invdivisors: Vec<big::BIG> = vec![
subzinv,
oinv
];
let mut a: Vec<big::BIG> = eval_poly.iter().map(|x| x.clone()).collect();
// Adapted from Feist's amortized proofs:
let mut a_pos = a.len() - 1;
let b_pos = divisors.len() - 1;
let mut diff = a_pos as isize - b_pos as isize;
let mut out: Vec<big::BIG> = vec![big::BIG::new(); (diff + 1) as usize];
while diff >= 0 {
out[diff as usize] = a[a_pos].clone();
out[diff as usize] = big::BIG::modmul(&out[diff as usize], &invdivisors[b_pos], &big::BIG::new_ints(&rom::CURVE_ORDER));
for i in (0..=b_pos).rev() {
let den = &out[diff as usize].clone();
a[diff as usize + i] = a[diff as usize + i].clone();
a[diff as usize + i] = big::BIG::modadd(
&a[diff as usize + i],
&big::BIG::modneg(
&big::BIG::modmul(&den, &divisors[i], &big::BIG::new_ints(&rom::CURVE_ORDER)),
&big::BIG::new_ints(&rom::CURVE_ORDER)
),
&big::BIG::new_ints(&rom::CURVE_ORDER)
);
}
let mut b = [0u8;73];
out[diff as usize].tobytes(&mut b);
a_pos -= 1;
diff -= 1;
}
match point_linear_combination(
&bls::singleton().CeremonyBLS48581G1[..(poly_size as usize - 1)].iter().collect(),
&out,
) {
Ok(proof) => {
let mut b = [0u8; 74];
proof.tobytes(&mut b, true);
return b.to_vec();
}
Err(_e) => {
return [].to_vec();
}
}
},
Err(_e) => {
return [].to_vec();
}
}
}
pub fn verify_raw(
data: &[u8],
commit: &[u8],
index: u64,
proof: &[u8],
poly_size: u64,
) -> bool {
let z = bls::singleton().RootsOfUnityBLS48581[&poly_size][index as usize];
let y = big::BIG::frombytes(data);
let c = ecp::ECP::frombytes(commit);
let p = ecp::ECP::frombytes(proof);
return verify(
&c,
&z,
&y,
&p,
);
}
pub fn init() {
bls::singleton();
}
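/*
   Example usage sketch (not part of the original file): commit to a blob, open it at a
   single index, and verify the opening. This assumes the ceremony/FFT parameters for
   poly_size 1024 are available via bls::singleton(), and that each consecutive 64-byte
   chunk of `data` is the claimed evaluation at the matching root of unity.
    init();
    let data = vec![0u8; 64 * 1024];                        // 1024 field elements of 64 bytes each
    let commitment = commit_raw(&data, 1024);               // 74-byte compressed G1 commitment
    let proof = prove_raw(&data, 3, 1024);                  // opening proof for index 3
    let y = &data[3 * 64..4 * 64];                          // claimed evaluation bytes at index 3
    let ok = verify_raw(y, &commitment, 3, &proof, 1024);   // pairing check of the opening
    assert!(ok);
*/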

View File

@ -0,0 +1,6 @@
namespace bls48581 {
void init();
sequence<u8> commit_raw([ByRef] sequence<u8> data, u64 poly_size);
sequence<u8> prove_raw([ByRef] sequence<u8> data, u64 index, u64 poly_size);
boolean verify_raw([ByRef] sequence<u8> data, [ByRef] sequence<u8> commit, u64 index, [ByRef] sequence<u8> proof, u64 poly_size);
};

View File

@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}

709
crates/bls48581/src/nhs.rs Normal file
View File

@ -0,0 +1,709 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* NewHope Simple API high-level functions */
use crate::rand::RAND;
use crate::sha3;
use crate::sha3::SHA3;
const PRIME: i32 = 0x3001; // q in Hex
const LGN: usize = 10; // Degree n=2^LGN
const ND: u32 = 0xF7002FFF; // 1/(R-q) mod R
const ONE: i32 = 0x2AC8; // R mod q
const R2MODP: u64 = 0x1620; // R^2 mod q
const DEGREE: usize = 1 << LGN;
const WL: usize = 32;
const INV: i32 = 0xeab;
const INVPR: i32 = 0x2c2a;
const ROOTS: [i32; 1024] = [
0x2ac8, 0x2baf, 0x299b, 0x685, 0x2f04, 0x158d, 0x2d49, 0x24b5, 0x1edc, 0xab3, 0x2a95, 0x24d,
0x3cb, 0x6a8, 0x12f9, 0x15ba, 0x1861, 0x2a89, 0x1c5c, 0xbe6, 0xc1e, 0x2024, 0x207, 0x19ce,
0x2710, 0x1744, 0x18bc, 0x2cd7, 0x396, 0x18d5, 0x1c45, 0xc4, 0x21a6, 0xe03, 0x2b3c, 0x2d91,
0xc5d, 0x432, 0x1fbc, 0xcae, 0x2512, 0x2979, 0x3b2, 0x714, 0xb2e, 0x1a97, 0x1a03, 0x1bcd,
0x2216, 0x2701, 0xa, 0x263c, 0x1179, 0x200c, 0x2d08, 0x1c34, 0x291, 0x2c99, 0x2a5a, 0x723,
0xb1d, 0x1ccc, 0x1fb6, 0x2f58, 0x2bfe, 0x1cda, 0x2a0, 0x5f1, 0x2de, 0x1fc7, 0x1ea8, 0x1719,
0x2fa7, 0x27ec, 0x20ff, 0x12c0, 0x1ac1, 0x2232, 0x2f9b, 0xd3e, 0x2aed, 0x15f0, 0x11e8, 0xed0,
0x26a, 0x1de5, 0xa3f, 0xf43, 0xebf, 0x204e, 0xac7, 0x2d9c, 0x5ea, 0x25d1, 0xb6, 0x49c, 0x995,
0x2555, 0x26e2, 0x100, 0x1878, 0x5aa, 0x2e10, 0x271c, 0xcb, 0x1b4c, 0x2fb8, 0x25b7, 0x1543,
0x2c7b, 0x241a, 0x2223, 0x20ca, 0x24ed, 0x137, 0x1b65, 0x1dc2, 0x7c7, 0x2ec3, 0xd0c, 0x1169,
0x1c7a, 0x1ea1, 0xf89, 0x2199, 0x291d, 0x1088, 0x2046, 0x256d, 0x2bc7, 0x2e9b, 0x41f, 0x1b55,
0x2b38, 0xd0, 0x2e6a, 0x1755, 0x6bc, 0x2724, 0x3ba, 0x222e, 0x2c5c, 0x2da5, 0x213c, 0x10fe,
0x169a, 0x1552, 0x5d3, 0x300, 0x1b5d, 0x1342, 0x2004, 0x256f, 0x2039, 0x667, 0x23b5, 0x1123,
0xdb, 0x2da0, 0xe1e, 0x2f54, 0x2767, 0x154a, 0x40a, 0x11d3, 0x2821, 0xc09, 0x974, 0x694, 0xfbf,
0x27ba, 0x132, 0x83f, 0x2d06, 0x10e, 0x183f, 0x29ae, 0x28c3, 0x2dc9, 0x1144, 0x2c70, 0x2a4a,
0xf3c, 0x1e32, 0x1171, 0x1e43, 0xdd4, 0x2ddf, 0x28d2, 0xfac, 0x3c4, 0x2f19, 0x10a6, 0x2f7,
0xe1d, 0x828, 0x138f, 0x1332, 0xfab, 0xcf6, 0x13f8, 0x24a0, 0x112d, 0x2717, 0x6e7, 0x1044,
0x36e, 0xfe8, 0x6a, 0xba7, 0x1d69, 0x29ec, 0x23b2, 0xaee, 0x16df, 0x1068, 0x1a7e, 0x253f,
0x24c, 0xb33, 0x2683, 0x15ce, 0x1ad3, 0x1a36, 0xc96, 0xaea, 0x260a, 0xce, 0x28b1, 0xe4f,
0x2b11, 0x5f8, 0x1fc4, 0xe77, 0x2366, 0x11f9, 0x153c, 0x24eb, 0x20cd, 0x1398, 0x22, 0x2b97,
0x249b, 0x8eb, 0x12b2, 0x2fe3, 0x29c1, 0x1b00, 0x2663, 0xeaa, 0x2e06, 0xe0, 0x1569, 0x10f5,
0x284e, 0xa38, 0x201d, 0x1c53, 0x1681, 0x1f6f, 0x2f95, 0x2fe8, 0xacb, 0x1680, 0x17fd, 0x2c39,
0x165a, 0x10bb, 0x29d8, 0x2622, 0x1196, 0x884, 0x2a79, 0x140e, 0x2d80, 0x6fa, 0x11b2, 0x26c4,
0x355, 0x1054, 0x29e9, 0x23ed, 0xbe3, 0x24fa, 0x1fb3, 0x10ac, 0x2919, 0x2584, 0x10a4, 0xe85,
0x650, 0x1893, 0x1dc1, 0xd8e, 0x12dc, 0x2d42, 0x284d, 0xfff, 0x250f, 0xacd, 0x13c3, 0x6cc,
0x1a79, 0x1221, 0x2614, 0x270a, 0x1ea, 0x155, 0x2818, 0x222c, 0x2e5b, 0x25d8, 0x1dbf, 0x191c,
0xb0f, 0xdac, 0x1082, 0x12ef, 0x11b6, 0xfa8, 0x2b72, 0x159d, 0x209e, 0x31b, 0x2c7c, 0x14f7,
0xe09, 0x1bb2, 0x1ec7, 0x2404, 0x20ae, 0x6ad, 0xed6, 0x2b70, 0x1c7b, 0x18d1, 0x2732, 0x12da,
0xd56, 0x5c1, 0x1648, 0x18b7, 0x1605, 0x1bc4, 0x280, 0x2ece, 0xc, 0x1aae, 0x1c4, 0x1cdb,
0x22d6, 0x21d8, 0x257c, 0x51f, 0x211b, 0xff, 0x2ee0, 0x2585, 0xe1, 0x2c35, 0x26db, 0x2971,
0x2208, 0x17e1, 0x21be, 0x135e, 0x28d6, 0x2891, 0x1689, 0x2138, 0xb86, 0x2e3a, 0x1204, 0x2d10,
0x2324, 0xf3f, 0x2508, 0x33d, 0xcb2, 0x292a, 0xe27, 0x2e64, 0x29f8, 0x2d46, 0x9b7, 0x20eb,
0x1b7c, 0x9eb, 0x2b2a, 0x58c, 0x27d0, 0x121b, 0x272e, 0x29f6, 0x2dbd, 0x2697, 0x2aac, 0xd6f,
0x1c67, 0x2c5b, 0x108d, 0x363, 0x249d, 0x2d5e, 0x2fd, 0x2cb2, 0x1f8f, 0x20a4, 0xa19, 0x2ac9,
0x19b1, 0x1581, 0x17a2, 0x29eb, 0x1b72, 0x13b0, 0xee4, 0xa8f, 0x2315, 0x5e6, 0x951, 0x2e29,
0xdad, 0x1f2b, 0x224e, 0x37f, 0x1a72, 0xa91, 0x1407, 0x2df9, 0x3ad, 0x23f7, 0x1a24, 0x1d2a,
0x234b, 0x1df3, 0x1143, 0x7ff, 0x1a6d, 0x2774, 0x2690, 0x2ab5, 0x586, 0x2781, 0x2009, 0x2fdd,
0x2881, 0x399, 0x2fb6, 0x144, 0x137f, 0xfa0, 0x2e4c, 0x1c7f, 0x2fac, 0xb09, 0x1264, 0x127b,
0x198c, 0x2b40, 0x230, 0x1cf4, 0x180b, 0xb58, 0x144a, 0x2aec, 0xfb, 0x2602, 0x14ee, 0x783,
0x1098, 0x23d8, 0x203, 0xe9, 0x108a, 0x14b8, 0xeec, 0xc58, 0x1248, 0x243c, 0x28aa, 0x6bf,
0x27c4, 0x276e, 0x19b8, 0x1d11, 0x2e16, 0x472, 0x1464, 0x24b9, 0x662, 0x1097, 0x2067, 0x20d6,
0x171c, 0x4, 0x682, 0x17bb, 0x1186, 0x4f2, 0x3ff, 0x2a43, 0x1dc7, 0x1ae5, 0x8cc, 0x2e7c,
0x2ef8, 0x2ae0, 0x2904, 0xed4, 0x6c5, 0x14ae, 0xb72, 0x11c3, 0x337, 0x2da3, 0x2916, 0x6d8,
0x1cf9, 0x10ee, 0x1800, 0x1ae4, 0xa0d, 0x101b, 0x1a8d, 0x2e98, 0x24cd, 0x813, 0x1aa4, 0x9b9,
0x680, 0x2349, 0x24d1, 0x20f8, 0xe31, 0x249f, 0x216b, 0x12d9, 0x1d21, 0x19db, 0x191a, 0x1dd0,
0x5df, 0x55c, 0x2b86, 0x213, 0xe9e, 0x1ef1, 0x268a, 0x1d5e, 0x1e20, 0x28c1, 0x1379, 0x249,
0x19de, 0x18b, 0x1e41, 0x2a1e, 0x2612, 0x297, 0x2e96, 0x2102, 0x46, 0x1b9f, 0x1a4d, 0x2050,
0x1b32, 0x568, 0x11f7, 0x1829, 0x870, 0x1f4, 0x1dca, 0x990, 0x1df6, 0x2b62, 0x13ec, 0x9f2,
0x1260, 0x2997, 0x1412, 0x1e6d, 0x1694, 0x11ac, 0x2d8b, 0x276f, 0x26f5, 0x233e, 0x2b44, 0x2f5a,
0x2d37, 0x2cb1, 0xc75, 0x98d, 0x1d56, 0x7ae, 0x10e6, 0x113f, 0x17b8, 0xad3, 0x737, 0x221e,
0x1b70, 0x1f3e, 0x2966, 0x18b2, 0x4fa, 0x2044, 0x1312, 0x154e, 0x2029, 0x700, 0x1b45, 0x27a6,
0x226a, 0x21bf, 0x58d, 0x2f11, 0x2e02, 0x17fc, 0x4d2, 0x1757, 0xcb1, 0x2ef1, 0x2582, 0x1276,
0x881, 0x2fc0, 0x104a, 0x670, 0x274f, 0x2b53, 0x19dd, 0x752, 0x1663, 0xcbd, 0x2b2b, 0x2fc6,
0x13b6, 0x21e6, 0x15f6, 0x126b, 0x2637, 0x1cd9, 0x2f50, 0xe82, 0x5b0, 0x24e0, 0x1350, 0x2f24,
0x21f7, 0x1a16, 0x2f3e, 0x167e, 0x1f7d, 0x28a0, 0x16f0, 0xe33, 0x53b, 0x28c5, 0x1500, 0x2f88,
0x26cc, 0x2018, 0x1604, 0x218b, 0x2cd1, 0x9ee, 0x17f3, 0x5fd, 0x1f5a, 0x2d0, 0x2b46, 0x23cc,
0x503, 0x1c46, 0x1cc3, 0x28e2, 0x243e, 0x122b, 0x2e0c, 0xe37, 0x2611, 0x85e, 0x9b8, 0x1b24,
0x762, 0x19b6, 0x3bc, 0x2d50, 0x2079, 0x18da, 0x170a, 0x800, 0xaa2, 0x135a, 0x1a15, 0x13d1,
0xca, 0x2113, 0x2db9, 0xdb2, 0x1a5c, 0x29a9, 0x1488, 0x14c1, 0x2c9, 0x917, 0x28e7, 0x265c,
0xdab, 0x2ab9, 0x2bc6, 0x105b, 0x1839, 0x219c, 0x50, 0x11da, 0x1802, 0xf56, 0x2e6, 0x2190,
0xddb, 0x56e, 0x9d9, 0x1c81, 0x1016, 0x12d6, 0x296f, 0x14b4, 0x1014, 0x1e64, 0x1d90, 0x89f,
0x2bc2, 0x2777, 0x2819, 0x1c65, 0x1a41, 0x5a2, 0x2cd2, 0x427, 0xd71, 0x29c8, 0x1e58, 0x53f,
0x7c5, 0x1dcd, 0x4a1, 0x1268, 0x2597, 0x2926, 0xee, 0x111b, 0x1038, 0xe6c, 0x22dc, 0x2f2f,
0x441, 0x2cfd, 0x1cb0, 0x6a4, 0x2224, 0x620, 0x5dc, 0x16b1, 0x2a1d, 0x1787, 0x20c7, 0x641,
0xd84, 0x1c05, 0x2d0d, 0x2f52, 0x1b8c, 0xd7d, 0x17e8, 0x1589, 0xc73, 0x151b, 0x4e2, 0x1ae9,
0x1b18, 0xb9b, 0x949, 0x2c60, 0x1e7a, 0xd5, 0x1bdc, 0x1f57, 0x1753, 0x124a, 0x559, 0xb76,
0x2334, 0x12d1, 0x1de1, 0x14b2, 0x2faa, 0x1697, 0x147a, 0x5a1, 0x2c30, 0x1c02, 0x1043, 0x2ee1,
0x2402, 0x1cc8, 0x2a16, 0xff7, 0x1364, 0x1b9a, 0x2a53, 0x2f94, 0x294c, 0x1ee5, 0x1a87, 0x2141,
0xd66, 0x953, 0x28a3, 0x2f30, 0x2477, 0x18e3, 0x1035, 0x1fc1, 0x1d68, 0x2fb3, 0x138c, 0x2487,
0x1bf8, 0xd96, 0x1018, 0x748, 0x244e, 0x15bd, 0x175e, 0x2be, 0x23d, 0x1da, 0x176d, 0xc17,
0x24be, 0x2ebb, 0x7d8, 0x100a, 0x759, 0x1db4, 0x2259, 0x23f4, 0x2d59, 0x2847, 0xbf5, 0x1cfe,
0xa20, 0x258, 0x1180, 0x279c, 0x54, 0x2abf, 0xc5c, 0x9f9, 0x3d5, 0x2ce4, 0x165f, 0x23d9,
0x27b9, 0x6f9, 0x281a, 0x169e, 0x627, 0x156d, 0x1ff8, 0x211, 0x2e34, 0x1724, 0x2c2e, 0x2790,
0x2dd5, 0x2bf2, 0xdbc, 0x2884, 0x20a9, 0x2390, 0x1e1a, 0x1b6a, 0x5f7, 0xab7, 0x1333, 0x16ab,
0x28dd, 0x20, 0x30f, 0x24b6, 0x5c2, 0x1ce4, 0x1400, 0x2669, 0x60, 0x156c, 0xe20, 0x26d4,
0x26ab, 0x1ebb, 0x223d, 0x5b4, 0x2025, 0x1e1c, 0xaae, 0x2e08, 0x6cd, 0x1677, 0x13d9, 0x17b5,
0x1046, 0x1d8c, 0x14eb, 0x18d8, 0x1ce5, 0x2478, 0x16ae, 0xb79, 0x23d4, 0x684, 0x156b, 0x567,
0x1a, 0x29ce, 0x83a, 0x19e8, 0x58e, 0x294a, 0x1136, 0x2319, 0x2fba, 0x1a29, 0x1d, 0x1879,
0x291b, 0x19f6, 0x2c2f, 0x21c9, 0x19bb, 0xbbc, 0x26f9, 0xc22, 0x708, 0x11a1, 0x18d3, 0x7f8,
0x28f8, 0x2427, 0x1deb, 0xaed, 0x26aa, 0x2482, 0x203b, 0x2f05, 0x2b82, 0x192f, 0x2df4, 0x8dc,
0x2877, 0xd5e, 0x240e, 0x775, 0x2dae, 0x1d3e, 0x20ba, 0x215b, 0x22d1, 0xeba, 0xf50, 0xaa8,
0x184a, 0x1f67, 0x2e04, 0xc6e, 0x6dd, 0x1a09, 0x27f, 0x494, 0x1426, 0xae3, 0xe15, 0x65f,
0x13c4, 0x105, 0x872, 0x2667, 0x1ff6, 0xd9f, 0x2ca1, 0x2f39, 0x2657, 0x23fd, 0x2405, 0xb73,
0x2294, 0x1f1e, 0x2eba, 0x110a, 0x2cae, 0x141f, 0x22cd, 0x25d6, 0x11c1, 0x1c, 0x2d8e, 0x161a,
0x1aa8, 0x229e, 0x1bf9, 0x7cf, 0x106d, 0x2c40, 0xd93, 0x255e, 0x28c2, 0xc1a, 0x2f17, 0x7ca,
0x2f63, 0xbf,
];
const IROOTS: [i32; 1024] = [
0x2ac8, 0x452, 0x297c, 0x666, 0xb4c, 0x2b8, 0x1a74, 0xfd, 0x1a47, 0x1d08, 0x2959, 0x2c36,
0x2db4, 0x56c, 0x254e, 0x1125, 0x2f3d, 0x13bc, 0x172c, 0x2c6b, 0x32a, 0x1745, 0x18bd, 0x8f1,
0x1633, 0x2dfa, 0xfdd, 0x23e3, 0x241b, 0x13a5, 0x578, 0x17a0, 0xa9, 0x104b, 0x1335, 0x24e4,
0x28de, 0x5a7, 0x368, 0x2d70, 0x13cd, 0x2f9, 0xff5, 0x1e88, 0x9c5, 0x2ff7, 0x900, 0xdeb,
0x1434, 0x15fe, 0x156a, 0x24d3, 0x28ed, 0x2c4f, 0x688, 0xaef, 0x2353, 0x1045, 0x2bcf, 0x23a4,
0x270, 0x4c5, 0x21fe, 0xe5b, 0xfbb, 0x1f79, 0x6e4, 0xe68, 0x2078, 0x1160, 0x1387, 0x1e98,
0x22f5, 0x13e, 0x283a, 0x123f, 0x149c, 0x2eca, 0xb14, 0xf37, 0xdde, 0xbe7, 0x386, 0x1abe,
0xa4a, 0x49, 0x14b5, 0x2f36, 0x8e5, 0x1f1, 0x2a57, 0x1789, 0x2f01, 0x91f, 0xaac, 0x266c,
0x2b65, 0x2f4b, 0xa30, 0x2a17, 0x265, 0x253a, 0xfb3, 0x2142, 0x20be, 0x25c2, 0x121c, 0x2d97,
0x2131, 0x1e19, 0x1a11, 0x514, 0x22c3, 0x66, 0xdcf, 0x1540, 0x1d41, 0xf02, 0x815, 0x5a, 0x18e8,
0x1159, 0x103a, 0x2d23, 0x2a10, 0x2d61, 0x1327, 0x403, 0x25c9, 0x7b3, 0x1f0c, 0x1a98, 0x2f21,
0x1fb, 0x2157, 0x99e, 0x1501, 0x640, 0x1e, 0x1d4f, 0x2716, 0xb66, 0x46a, 0x2fdf, 0x1c69, 0xf34,
0xb16, 0x1ac5, 0x1e08, 0xc9b, 0x218a, 0x103d, 0x2a09, 0x4f0, 0x21b2, 0x750, 0x2f33, 0x9f7,
0x2517, 0x236b, 0x15cb, 0x152e, 0x1a33, 0x97e, 0x24ce, 0x2db5, 0xac2, 0x1583, 0x1f99, 0x1922,
0x2513, 0xc4f, 0x615, 0x1298, 0x245a, 0x2f97, 0x2019, 0x2c93, 0x1fbd, 0x291a, 0x8ea, 0x1ed4,
0xb61, 0x1c09, 0x230b, 0x2056, 0x1ccf, 0x1c72, 0x27d9, 0x21e4, 0x2d0a, 0x1f5b, 0xe8, 0x2c3d,
0x2055, 0x72f, 0x222, 0x222d, 0x11be, 0x1e90, 0x11cf, 0x20c5, 0x5b7, 0x391, 0x1ebd, 0x238,
0x73e, 0x653, 0x17c2, 0x2ef3, 0x2fb, 0x27c2, 0x2ecf, 0x847, 0x2042, 0x296d, 0x268d, 0x23f8,
0x7e0, 0x1e2e, 0x2bf7, 0x1ab7, 0x89a, 0xad, 0x21e3, 0x261, 0x2f26, 0x1ede, 0xc4c, 0x299a,
0xfc8, 0xa92, 0xffd, 0x1cbf, 0x14a4, 0x2d01, 0x2a2e, 0x1aaf, 0x1967, 0x1f03, 0xec5, 0x25c,
0x3a5, 0xdd3, 0x2c47, 0x8dd, 0x2945, 0x18ac, 0x197, 0x2f31, 0x4c9, 0x14ac, 0x2be2, 0x166,
0x43a, 0xa94, 0x1b53, 0x293c, 0x212d, 0x6fd, 0x521, 0x109, 0x185, 0x2735, 0x151c, 0x123a,
0x5be, 0x2c02, 0x2b0f, 0x1e7b, 0x1846, 0x297f, 0x2ffd, 0x18e5, 0xf2b, 0xf9a, 0x1f6a, 0x299f,
0xb48, 0x1b9d, 0x2b8f, 0x1eb, 0x12f0, 0x1649, 0x893, 0x83d, 0x2942, 0x757, 0xbc5, 0x1db9,
0x23a9, 0x2115, 0x1b49, 0x1f77, 0x2f18, 0x2dfe, 0xc29, 0x1f69, 0x287e, 0x1b13, 0x9ff, 0x2f06,
0x515, 0x1bb7, 0x24a9, 0x17f6, 0x130d, 0x2dd1, 0x4c1, 0x1675, 0x1d86, 0x1d9d, 0x24f8, 0x55,
0x1382, 0x1b5, 0x2061, 0x1c82, 0x2ebd, 0x4b, 0x2c68, 0x780, 0x24, 0xff8, 0x880, 0x2a7b, 0x54c,
0x971, 0x88d, 0x1594, 0x2802, 0x1ebe, 0x120e, 0xcb6, 0x12d7, 0x15dd, 0xc0a, 0x2c54, 0x208,
0x1bfa, 0x2570, 0x158f, 0x2c82, 0xdb3, 0x10d6, 0x2254, 0x1d8, 0x26b0, 0x2a1b, 0xcec, 0x2572,
0x211d, 0x1c51, 0x148f, 0x616, 0x185f, 0x1a80, 0x1650, 0x538, 0x25e8, 0xf5d, 0x1072, 0x34f,
0x2d04, 0x2a3, 0xb64, 0x2c9e, 0x1f74, 0x3a6, 0x139a, 0x2292, 0x555, 0x96a, 0x244, 0x60b, 0x8d3,
0x1de6, 0x831, 0x2a75, 0x4d7, 0x2616, 0x1485, 0xf16, 0x264a, 0x2bb, 0x609, 0x19d, 0x21da,
0x6d7, 0x234f, 0x2cc4, 0xaf9, 0x20c2, 0xcdd, 0x2f1, 0x1dfd, 0x1c7, 0x247b, 0xec9, 0x1978,
0x770, 0x72b, 0x1ca3, 0xe43, 0x1820, 0xdf9, 0x690, 0x926, 0x3cc, 0x2f20, 0xa7c, 0x121, 0x2f02,
0xee6, 0x2ae2, 0xa85, 0xe29, 0xd2b, 0x1326, 0x2e3d, 0x1553, 0x2ff5, 0x133, 0x2d81, 0x143d,
0x19fc, 0x174a, 0x19b9, 0x2a40, 0x22ab, 0x1d27, 0x8cf, 0x1730, 0x1386, 0x491, 0x212b, 0x2954,
0xf53, 0xbfd, 0x113a, 0x144f, 0x21f8, 0x1b0a, 0x385, 0x2ce6, 0xf63, 0x1a64, 0x48f, 0x2059,
0x1e4b, 0x1d12, 0x1f7f, 0x2255, 0x24f2, 0x16e5, 0x1242, 0xa29, 0x1a6, 0xdd5, 0x7e9, 0x2eac,
0x2e17, 0x8f7, 0x9ed, 0x1de0, 0x1588, 0x2935, 0x1c3e, 0x2534, 0xaf2, 0x2002, 0x7b4, 0x2bf,
0x1d25, 0x2273, 0x1240, 0x176e, 0x29b1, 0x217c, 0x1f5d, 0xa7d, 0x6e8, 0x1f55, 0x104e, 0xb07,
0x241e, 0xc14, 0x618, 0x1fad, 0x2cac, 0x93d, 0x1e4f, 0x2907, 0x281, 0x1bf3, 0x588, 0x277d,
0x1e6b, 0x9df, 0x629, 0x1f46, 0x19a7, 0x3c8, 0x1804, 0x1981, 0x2536, 0x19, 0x6c, 0x1092,
0x1980, 0x13ae, 0xfe4, 0x2f42, 0x9e, 0x2837, 0xea, 0x23e7, 0x73f, 0xaa3, 0x226e, 0x3c1, 0x1f94,
0x2832, 0x1408, 0xd63, 0x1559, 0x19e7, 0x273, 0x2fe5, 0x1e40, 0xa2b, 0xd34, 0x1be2, 0x353,
0x1ef7, 0x147, 0x10e3, 0xd6d, 0x248e, 0xbfc, 0xc04, 0x9aa, 0xc8, 0x360, 0x2262, 0x100b, 0x99a,
0x278f, 0x2efc, 0x1c3d, 0x29a2, 0x21ec, 0x251e, 0x1bdb, 0x2b6d, 0x2d82, 0x15f8, 0x2924, 0x2393,
0x1fd, 0x109a, 0x17b7, 0x2559, 0x20b1, 0x2147, 0xd30, 0xea6, 0xf47, 0x12c3, 0x253, 0x288c,
0xbf3, 0x22a3, 0x78a, 0x2725, 0x20d, 0x16d2, 0x47f, 0xfc, 0xfc6, 0xb7f, 0x957, 0x2514, 0x1216,
0xbda, 0x709, 0x2809, 0x172e, 0x1e60, 0x28f9, 0x23df, 0x908, 0x2445, 0x1646, 0xe38, 0x3d2,
0x160b, 0x6e6, 0x1788, 0x2fe4, 0x15d8, 0x47, 0xce8, 0x1ecb, 0x6b7, 0x2a73, 0x1619, 0x27c7,
0x633, 0x2fe7, 0x2a9a, 0x1a96, 0x297d, 0xc2d, 0x2488, 0x1953, 0xb89, 0x131c, 0x1729, 0x1b16,
0x1275, 0x1fbb, 0x184c, 0x1c28, 0x198a, 0x2934, 0x1f9, 0x2553, 0x11e5, 0xfdc, 0x2a4d, 0xdc4,
0x1146, 0x956, 0x92d, 0x21e1, 0x1a95, 0x2fa1, 0x998, 0x1c01, 0x131d, 0x2a3f, 0xb4b, 0x2cf2,
0x2fe1, 0x724, 0x1956, 0x1cce, 0x254a, 0x2a0a, 0x1497, 0x11e7, 0xc71, 0xf58, 0x77d, 0x2245,
0x40f, 0x22c, 0x871, 0x3d3, 0x18dd, 0x1cd, 0x2df0, 0x1009, 0x1a94, 0x29da, 0x1963, 0x7e7,
0x2908, 0x848, 0xc28, 0x19a2, 0x31d, 0x2c2c, 0x2608, 0x23a5, 0x542, 0x2fad, 0x865, 0x1e81,
0x2da9, 0x25e1, 0x1303, 0x240c, 0x7ba, 0x2a8, 0xc0d, 0xda8, 0x124d, 0x28a8, 0x1ff7, 0x2829,
0x146, 0xb43, 0x23ea, 0x1894, 0x2e27, 0x2dc4, 0x2d43, 0x18a3, 0x1a44, 0xbb3, 0x28b9, 0x1fe9,
0x226b, 0x1409, 0xb7a, 0x1c75, 0x4e, 0x1299, 0x1040, 0x1fcc, 0x171e, 0xb8a, 0xd1, 0x75e,
0x26ae, 0x229b, 0xec0, 0x157a, 0x111c, 0x6b5, 0x6d, 0x5ae, 0x1467, 0x1c9d, 0x200a, 0x5eb,
0x1339, 0xbff, 0x120, 0x1fbe, 0x13ff, 0x3d1, 0x2a60, 0x1b87, 0x196a, 0x57, 0x1b4f, 0x1220,
0x1d30, 0xccd, 0x248b, 0x2aa8, 0x1db7, 0x18ae, 0x10aa, 0x1425, 0x2f2c, 0x1187, 0x3a1, 0x26b8,
0x2466, 0x14e9, 0x1518, 0x2b1f, 0x1ae6, 0x238e, 0x1a78, 0x1819, 0x2284, 0x1475, 0xaf, 0x2f4,
0x13fc, 0x227d, 0x29c0, 0xf3a, 0x187a, 0x5e4, 0x1950, 0x2a25, 0x29e1, 0xddd, 0x295d, 0x1351,
0x304, 0x2bc0, 0xd2, 0xd25, 0x2195, 0x1fc9, 0x1ee6, 0x2f13, 0x6db, 0xa6a, 0x1d99, 0x2b60,
0x1234, 0x283c, 0x2ac2, 0x11a9, 0x639, 0x2290, 0x2bda, 0x32f, 0x2a5f, 0x15c0, 0x139c, 0x7e8,
0x88a, 0x43f, 0x2762, 0x1271, 0x119d, 0x1fed, 0x1b4d, 0x692, 0x1d2b, 0x1feb, 0x1380, 0x2628,
0x2a93, 0x2226, 0xe71, 0x2d1b, 0x20ab, 0x17ff, 0x1e27, 0x2fb1, 0xe65, 0x17c8, 0x1fa6, 0x43b,
0x548, 0x2256, 0x9a5, 0x71a, 0x26ea, 0x2d38, 0x1b40, 0x1b79, 0x658, 0x15a5, 0x224f, 0x248,
0xeee, 0x2f37, 0x1c30, 0x15ec, 0x1ca7, 0x255f, 0x2801, 0x18f7, 0x1727, 0xf88, 0x2b1, 0x2c45,
0x164b, 0x289f, 0x14dd, 0x2649, 0x27a3, 0x9f0, 0x21ca, 0x1f5, 0x1dd6, 0xbc3, 0x71f, 0x133e,
0x13bb, 0x2afe, 0xc35, 0x4bb, 0x2d31, 0x10a7, 0x2a04, 0x180e, 0x2613, 0x330, 0xe76, 0x19fd,
0xfe9, 0x935, 0x79, 0x1b01, 0x73c, 0x2ac6, 0x21ce, 0x1911, 0x761, 0x1084, 0x1983, 0xc3, 0x15eb,
0xe0a, 0xdd, 0x1cb1, 0xb21, 0x2a51, 0x217f, 0xb1, 0x1328, 0x9ca, 0x1d96, 0x1a0b, 0xe1b, 0x1c4b,
0x3b, 0x4d6, 0x2344, 0x199e, 0x28af, 0x1624, 0x4ae, 0x8b2, 0x2991, 0x1fb7, 0x41, 0x2780,
0x1d8b, 0xa7f, 0x110, 0x2350, 0x18aa, 0x2b2f, 0x1805, 0x1ff, 0xf0, 0x2a74, 0xe42, 0xd97, 0x85b,
0x14bc, 0x2901, 0xfd8, 0x1ab3, 0x1cef, 0xfbd, 0x2b07, 0x174f, 0x69b, 0x10c3, 0x1491, 0xde3,
0x28ca, 0x252e, 0x1849, 0x1ec2, 0x1f1b, 0x2853, 0x12ab, 0x2674, 0x238c, 0x350, 0x2ca, 0xa7,
0x4bd, 0xcc3, 0x90c, 0x892, 0x276, 0x1e55, 0x196d, 0x1194, 0x1bef, 0x66a, 0x1da1, 0x260f,
0x1c15, 0x49f, 0x120b, 0x2671, 0x1237, 0x2e0d, 0x2791, 0x17d8, 0x1e0a, 0x2a99, 0x14cf, 0xfb1,
0x15b4, 0x1462, 0x2fbb, 0xeff, 0x16b, 0x2d6a, 0x9ef, 0x5e3, 0x11c0, 0x2e76, 0x1623, 0x2db8,
0x1c88, 0x740, 0x11e1, 0x12a3, 0x977, 0x1110, 0x2163, 0x2dee, 0x47b, 0x2aa5, 0x2a22, 0x1231,
0x16e7, 0x1626, 0x12e0, 0x1d28, 0xe96, 0xb62, 0x21d0, 0xf09, 0xb30, 0xcb8, 0x2981, 0x2648,
0x155d, 0x27ee, 0xb34, 0x169, 0x1574, 0x1fe6, 0x25f4, 0x151d, 0x1801, 0x1f13, 0x1308, 0x2929,
0x6eb, 0x25e, 0x2cca, 0x1e3e, 0x248f,
];
fn round(a: i32, b: i32) -> i32 {
(a + b / 2) / b
}
/* Constant time absolute value */
fn nabs(x: i32) -> i32 {
let mask = x >> 31;
(x + mask) ^ mask
}
/* Montgomery stuff */
fn redc(t: u64) -> i32 {
let m = (t as u32).wrapping_mul(ND);
(((m as u64) * (PRIME as u64) + t) >> WL) as i32
}
fn nres(x: i32) -> i32 {
redc((x as u64) * R2MODP)
}
fn modmul(a: i32, b: i32) -> i32 {
redc((a as u64) * (b as u64))
}
/* Cooley-Tukey NTT */
fn ntt(x: &mut [i32]) {
let mut t = DEGREE / 2;
let q = PRIME;
/* Convert to Montgomery form */
for j in 0..DEGREE {
x[j] = nres(x[j])
}
let mut m = 1;
while m < DEGREE {
let mut k = 0;
for i in 0..m {
let s = ROOTS[m + i];
for j in k..k + t {
let u = x[j];
let v = modmul(x[j + t], s);
x[j] = u + v;
x[j + t] = u + 2 * q - v;
}
k += 2 * t;
}
t /= 2;
m *= 2;
}
}
/* Gentleman-Sande INTT */
fn intt(x: &mut [i32]) {
let mut t = 1;
let q = PRIME;
let mut m = DEGREE / 2;
while m > 1 {
let mut k = 0;
for i in 0..m {
let s = IROOTS[m + i];
for j in k..k + t {
let u = x[j];
let v = x[j + t];
x[j] = u + v;
let w = u + (DEGREE as i32) * q - v;
x[j + t] = modmul(w, s);
}
k += 2 * t;
}
t *= 2;
m /= 2;
}
/* Last iteration merged with n^-1 */
t = DEGREE / 2;
for j in 0..t {
let u = x[j];
let v = x[j + t];
let w = u + (DEGREE as i32) * q - v;
x[j + t] = modmul(w, INVPR);
x[j] = modmul(u + v, INV);
}
/* convert back from Montgomery to "normal" form */
for j in 0..DEGREE {
x[j] = redc(x[j] as u64);
x[j] -= q;
x[j] += (x[j] >> (WL - 1)) & q;
}
}
/* See https://eprint.iacr.org/2016/1157.pdf */
fn encode(key: &[u8], poly: &mut [i32]) {
let q2 = PRIME / 2;
let mut j = 0;
let mut i = 0;
while i < 256 {
let mut kj = key[j];
j += 1;
for _ in 0..8 {
let b = (kj & 1) as i32;
poly[i] = b * q2;
poly[i + 256] = b * q2;
poly[i + 512] = b * q2;
poly[i + 768] = b * q2;
kj >>= 1;
i += 1;
}
}
}
fn decode(poly: &[i32], key: &mut [u8]) {
let q2 = PRIME / 2;
for i in 0..32 {
key[i] = 0;
}
let mut i = 0;
let mut j = 0;
while i < 256 {
for _ in 0..8 {
let t = nabs(poly[i] - q2)
+ nabs(poly[i + 256] - q2)
+ nabs(poly[i + 512] - q2)
+ nabs(poly[i + 768] - q2);
let mut b = t - PRIME;
b = (b >> 31) & 1;
key[j] = (key[j] >> 1) + ((b << 7) as u8);
i += 1;
}
j += 1;
}
}
/* convert 32-byte seed to random polynomial */
fn parse(seed: &[u8], poly: &mut [i32]) {
let mut hash: [u8; 4 * DEGREE] = [0; 4 * DEGREE];
let mut sh = SHA3::new(sha3::SHAKE128);
for i in 0..32 {
sh.process(seed[i])
}
sh.shake(&mut hash, 4 * DEGREE);
let mut j = 0;
for i in 0..DEGREE {
let mut n = (hash[j] & 0x7f) as i32;
n <<= 8;
n += (hash[j + 1]) as i32;
n <<= 8;
n += (hash[j + 2]) as i32;
n <<= 8;
n += (hash[j + 3]) as i32;
j += 4;
poly[i] = nres(n);
//poly[i]=modmul(n,ONE); // reduce 31-bit random number mod q
}
}
/* Compress 14 bits polynomial coefficients into byte array */
/* 7 bytes is 3x14 */
fn nhs_pack(poly: &[i32], array: &mut [u8]) {
let mut j = 0;
let mut i = 0;
while i < DEGREE {
let a = poly[i];
let b = poly[i + 1];
let c = poly[i + 2];
let d = poly[i + 3];
i += 4;
array[j] = (a & 0xff) as u8;
array[j + 1] = (((a >> 8) | (b << 6)) & 0xff) as u8;
array[j + 2] = ((b >> 2) & 0xff) as u8;
array[j + 3] = (((b >> 10) | (c << 4)) & 0xff) as u8;
array[j + 4] = ((c >> 4) & 0xff) as u8;
array[j + 5] = (((c >> 12) | (d << 2)) & 0xff) as u8;
array[j + 6] = (d >> 6) as u8;
j += 7;
}
}
fn nhs_unpack(array: &[u8], poly: &mut [i32]) {
let mut j = 0;
let mut i = 0;
while i < DEGREE {
let a = array[j] as i32;
let b = array[j + 1] as i32;
let c = array[j + 2] as i32;
let d = array[j + 3] as i32;
let e = array[j + 4] as i32;
let f = array[j + 5] as i32;
let g = array[j + 6] as i32;
j += 7;
poly[i] = a | ((b & 0x3f) << 8);
poly[i + 1] = (b >> 6) | (c << 2) | ((d & 0xf) << 10);
poly[i + 2] = (d >> 4) | (e << 4) | ((f & 3) << 12);
poly[i + 3] = (f >> 2) | (g << 6);
i += 4;
}
}
/* See https://eprint.iacr.org/2016/1157.pdf */
fn compress(poly: &[i32], array: &mut [u8]) {
let mut col = 0 as i32;
let mut j = 0;
let mut i = 0;
while i < DEGREE {
for _ in 0..8 {
let b = round(poly[i] * 8, PRIME) & 7;
col = (col << 3) + b;
i += 1;
}
array[j] = (col & 0xff) as u8;
array[j + 1] = ((col >> 8) & 0xff) as u8;
array[j + 2] = ((col >> 16) & 0xff) as u8;
j += 3;
col = 0;
}
}
fn decompress(array: &[u8], poly: &mut [i32]) {
let mut j = 0;
let mut i = 0;
while i < DEGREE {
let mut col = (array[j + 2] as i32) & 0xff;
col = (col << 8) + ((array[j + 1] as i32) & 0xff);
col = (col << 8) + ((array[j] as i32) & 0xff);
j += 3;
for _ in 0..8 {
let b = (col & 0xe00000) >> 21;
col <<= 3;
poly[i] = round(b * PRIME, 8);
i += 1;
}
}
}
/* generate centered binomial distribution */
fn error(rng: &mut RAND, poly: &mut [i32]) {
for i in 0..DEGREE {
let mut n1 = ((rng.getbyte() as i32) & 0xff) + (((rng.getbyte() as i32) & 0xff) << 8);
let mut n2 = ((rng.getbyte() as i32) & 0xff) + (((rng.getbyte() as i32) & 0xff) << 8);
let mut r = 0 as i32;
for _ in 0..16 {
r += (n1 & 1) - (n2 & 1);
n1 >>= 1;
n2 >>= 1;
}
poly[i] = r + PRIME;
}
}
fn redc_it(p: &mut [i32]) {
for i in 0..DEGREE {
p[i] = redc(p[i] as u64);
}
}
fn nres_it(p: &mut [i32]) {
for i in 0..DEGREE {
p[i] = nres(p[i]);
}
}
fn poly_mul(p1: &mut [i32], p3: &[i32]) {
for i in 0..DEGREE {
p1[i] = modmul(p1[i], p3[i]);
}
}
fn poly_add(p1: &mut [i32], p3: &[i32]) {
for i in 0..DEGREE {
p1[i] += p3[i];
}
}
fn poly_rsub(p1: &mut [i32], p2: &[i32]) {
for i in 0..DEGREE {
p1[i] = p2[i] + PRIME - p1[i];
}
}
/* reduces inputs < 2q */
fn poly_soft_reduce(poly: &mut [i32]) {
for i in 0..DEGREE {
let e = poly[i] - PRIME;
poly[i] = e + ((e >> (WL - 1)) & PRIME);
}
}
/* fully reduces modulo q */
fn poly_hard_reduce(poly: &mut [i32]) {
for i in 0..DEGREE {
let mut e = modmul(poly[i], ONE);
e -= PRIME;
poly[i] = e + ((e >> (WL - 1)) & PRIME);
}
}
/* API functions. See https://eprint.iacr.org/2016/1157.pdf Protocol 1 */
// ss is secret key, sb is seed|public key to be sent to client
pub fn server_1(rng: &mut RAND, sb: &mut [u8], ss: &mut [u8]) {
let mut seed: [u8; 32] = [0; 32];
let mut array: [u8; 1792] = [0; 1792];
let mut s: [i32; DEGREE] = [0; DEGREE];
let mut e: [i32; DEGREE] = [0; DEGREE];
let mut b: [i32; DEGREE] = [0; DEGREE];
for i in 0..32 {
seed[i] = rng.getbyte();
}
parse(&seed, &mut b);
error(rng, &mut e);
error(rng, &mut s);
ntt(&mut s);
ntt(&mut e);
poly_mul(&mut b, &s);
poly_add(&mut b, &e);
poly_hard_reduce(&mut b);
redc_it(&mut b);
nhs_pack(&b, &mut array);
for i in 0..32 {
sb[i] = seed[i];
}
for i in 0..1792 {
sb[i + 32] = array[i];
}
poly_hard_reduce(&mut s);
nhs_pack(&s, &mut array);
for i in 0..1792 {
ss[i] = array[i];
}
}
// optimized to reduce memory
// uc is U|cbar to be returned to server
// okey is shared key
pub fn client(rng: &mut RAND, sb: &[u8], uc: &mut [u8], okey: &mut [u8]) {
let mut sh = SHA3::new(sha3::HASH256);
let mut seed: [u8; 32] = [0; 32];
let mut array: [u8; 1792] = [0; 1792];
let mut key: [u8; 32] = [0; 32];
let mut cc: [u8; 384] = [0; 384];
let mut sd: [i32; DEGREE] = [0; DEGREE];
let mut ed: [i32; DEGREE] = [0; DEGREE];
let mut u: [i32; DEGREE] = [0; DEGREE];
let mut k: [i32; DEGREE] = [0; DEGREE];
let mut c: [i32; DEGREE] = [0; DEGREE];
error(rng, &mut sd);
error(rng, &mut ed);
ntt(&mut sd);
ntt(&mut ed);
for i in 0..32 {
seed[i] = sb[i];
}
for i in 0..1792 {
array[i] = sb[i + 32];
}
parse(&seed, &mut u);
poly_mul(&mut u, &sd);
poly_add(&mut u, &ed);
poly_hard_reduce(&mut u);
for i in 0..32 {
key[i] = rng.getbyte();
}
for i in 0..32 {
sh.process(key[i]);
}
sh.hash(&mut key);
encode(&key, &mut k);
nhs_unpack(&array, &mut c);
nres_it(&mut c);
poly_mul(&mut c, &sd);
intt(&mut c);
error(rng, &mut ed);
poly_add(&mut c, &ed);
poly_add(&mut c, &k);
compress(&c, &mut cc);
sh = SHA3::new(sha3::HASH256);
for i in 0..32 {
sh.process(key[i]);
}
sh.hash(&mut key);
for i in 0..32 {
okey[i] = key[i];
}
redc_it(&mut u);
nhs_pack(&u, &mut array);
for i in 0..1792 {
uc[i] = array[i];
}
for i in 0..384 {
uc[i + 1792] = cc[i];
}
}
// calculate shared okey from uc and secret key ss
pub fn server_2(ss: &[u8], uc: &[u8], okey: &mut [u8]) {
let mut sh = SHA3::new(sha3::HASH256);
let mut s: [i32; DEGREE] = [0; DEGREE];
let mut k: [i32; DEGREE] = [0; DEGREE];
let mut c: [i32; DEGREE] = [0; DEGREE];
let mut array: [u8; 1792] = [0; 1792];
let mut key: [u8; 32] = [0; 32];
let mut cc: [u8; 384] = [0; 384];
for i in 0..1792 {
array[i] = uc[i];
}
nhs_unpack(&array, &mut k);
nres_it(&mut k);
for i in 0..384 {
cc[i] = uc[i + 1792];
}
decompress(&cc, &mut c);
for i in 0..1792 {
array[i] = ss[i];
}
nhs_unpack(&array, &mut s);
poly_mul(&mut k, &s);
intt(&mut k);
poly_rsub(&mut k, &c);
poly_soft_reduce(&mut k);
decode(&k, &mut key);
for i in 0..32 {
sh.process(key[i]);
}
sh.hash(&mut key);
for i in 0..32 {
okey[i] = key[i];
}
}
/*
fn main() {
let x=3;
let y=redc(x as u64);
let z=redc((y as u64)*(R2MODP));
println!("{:02x}",z);
let mut a:[i32;1024]=[0;1024];
for i in 0..1024 {a[i]=i as i32}
ntt(&mut a);
for i in 0..1024 {a[i]=modmul(a[i],ONE)}
intt(&mut a);
println!("{:02x}",a[7]);
}
*/
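/* illustrative end-to-end use of the API above, a sketch only: the three-pass
   flow of Protocol 1, assuming RAND is seeded with real entropy. Buffer sizes
   follow the packing used by server_1/client/server_2:
   sb = seed(32)|b(1792), ss = s(1792), uc = U(1792)|cbar(384), keys are 32 bytes.
fn main() {
    let mut raw: [u8; 128] = [0; 128];
    for i in 0..128 {raw[i] = i as u8}       // stand-in entropy; use real randomness
    let mut rng = RAND::new();
    rng.seed(128, &raw);
    let mut sb: [u8; 1824] = [0; 1824];      // server -> client: seed|b
    let mut ss: [u8; 1792] = [0; 1792];      // server secret s, packed
    server_1(&mut rng, &mut sb, &mut ss);
    let mut uc: [u8; 2176] = [0; 2176];      // client -> server: U|cbar
    let mut ck: [u8; 32] = [0; 32];
    client(&mut rng, &sb, &mut uc, &mut ck);
    let mut sk: [u8; 32] = [0; 32];
    server_2(&ss, &uc, &mut sk);
    // with overwhelming probability both sides now hold the same 32-byte key
    assert_eq!(ck, sk);
}
*/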

File diff suppressed because it is too large

183
crates/bls48581/src/rand.rs Normal file
View File

@ -0,0 +1,183 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//extern crate mcore;
use crate::hash256::HASH256;
pub const RAND_NK: usize = 21;
const RAND_NJ: usize = 6;
const RAND_NV: usize = 8;
// Marsaglia-Zaman random generator (https://projecteuclid.org/euclid.aoap/1177005878)
// Analysis: https://ieeexplore.ieee.org/document/669305
#[allow(non_camel_case_types)]
pub struct RAND {
pub ira: [u32; RAND_NK], /* random number... */
pub rndptr: usize,
pub borrow: u32,
pub pool_ptr: usize,
pub pool: [u8; 32],
}
impl RAND {
pub fn new() -> RAND {
RAND {
ira: [0; RAND_NK],
rndptr: 0,
borrow: 0,
pool_ptr: 0,
pool: [0; 32],
}
}
#[allow(dead_code)]
pub fn clean(&mut self) {
self.pool_ptr = 0;
self.rndptr = 0;
for i in 0..32 {
self.pool[i] = 0
}
for i in 0..RAND_NK {
self.ira[i] = 0
}
self.borrow = 0;
}
fn sbrand(&mut self) -> u32 {
/* Marsaglia & Zaman random number generator */
self.rndptr += 1;
if self.rndptr < RAND_NK {
return self.ira[self.rndptr];
}
self.rndptr = 0;
let mut k = RAND_NK - RAND_NJ;
for i in 0..RAND_NK {
/* calculate next NK values */
if k == RAND_NK {
k = 0
}
let t = self.ira[k];
let pdiff = t.wrapping_sub(self.ira[i]).wrapping_sub(self.borrow);
if pdiff < t {
self.borrow = 0
}
if pdiff > t {
self.borrow = 1
}
self.ira[i] = pdiff;
k += 1;
}
self.ira[0]
}
fn sirand(&mut self, seed: u32) {
let mut m: u32 = 1;
let mut sd = seed;
self.borrow = 0;
self.rndptr = 0;
self.ira[0] ^= sd;
for i in 1..RAND_NK {
/* fill initialisation vector */
let inn = (RAND_NV * i) % RAND_NK;
self.ira[inn] ^= m; /* note XOR */
let t = m;
m = sd.wrapping_sub(m);
sd = t;
}
for _ in 0..10000 {
self.sbrand();
} /* "warm-up" & stir the generator */
}
fn fill_pool(&mut self) {
let mut sh = HASH256::new();
for _ in 0..128 {
sh.process((self.sbrand() & 0xff) as u8)
}
let w = sh.hash();
for i in 0..32 {
self.pool[i] = w[i]
}
self.pool_ptr = 0;
}
fn pack(b: [u8; 4]) -> u32 {
/* pack 4 bytes into a 32-bit Word */
(((b[3] as u32) & 0xff) << 24)
| (((b[2] as u32) & 0xff) << 16)
| (((b[1] as u32) & 0xff) << 8)
| ((b[0] as u32) & 0xff)
}
pub fn seed(&mut self, rawlen: usize, raw: &[u8]) {
/* initialise from at least 128 byte string of raw random entropy */
let mut b: [u8; 4] = [0; 4];
let mut sh = HASH256::new();
self.pool_ptr = 0;
for i in 0..RAND_NK {
self.ira[i] = 0
}
if rawlen > 0 {
for i in 0..rawlen {
sh.process(raw[i]);
}
let digest = sh.hash();
/* initialise PRNG from distilled randomness */
for i in 0..8 {
b[0] = digest[4 * i];
b[1] = digest[4 * i + 1];
b[2] = digest[4 * i + 2];
b[3] = digest[4 * i + 3];
self.sirand(RAND::pack(b));
}
}
self.fill_pool();
}
pub fn getbyte(&mut self) -> u8 {
let r = self.pool[self.pool_ptr];
self.pool_ptr += 1;
if self.pool_ptr >= 32 {
self.fill_pool()
}
r
}
}
/* test main program
fn main() {
let mut raw : [u8;100]=[0;100];
let mut rng=RAND::new();
rng.clean();
for i in 0..100 {raw[i]=i as u8}
rng.seed(100,&raw);
for _ in 0..1000 {
print!("{:03} ",rng.getbyte());
}
} */

344
crates/bls48581/src/sha3.rs Normal file
View File

@ -0,0 +1,344 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub const HASH224: usize = 28;
pub const HASH256: usize = 32;
pub const HASH384: usize = 48;
pub const HASH512: usize = 64;
pub const SHAKE128: usize = 16;
pub const SHAKE256: usize = 32;
const ROUNDS: usize = 24;
const RC: [u64; 24] = [
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
];
pub struct SHA3 {
length: usize,
rate: usize,
len: usize,
//s: [[u64; 5]; 5],
s: [u64;25],
}
impl SHA3 {
fn rotl(x: u64, n: u64) -> u64 {
((x) << n) | ((x) >> (64 - n))
}
fn transform(&mut self) {
for k in 0..ROUNDS {
let c0=self.s[0]^self.s[5]^self.s[10]^self.s[15]^self.s[20];
let c1=self.s[1]^self.s[6]^self.s[11]^self.s[16]^self.s[21];
let c2=self.s[2]^self.s[7]^self.s[12]^self.s[17]^self.s[22];
let c3=self.s[3]^self.s[8]^self.s[13]^self.s[18]^self.s[23];
let c4=self.s[4]^self.s[9]^self.s[14]^self.s[19]^self.s[24];
let d0=c4^SHA3::rotl(c1,1);
let d1=c0^SHA3::rotl(c2,1);
let d2=c1^SHA3::rotl(c3,1);
let d3=c2^SHA3::rotl(c4,1);
let d4=c3^SHA3::rotl(c0,1);
let b00 = self.s[0]^d0;
let b02 = SHA3::rotl(self.s[1]^d1, 1);
let b04 = SHA3::rotl(self.s[2]^d2, 62);
let b01 = SHA3::rotl(self.s[3]^d3, 28);
let b03 = SHA3::rotl(self.s[4]^d4, 27);
let b13 = SHA3::rotl(self.s[5]^d0, 36);
let b10 = SHA3::rotl(self.s[6]^d1, 44);
let b12 = SHA3::rotl(self.s[7]^d2, 6);
let b14 = SHA3::rotl(self.s[8]^d3, 55);
let b11 = SHA3::rotl(self.s[9]^d4, 20);
let b21 = SHA3::rotl(self.s[10]^d0, 3);
let b23 = SHA3::rotl(self.s[11]^d1, 10);
let b20 = SHA3::rotl(self.s[12]^d2, 43);
let b22 = SHA3::rotl(self.s[13]^d3, 25);
let b24 = SHA3::rotl(self.s[14]^d4, 39);
let b34 = SHA3::rotl(self.s[15]^d0, 41);
let b31 = SHA3::rotl(self.s[16]^d1, 45);
let b33 = SHA3::rotl(self.s[17]^d2, 15);
let b30 = SHA3::rotl(self.s[18]^d3, 21);
let b32 = SHA3::rotl(self.s[19]^d4, 8);
let b42 = SHA3::rotl(self.s[20]^d0, 18);
let b44 = SHA3::rotl(self.s[21]^d1, 2);
let b41 = SHA3::rotl(self.s[22]^d2, 61);
let b43 = SHA3::rotl(self.s[23]^d3, 56);
let b40 = SHA3::rotl(self.s[24]^d4, 14);
self.s[0]=b00^(!b10&b20);
self.s[1]=b10^(!b20&b30);
self.s[2]=b20^(!b30&b40);
self.s[3]=b30^(!b40&b00);
self.s[4]=b40^(!b00&b10);
self.s[5]=b01^(!b11&b21);
self.s[6]=b11^(!b21&b31);
self.s[7]=b21^(!b31&b41);
self.s[8]=b31^(!b41&b01);
self.s[9]=b41^(!b01&b11);
self.s[10]=b02^(!b12&b22);
self.s[11]=b12^(!b22&b32);
self.s[12]=b22^(!b32&b42);
self.s[13]=b32^(!b42&b02);
self.s[14]=b42^(!b02&b12);
self.s[15]=b03^(!b13&b23);
self.s[16]=b13^(!b23&b33);
self.s[17]=b23^(!b33&b43);
self.s[18]=b33^(!b43&b03);
self.s[19]=b43^(!b03&b13);
self.s[20]=b04^(!b14&b24);
self.s[21]=b14^(!b24&b34);
self.s[22]=b24^(!b34&b44);
self.s[23]=b34^(!b44&b04);
self.s[24]=b44^(!b04&b14);
self.s[0] ^= RC[k];
}
}
/* Initialise Hash function */
pub fn init(&mut self, olen: usize) {
/* initialise */
for i in 0..25 {
self.s[i] = 0;
}
self.length = 0;
self.len = olen;
self.rate = 200 - 2 * olen;
}
pub fn new(olen: usize) -> SHA3 {
let mut nh = SHA3 {
length: 0,
rate: 0,
len: 0,
s: [0; 25],
};
nh.init(olen);
nh
}
pub fn new_copy(hh: &SHA3) -> SHA3 {
let mut nh = SHA3 {
length: 0,
rate: 0,
len: 0,
s: [0; 25],
};
nh.length=hh.length;
nh.len=hh.len;
nh.rate=hh.rate;
for i in 0..25 {
nh.s[i] = hh.s[i];
}
nh
}
/* process a single byte */
pub fn process(&mut self, byt: u8) {
/* process the next message byte */
let cnt = self.length as usize;
let b = cnt % 8;
let ind = cnt / 8;
self.s[ind] ^= (byt as u64) << (8 * b);
self.length += 1;
if self.length == self.rate {
self.length=0;
self.transform();
}
}
/* process an array of bytes */
pub fn process_array(&mut self, b: &[u8]) {
for i in 0..b.len() {
self.process(b[i])
}
}
/* process a 32-bit integer */
pub fn process_num(&mut self, n: i32) {
self.process(((n >> 24) & 0xff) as u8);
self.process(((n >> 16) & 0xff) as u8);
self.process(((n >> 8) & 0xff) as u8);
self.process((n & 0xff) as u8);
}
pub fn squeeze(&mut self, buff: &mut [u8], olen: usize) {
let mut m = 0;
let nb=olen/self.rate;
for _ in 0..nb {
for i in 0..self.rate/8 {
let mut el=self.s[i];
for _ in 0..8 {
buff[m]=(el & 0xff) as u8;
m += 1;
el >>= 8;
}
}
self.transform();
}
let mut i=0;
while m<olen {
let mut el=self.s[i]; i += 1;
for _ in 0..8 {
buff[m]=(el & 0xff) as u8;
m += 1;
if m >= olen {
break;
}
el >>= 8;
}
}
/*
loop {
for i in 0..25 {
let mut el = self.s[i];
for _ in 0..8 {
buff[m] = (el & 0xff) as u8;
m += 1;
if m >= olen || (m % self.rate) == 0 {
done = true;
break;
}
el >>= 8;
}
if done {
break;
}
}
if m >= olen {
break;
}
done = false;
self.transform();
} */
}
/* Generate 32-byte Hash */
pub fn hash(&mut self, digest: &mut [u8]) {
/* pad message and finish - supply digest */
let q = self.rate - self.length;
if q == 1 {
self.process(0x86);
} else {
self.process(0x06);
while self.length != self.rate - 1 {
self.process(0x00)
}
self.process(0x80);
}
let hlen = self.len as usize;
self.squeeze(digest, hlen);
}
pub fn continuing_hash(&mut self, digest: &mut [u8]) {
let mut sh=SHA3::new_copy(self);
sh.hash(digest)
}
pub fn shake(&mut self, digest: &mut [u8], olen: usize) {
let q = self.rate - self.length;
if q == 1 {
self.process(0x9f);
} else {
self.process(0x1f);
while self.length != self.rate - 1 {
self.process(0x00)
}
self.process(0x80);
}
self.squeeze(digest, olen);
}
pub fn continuing_shake(&mut self, digest: &mut [u8], olen: usize) {
let mut sh=SHA3::new_copy(self);
sh.shake(digest,olen);
}
}
//916f6061fe879741ca6469b43971dfdb28b1a32dc36cb3254e812be27aad1d18
//afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185
//98be04516c04cc73593fef3ed0352ea9f6443942d6950e29a372a681c3deaf4535423709b02843948684e029010badcc0acd8303fc85fdad3eabf4f78cae165635f57afd28810fc2
/*
fn main() {
let s = String::from("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
let mut digest: [u8;100]=[0;100];
let test = s.into_bytes();
let mut sh=SHA3::new(HASH256);
for i in 0..test.len(){
sh.process(test[i]);
}
sh.hash(&mut digest);
for i in 0..32 {print!("{:02x}",digest[i])}
println!("");
sh=SHA3::new(HASH512);
for i in 0..test.len(){
sh.process(test[i]);
}
sh.hash(&mut digest);
for i in 0..64 {print!("{:02x}",digest[i])}
println!("");
sh=SHA3::new(SHAKE256);
for i in 0..test.len(){
sh.process(test[i]);
}
sh.shake(&mut digest,72);
for i in 0..72 {print!("{:02x}",digest[i])}
println!("");
} */

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Shamir threshold secret sharing module */
/* Split any byte array into number of shares < 256 */
/* Specify number of shares required for recovery - nsr */
/* See TestMPIN.rs for an example of use */
use crate::rand::RAND;
const PTAB: [u8; 256] = [
1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115,
149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217,
112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208,
107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240,
11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174,
233, 32, 96, 160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195,
94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172,
239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175,
234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176,
203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54,
90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151,
162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
];
const LTAB: [u8; 256] = [
0, 255, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3, 100, 4, 224, 14, 52, 141,
129, 239, 76, 113, 8, 200, 248, 105, 28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228,
166, 114, 154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218,
142, 150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56, 102, 221, 253,
48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182, 30, 66,
58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243,
115, 167, 87, 175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44,
215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160, 127, 12, 246, 111, 23,
196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82,
161, 108, 170, 85, 41, 157, 151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63,
91, 209, 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146,
217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197, 49, 254,
24, 13, 99, 140, 128, 192, 247, 112, 7,
];
pub struct SHARE<'a> {
id: u8,
nsr: u8,
b: &'a [u8]
}
fn mul(x: u8, y: u8) -> u8 {
/* x.y= AntiLog(Log(x) + Log(y)) */
let ix = (x as usize) & 0xff;
let iy = (y as usize) & 0xff;
let lx = (LTAB[ix] as usize) & 0xff;
let ly = (LTAB[iy] as usize) & 0xff;
if x != 0 && y != 0 {
PTAB[(lx + ly) % 255]
} else {
0
}
}
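// for illustration, using the tables above: Log(0x57)=0x62 and Log(0x83)=0x50,
// so mul(0x57,0x83)=AntiLog((0x62+0x50)%255)=AntiLog(0xb2)=0xc1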
fn add(x: u8,y: u8) -> u8 {
x^y
}
fn inv(x: u8) -> u8 {
let ix = (x as usize) & 0xff;
let lx = (LTAB[ix] as usize) & 0xff;
PTAB[255-lx]
}
/* Lagrange interpolation */
fn interpolate(n: usize, x: &[u8], y: &[u8]) -> u8 {
let mut yp=0 as u8;
for i in 0..n {
let mut p=1 as u8;
for j in 0..n {
if i!=j {
p=mul(p,mul(x[j],inv(add(x[i],x[j]))));
}
}
yp=add(yp,mul(p,y[i]));
}
yp
}
impl<'a> SHARE<'a> {
/* Return a share of M */
/* input id - Unique share ID */
/* input nsr - Number of shares required for recovery */
/* input Message M to be shared */
/* input Random number generator rng to be used */
/* return share structure */
// must bind the lifetime of the byte array stored by the structure to the lifetime of s
pub fn new(ident: usize,numshare: usize,s: &'a mut [u8],m: &[u8], rng: &mut RAND) -> SHARE<'a> {
if ident<1 || ident>=256 || numshare<2 || numshare>=256 {
return SHARE{id:0,nsr:0,b:s};
}
let len=m.len();
for j in 0..len {
let mut x=ident as u8;
s[j]=m[j];
for _ in 1..numshare {
s[j]=add(s[j],mul(rng.getbyte(),x));
x=mul(x,ident as u8);
}
}
SHARE{id: ident as u8,nsr: numshare as u8,b:s}
}
/* recover M from shares */
pub fn recover(m: &mut [u8],s: &[SHARE]) {
let len=s[0].b.len();
let nsr=s[0].nsr as usize;
if nsr!=s.len() {
return;
}
for i in 1..nsr {
if s[i].nsr as usize != nsr || s[i].b.len()!=len {
return;
}
}
let mut x: [u8; 256] = [0; 256];
let mut y: [u8; 256] = [0; 256];
for j in 0..len {
for i in 0..nsr {
x[i]=s[i].id;
y[i]=s[i].b[j];
}
m[j]=interpolate(nsr,&x,&y);
}
}
}
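/* illustrative use of SHARE, a sketch: split a 16-byte secret into shares with
   ids 1..3, any 2 of which recover it. Because new() draws its polynomial
   coefficients from rng, every share of the same secret must be generated from
   a RAND in the same initial state, so the generator is reseeded identically
   before each call.
fn main() {
    let seed: [u8; 32] = [7; 32];            // stand-in entropy; use real randomness
    let secret: [u8; 16] = [0x42; 16];
    let nsr = 2;                             // shares required for recovery
    let mut b1: [u8; 16] = [0; 16];
    let mut b2: [u8; 16] = [0; 16];
    let mut b3: [u8; 16] = [0; 16];
    let mut rng = RAND::new(); rng.seed(32, &seed);
    let s1 = SHARE::new(1, nsr, &mut b1, &secret, &mut rng);
    let mut rng = RAND::new(); rng.seed(32, &seed);
    let s2 = SHARE::new(2, nsr, &mut b2, &secret, &mut rng);
    let mut rng = RAND::new(); rng.seed(32, &seed);
    let _s3 = SHARE::new(3, nsr, &mut b3, &secret, &mut rng);
    let mut recovered: [u8; 16] = [0; 16];
    SHARE::recover(&mut recovered, &[s1, s2]);   // any nsr distinct shares work
    assert_eq!(recovered, secret);
}
*/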

1251
crates/bls48581/src/x509.rs Normal file

File diff suppressed because it is too large

View File

@ -3,7 +3,6 @@ package app
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"math/big"
@ -23,10 +22,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
var (
@ -538,167 +534,6 @@ func (m model) View() string {
"\t\tType: %s\n",
m.frame.AggregateProofs[i].InclusionCommitments[0].TypeUrl,
)
switch m.frame.AggregateProofs[i].InclusionCommitments[0].TypeUrl {
case protobufs.IntrinsicExecutionOutputType:
explorerContent += "Application: Ceremony\n"
app, err := application.MaterializeApplicationFromFrame(m.frame)
if err != nil {
explorerContent += "Error: " + err.Error() + "\n"
continue
}
total := new(big.Int)
if app.RewardTrie.Root == nil ||
(app.RewardTrie.Root.External == nil &&
app.RewardTrie.Root.Internal == nil) {
explorerContent += "Total Rewards: 0 QUIL\n"
continue
}
limbs := []*tries.RewardInternalNode{}
if app.RewardTrie.Root.Internal != nil {
limbs = append(limbs, app.RewardTrie.Root.Internal)
} else {
total = total.Add(
total,
new(big.Int).SetUint64(app.RewardTrie.Root.External.Total),
)
}
for len(limbs) != 0 {
nextLimbs := []*tries.RewardInternalNode{}
for _, limb := range limbs {
for _, child := range limb.Child {
child := child
if child.Internal != nil {
nextLimbs = append(nextLimbs, child.Internal)
} else {
total = total.Add(
total,
new(big.Int).SetUint64(child.External.Total),
)
}
}
}
limbs = nextLimbs
}
explorerContent += "Total Rewards: " + total.String() + " QUIL\n"
state := app.LobbyState.String()
explorerContent += "Round State: " + state + "\n"
switch app.LobbyState {
case application.CEREMONY_APPLICATION_STATE_OPEN:
explorerContent += "Joins: \n"
for _, join := range app.LobbyJoins {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
join.PublicKeySignatureEd448.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Preferred Next Round Participants: \n"
for _, next := range app.NextRoundPreferredParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
next.KeyValue,
) + "\n"
}
explorerContent += fmt.Sprintf(
"State Transition Counter: %d\n",
app.StateCount,
)
case application.CEREMONY_APPLICATION_STATE_IN_PROGRESS:
explorerContent += fmt.Sprintf("Sub-Round: %d\n", app.RoundCount)
explorerContent += "Participants: \n"
for _, active := range app.ActiveParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
active.PublicKeySignatureEd448.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Latest Seen: \n"
for _, latest := range app.LatestSeenProverAttestations {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
latest.SeenProverKey.KeyValue,
) + " seen by " + base64.StdEncoding.EncodeToString(
latest.ProverSignature.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Dropped: \n"
for _, dropped := range app.DroppedParticipantAttestations {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
dropped.DroppedProverKey.KeyValue,
) + " confirmed by " + base64.StdEncoding.EncodeToString(
dropped.ProverSignature.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Preferred Next Round Participants: \n"
for _, next := range app.NextRoundPreferredParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
next.KeyValue,
) + "\n"
}
case application.CEREMONY_APPLICATION_STATE_FINALIZING:
explorerContent += fmt.Sprintf(
"Confirmed Shares: %d\n",
len(app.TranscriptShares),
)
explorerContent += "Participants: \n"
for _, active := range app.ActiveParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
active.PublicKeySignatureEd448.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Latest Seen: \n"
for _, latest := range app.LatestSeenProverAttestations {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
latest.SeenProverKey.KeyValue,
) + " seen by " + base64.StdEncoding.EncodeToString(
latest.ProverSignature.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Dropped: \n"
for _, dropped := range app.DroppedParticipantAttestations {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
dropped.DroppedProverKey.KeyValue,
) + " confirmed by " + base64.StdEncoding.EncodeToString(
dropped.ProverSignature.PublicKey.KeyValue,
) + "\n"
}
explorerContent += "Preferred Next Round Participants: \n"
for _, next := range app.NextRoundPreferredParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
next.KeyValue,
) + "\n"
}
case application.CEREMONY_APPLICATION_STATE_VALIDATING:
explorerContent += fmt.Sprintf(
"G1 Powers: %d\n", len(app.UpdatedTranscript.G1Powers),
)
explorerContent += "Preferred Next Round Participants: \n"
for _, next := range app.NextRoundPreferredParticipants {
explorerContent += "\t" + base64.StdEncoding.EncodeToString(
next.KeyValue,
) + "\n"
}
}
}
}
} else {
explorerContent = logoVersion(physicalWidth - 34)
@ -761,10 +596,6 @@ func consoleModel(
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}),
hex.EncodeToString(append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
)),
},
cursor: 0,
conn: conn,

View File

@ -11,7 +11,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -77,7 +76,6 @@ var engineSet = wire.NewSet(
crypto.NewKZGInclusionProver,
wire.Bind(new(crypto.InclusionProver), new(*crypto.KZGInclusionProver)),
time.NewMasterTimeReel,
ceremony.NewCeremonyExecutionEngine,
)
var consensusSet = wire.NewSet(

View File

@ -14,7 +14,6 @@ import (
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@ -130,7 +129,7 @@ var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPe
var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))
var engineSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), crypto.NewWesolowskiFrameProver, wire.Bind(new(crypto.FrameProver), new(*crypto.WesolowskiFrameProver)), crypto.NewKZGInclusionProver, wire.Bind(new(crypto.InclusionProver), new(*crypto.KZGInclusionProver)), time.NewMasterTimeReel, ceremony.NewCeremonyExecutionEngine)
var engineSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), crypto.NewWesolowskiFrameProver, wire.Bind(new(crypto.FrameProver), new(*crypto.WesolowskiFrameProver)), crypto.NewKZGInclusionProver, wire.Bind(new(crypto.InclusionProver), new(*crypto.KZGInclusionProver)), time.NewMasterTimeReel)
var consensusSet = wire.NewSet(master.NewMasterClockConsensusEngine, wire.Bind(
new(consensus.ConsensusEngine),

View File

@ -20,14 +20,14 @@ case "$os_type" in
# Check if the architecture is ARM
if [[ "$(uname -m)" == "arm64" ]]; then
# MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lvdf -ldl -lm'" "$@"
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lvdf -lbls48581 -ldl -lm'" "$@"
else
echo "Unsupported platform"
exit 1
fi
;;
"Linux")
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -Wl,-Bstatic -lvdf -Wl,-Bdynamic -ldl -lm'" "$@"
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -Wl,-Bstatic -lvdf -lbls48581 -Wl,-Bdynamic -ldl -lm'" "$@"
;;
*)
echo "Unsupported platform"

View File

@ -29,5 +29,5 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x00
return 0x01
}

View File

@ -1,124 +0,0 @@
package ceremony
import (
"encoding/binary"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *CeremonyDataClockConsensusEngine) handleMessage(
message *pb.Message,
) error {
go func() {
e.messageProcessorCh <- message
}()
return nil
}
func (e *CeremonyDataClockConsensusEngine) publishProof(
frame *protobufs.ClockFrame,
) error {
e.logger.Debug(
"publishing frame and aggregations",
zap.Uint64("frame_number", frame.FrameNumber),
)
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
peers, max, err := e.GetMostAheadPeer(head.FrameNumber)
if err != nil || len(peers) == 0 || head.FrameNumber > max {
timestamp := time.Now().UnixMilli()
msg := binary.BigEndian.AppendUint64([]byte{}, frame.FrameNumber)
msg = append(msg, config.GetVersion()...)
msg = binary.BigEndian.AppendUint64(msg, uint64(timestamp))
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
e.peerMapMx.Lock()
e.peerMap[string(e.pubSub.GetPeerID())] = &peerInfo{
peerId: e.pubSub.GetPeerID(),
multiaddr: "",
maxFrame: frame.FrameNumber,
version: config.GetVersion(),
signature: sig,
publicKey: e.pubSub.GetPublicKey(),
timestamp: timestamp,
totalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
}
list := &protobufs.CeremonyPeerListAnnounce{
PeerList: []*protobufs.CeremonyPeer{},
}
list.PeerList = append(list.PeerList, &protobufs.CeremonyPeer{
PeerId: e.pubSub.GetPeerID(),
Multiaddr: "",
MaxFrame: frame.FrameNumber,
Version: config.GetVersion(),
Signature: sig,
PublicKey: e.pubSub.GetPublicKey(),
Timestamp: timestamp,
TotalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
})
e.peerMapMx.Unlock()
if err := e.publishMessage(e.filter, list); err != nil {
e.logger.Debug("error publishing message", zap.Error(err))
}
}
return nil
}
func (e *CeremonyDataClockConsensusEngine) publishMessage(
filter []byte,
message proto.Message,
) error {
any := &anypb.Any{}
if err := any.MarshalFrom(message); err != nil {
return errors.Wrap(err, "publish message")
}
any.TypeUrl = strings.Replace(
any.TypeUrl,
"type.googleapis.com",
"types.quilibrium.com",
1,
)
payload, err := proto.Marshal(any)
if err != nil {
return errors.Wrap(err, "publish message")
}
h, err := poseidon.HashBytes(payload)
if err != nil {
return errors.Wrap(err, "publish message")
}
msg := &protobufs.Message{
Hash: h.Bytes(),
Address: e.provingKeyAddress,
Payload: payload,
}
data, err := proto.Marshal(msg)
if err != nil {
return errors.Wrap(err, "publish message")
}
return e.pubSub.PublishToBitmask(filter, data)
}

View File

@ -1,672 +0,0 @@
package ceremony
import (
"bytes"
"context"
"crypto"
"crypto/tls"
"encoding/binary"
"sync"
"time"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
const PEER_INFO_TTL = 60 * 60 * 1000
const UNCOOPERATIVE_PEER_INFO_TTL = 5 * 60 * 1000
type InclusionMap = map[curves.PairingPoint]*protobufs.InclusionCommitment
type PolynomialMap = map[curves.PairingPoint][]curves.PairingScalar
type SyncStatusType int
const (
SyncStatusNotSyncing = iota
SyncStatusAwaitingResponse
SyncStatusSynchronizing
SyncStatusFailed
)
type peerInfo struct {
peerId []byte
multiaddr string
maxFrame uint64
timestamp int64
lastSeen int64
version []byte
signature []byte
publicKey []byte
direct bool
totalDistance []byte
}
type ChannelServer = protobufs.CeremonyService_GetPublicChannelServer
type CeremonyDataClockConsensusEngine struct {
protobufs.UnimplementedCeremonyServiceServer
difficulty uint32
logger *zap.Logger
state consensus.EngineState
clockStore store.ClockStore
keyStore store.KeyStore
pubSub p2p.PubSub
keyManager keys.KeyManager
masterTimeReel *qtime.MasterTimeReel
dataTimeReel *qtime.DataTimeReel
peerInfoManager p2p.PeerInfoManager
provingKey crypto.Signer
provingKeyBytes []byte
provingKeyType keys.KeyType
provingKeyAddress []byte
lastFrameReceivedAt time.Time
latestFrameReceived uint64
frameProverTrie *tries.RollingFrecencyCritbitTrie
dependencyMap map[string]*anypb.Any
pendingCommits chan *anypb.Any
pendingCommitWorkers int64
inclusionProver qcrypto.InclusionProver
frameProver qcrypto.FrameProver
stagedLobbyStateTransitions *protobufs.CeremonyLobbyStateTransition
minimumPeersRequired int
statsClient protobufs.NodeStatsClient
currentReceivingSyncPeersMx sync.Mutex
currentReceivingSyncPeers int
frameChan chan *protobufs.ClockFrame
executionEngines map[string]execution.ExecutionEngine
filter []byte
input []byte
parentSelector []byte
syncingStatus SyncStatusType
syncingTarget []byte
previousHead *protobufs.ClockFrame
engineMx sync.Mutex
dependencyMapMx sync.Mutex
stagedLobbyStateTransitionsMx sync.Mutex
peerMapMx sync.RWMutex
peerAnnounceMapMx sync.Mutex
lastKeyBundleAnnouncementFrame uint64
peerMap map[string]*peerInfo
uncooperativePeersMap map[string]*peerInfo
messageProcessorCh chan *pb.Message
}
var _ consensus.DataConsensusEngine = (*CeremonyDataClockConsensusEngine)(nil)
// Creates a new data clock for ceremony execution - this is a hybrid clock,
// normally data clocks are bloom sharded and have node-specific proofs along
// with the public VDF proofs, but in this case it is a proof from the execution
// across all participating nodes.
func NewCeremonyDataClockConsensusEngine(
engineConfig *config.EngineConfig,
logger *zap.Logger,
keyManager keys.KeyManager,
clockStore store.ClockStore,
keyStore store.KeyStore,
pubSub p2p.PubSub,
frameProver qcrypto.FrameProver,
inclusionProver qcrypto.InclusionProver,
masterTimeReel *qtime.MasterTimeReel,
dataTimeReel *qtime.DataTimeReel,
peerInfoManager p2p.PeerInfoManager,
filter []byte,
seed []byte,
) *CeremonyDataClockConsensusEngine {
if logger == nil {
panic(errors.New("logger is nil"))
}
if engineConfig == nil {
panic(errors.New("engine config is nil"))
}
if keyManager == nil {
panic(errors.New("key manager is nil"))
}
if clockStore == nil {
panic(errors.New("clock store is nil"))
}
if keyStore == nil {
panic(errors.New("key store is nil"))
}
if pubSub == nil {
panic(errors.New("pubsub is nil"))
}
if frameProver == nil {
panic(errors.New("frame prover is nil"))
}
if inclusionProver == nil {
panic(errors.New("inclusion prover is nil"))
}
if masterTimeReel == nil {
panic(errors.New("master time reel is nil"))
}
if dataTimeReel == nil {
panic(errors.New("data time reel is nil"))
}
if peerInfoManager == nil {
panic(errors.New("peer info manager is nil"))
}
minimumPeersRequired := engineConfig.MinimumPeersRequired
if minimumPeersRequired == 0 {
minimumPeersRequired = 3
}
difficulty := engineConfig.Difficulty
if difficulty == 0 || difficulty == 10000 {
difficulty = 100000
}
var statsClient protobufs.NodeStatsClient
if engineConfig.StatsMultiaddr != "" {
ma, err := multiaddr.NewMultiaddr(engineConfig.StatsMultiaddr)
if err != nil {
panic(err)
}
_, addr, err := mn.DialArgs(ma)
if err != nil {
panic(err)
}
cc, err := grpc.Dial(
addr,
grpc.WithTransportCredentials(
credentials.NewTLS(&tls.Config{InsecureSkipVerify: false}),
),
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(600*1024*1024),
grpc.MaxCallRecvMsgSize(600*1024*1024),
),
)
if err != nil {
panic(err)
}
statsClient = protobufs.NewNodeStatsClient(cc)
}
e := &CeremonyDataClockConsensusEngine{
difficulty: difficulty,
logger: logger,
state: consensus.EngineStateStopped,
clockStore: clockStore,
keyStore: keyStore,
keyManager: keyManager,
pubSub: pubSub,
frameChan: make(chan *protobufs.ClockFrame),
executionEngines: map[string]execution.ExecutionEngine{},
dependencyMap: make(map[string]*anypb.Any),
parentSelector: []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
currentReceivingSyncPeers: 0,
lastFrameReceivedAt: time.Time{},
frameProverTrie: &tries.RollingFrecencyCritbitTrie{},
inclusionProver: inclusionProver,
syncingStatus: SyncStatusNotSyncing,
peerMap: map[string]*peerInfo{},
uncooperativePeersMap: map[string]*peerInfo{},
minimumPeersRequired: minimumPeersRequired,
frameProver: frameProver,
masterTimeReel: masterTimeReel,
dataTimeReel: dataTimeReel,
peerInfoManager: peerInfoManager,
statsClient: statsClient,
messageProcessorCh: make(chan *pb.Message),
}
logger.Info("constructing consensus engine")
signer, keyType, bytes, address := e.GetProvingKey(
engineConfig,
)
e.filter = filter
e.input = seed
e.provingKey = signer
e.provingKeyType = keyType
e.provingKeyBytes = bytes
e.provingKeyAddress = address
return e
}
func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
e.logger.Info("starting ceremony consensus engine")
e.state = consensus.EngineStateStarting
errChan := make(chan error)
e.state = consensus.EngineStateLoading
e.logger.Info("loading last seen state")
err := e.dataTimeReel.Start()
if err != nil {
panic(err)
}
e.frameProverTrie = e.dataTimeReel.GetFrameProverTrie()
err = e.createCommunicationKeys()
if err != nil {
panic(err)
}
go e.runMessageHandler()
e.logger.Info("subscribing to pubsub messages")
e.pubSub.Subscribe(e.filter, e.handleMessage, true)
go func() {
server := grpc.NewServer(
grpc.MaxSendMsgSize(600*1024*1024),
grpc.MaxRecvMsgSize(600*1024*1024),
)
protobufs.RegisterCeremonyServiceServer(server, e)
if err := e.pubSub.StartDirectChannelListener(
e.pubSub.GetPeerID(),
"",
server,
); err != nil {
panic(err)
}
}()
e.state = consensus.EngineStateCollecting
go func() {
thresholdBeforeConfirming := 4
for {
list := &protobufs.CeremonyPeerListAnnounce{
PeerList: []*protobufs.CeremonyPeer{},
}
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
e.latestFrameReceived = frame.FrameNumber
e.logger.Info(
"preparing peer announce",
zap.Uint64("frame_number", frame.FrameNumber),
)
timestamp := time.Now().UnixMilli()
msg := binary.BigEndian.AppendUint64([]byte{}, frame.FrameNumber)
msg = append(msg, config.GetVersion()...)
msg = binary.BigEndian.AppendUint64(msg, uint64(timestamp))
sig, err := e.pubSub.SignMessage(msg)
if err != nil {
panic(err)
}
e.peerMapMx.Lock()
e.peerMap[string(e.pubSub.GetPeerID())] = &peerInfo{
peerId: e.pubSub.GetPeerID(),
multiaddr: "",
maxFrame: frame.FrameNumber,
version: config.GetVersion(),
signature: sig,
publicKey: e.pubSub.GetPublicKey(),
timestamp: timestamp,
totalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
}
deletes := []*peerInfo{}
list.PeerList = append(list.PeerList, &protobufs.CeremonyPeer{
PeerId: e.pubSub.GetPeerID(),
Multiaddr: "",
MaxFrame: frame.FrameNumber,
Version: config.GetVersion(),
Signature: sig,
PublicKey: e.pubSub.GetPublicKey(),
Timestamp: timestamp,
TotalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
})
for _, v := range e.uncooperativePeersMap {
if v == nil {
continue
}
if v.timestamp <= time.Now().UnixMilli()-UNCOOPERATIVE_PEER_INFO_TTL ||
thresholdBeforeConfirming > 0 {
deletes = append(deletes, v)
}
}
for _, v := range deletes {
delete(e.uncooperativePeersMap, string(v.peerId))
}
e.peerMapMx.Unlock()
if e.statsClient != nil {
_, err := e.statsClient.PutPeerInfo(
context.Background(),
&protobufs.PutPeerInfoRequest{
PeerInfo: []*protobufs.PeerInfo{
{
PeerId: e.pubSub.GetPeerID(),
Multiaddrs: []string{""},
MaxFrame: frame.FrameNumber,
Version: config.GetVersion(),
Signature: sig,
PublicKey: e.pubSub.GetPublicKey(),
Timestamp: timestamp,
TotalDistance: e.dataTimeReel.GetTotalDistance().FillBytes(
make([]byte, 256),
),
},
},
UncooperativePeerInfo: []*protobufs.PeerInfo{},
},
)
if err != nil {
e.logger.Error("could not emit stats", zap.Error(err))
}
}
e.logger.Info(
"broadcasting peer info",
zap.Uint64("frame_number", frame.FrameNumber),
)
if err := e.publishMessage(e.filter, list); err != nil {
e.logger.Debug("error publishing message", zap.Error(err))
}
if thresholdBeforeConfirming > 0 {
thresholdBeforeConfirming--
}
time.Sleep(120 * time.Second)
}
}()
go e.runLoop()
go func() {
errChan <- nil
}()
return errChan
}
func (e *CeremonyDataClockConsensusEngine) runLoop() {
dataFrameCh := e.dataTimeReel.NewFrameCh()
e.logger.Info("waiting for peer list mappings")
// We need to re-tune this so that libp2p's peerstore activation threshold
// considers DHT peers to be correct:
time.Sleep(30 * time.Second)
for e.state < consensus.EngineStateStopping {
peerCount := e.pubSub.GetNetworkPeersCount()
if peerCount < e.minimumPeersRequired {
e.logger.Info(
"waiting for minimum peers",
zap.Int("peer_count", peerCount),
)
time.Sleep(1 * time.Second)
} else {
latestFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
select {
case dataFrame := <-dataFrameCh:
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
}
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if latestFrame != nil &&
dataFrame.FrameNumber > latestFrame.FrameNumber {
latestFrame = dataFrame
}
if e.latestFrameReceived < latestFrame.FrameNumber {
e.latestFrameReceived = latestFrame.FrameNumber
go func() {
select {
case e.frameChan <- latestFrame:
default:
}
}()
}
var nextFrame *protobufs.ClockFrame
if nextFrame, err = e.prove(latestFrame); err != nil {
e.logger.Error("could not prove", zap.Error(err))
e.state = consensus.EngineStateCollecting
continue
}
if bytes.Equal(
e.frameProverTrie.FindNearest(e.provingKeyAddress).External.Key,
e.provingKeyAddress,
) {
e.dataTimeReel.Insert(nextFrame, false)
if err = e.publishProof(nextFrame); err != nil {
e.logger.Error("could not publish", zap.Error(err))
e.state = consensus.EngineStateCollecting
}
}
case <-time.After(20 * time.Second):
dataFrame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if latestFrame, err = e.collect(dataFrame); err != nil {
e.logger.Error("could not collect", zap.Error(err))
continue
}
if latestFrame == nil ||
latestFrame.FrameNumber < dataFrame.FrameNumber {
latestFrame, err = e.dataTimeReel.Head()
if err != nil {
panic(err)
}
}
if e.latestFrameReceived < latestFrame.FrameNumber {
e.latestFrameReceived = latestFrame.FrameNumber
go func() {
select {
case e.frameChan <- latestFrame:
default:
}
}()
}
var nextFrame *protobufs.ClockFrame
if nextFrame, err = e.prove(latestFrame); err != nil {
e.logger.Error("could not prove", zap.Error(err))
e.state = consensus.EngineStateCollecting
continue
}
if bytes.Equal(
e.frameProverTrie.FindNearest(e.provingKeyAddress).External.Key,
e.provingKeyAddress,
) {
e.dataTimeReel.Insert(nextFrame, false)
if err = e.publishProof(nextFrame); err != nil {
e.logger.Error("could not publish", zap.Error(err))
e.state = consensus.EngineStateCollecting
}
}
}
}
}
}
func (e *CeremonyDataClockConsensusEngine) Stop(force bool) <-chan error {
e.logger.Info("stopping ceremony consensus engine")
e.state = consensus.EngineStateStopping
errChan := make(chan error)
wg := sync.WaitGroup{}
wg.Add(len(e.executionEngines))
for name := range e.executionEngines {
name := name
go func(name string) {
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
err = <-e.UnregisterExecutor(name, frame.FrameNumber, force)
if err != nil {
errChan <- err
}
wg.Done()
}(name)
}
e.logger.Info("waiting for execution engines to stop")
wg.Wait()
e.logger.Info("execution engines stopped")
e.dataTimeReel.Stop()
e.state = consensus.EngineStateStopped
e.engineMx.Lock()
defer e.engineMx.Unlock()
go func() {
errChan <- nil
}()
return errChan
}
func (e *CeremonyDataClockConsensusEngine) GetDifficulty() uint32 {
return e.difficulty
}
func (e *CeremonyDataClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
frame, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
return frame
}
func (e *CeremonyDataClockConsensusEngine) GetState() consensus.EngineState {
return e.state
}
func (
e *CeremonyDataClockConsensusEngine,
) GetFrameChannel() <-chan *protobufs.ClockFrame {
return e.frameChan
}
func (
e *CeremonyDataClockConsensusEngine,
) GetPeerInfo() *protobufs.PeerInfoResponse {
resp := &protobufs.PeerInfoResponse{}
e.peerMapMx.RLock()
for _, v := range e.peerMap {
resp.PeerInfo = append(resp.PeerInfo, &protobufs.PeerInfo{
PeerId: v.peerId,
Multiaddrs: []string{v.multiaddr},
MaxFrame: v.maxFrame,
Timestamp: v.timestamp,
Version: v.version,
Signature: v.signature,
PublicKey: v.publicKey,
TotalDistance: v.totalDistance,
})
}
for _, v := range e.uncooperativePeersMap {
resp.UncooperativePeerInfo = append(
resp.UncooperativePeerInfo,
&protobufs.PeerInfo{
PeerId: v.peerId,
Multiaddrs: []string{v.multiaddr},
MaxFrame: v.maxFrame,
Timestamp: v.timestamp,
Version: v.version,
Signature: v.signature,
PublicKey: v.publicKey,
TotalDistance: v.totalDistance,
},
)
}
e.peerMapMx.RUnlock()
return resp
}
func (e *CeremonyDataClockConsensusEngine) createCommunicationKeys() error {
_, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
if err != nil {
if errors.Is(err, keys.KeyNotFoundErr) {
_, err = e.keyManager.CreateAgreementKey(
"q-ratchet-idk",
keys.KeyTypeX448,
)
if err != nil {
return errors.Wrap(err, "announce key bundle")
}
} else {
return errors.Wrap(err, "announce key bundle")
}
}
_, err = e.keyManager.GetAgreementKey("q-ratchet-spk")
if err != nil {
if errors.Is(err, keys.KeyNotFoundErr) {
_, err = e.keyManager.CreateAgreementKey(
"q-ratchet-spk",
keys.KeyTypeX448,
)
if err != nil {
return errors.Wrap(err, "announce key bundle")
}
} else {
return errors.Wrap(err, "announce key bundle")
}
}
return nil
}

View File

@ -1,310 +0,0 @@
package ceremony
import (
"bytes"
"context"
"time"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *CeremonyDataClockConsensusEngine) prove(
previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
if !e.frameProverTrie.Contains(e.provingKeyAddress) {
e.stagedLobbyStateTransitionsMx.Lock()
e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
e.stagedLobbyStateTransitionsMx.Unlock()
e.state = consensus.EngineStateCollecting
return previousFrame, nil
}
e.stagedLobbyStateTransitionsMx.Lock()
executionOutput := &protobufs.IntrinsicExecutionOutput{}
app, err := application.MaterializeApplicationFromFrame(previousFrame)
if err != nil {
e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
e.stagedLobbyStateTransitionsMx.Unlock()
return nil, errors.Wrap(err, "prove")
}
if e.stagedLobbyStateTransitions == nil {
e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
}
e.logger.Info(
"proving new frame",
zap.Int("state_transitions", len(e.stagedLobbyStateTransitions.TypeUrls)),
)
var validLobbyTransitions *protobufs.CeremonyLobbyStateTransition
var skippedTransition *protobufs.CeremonyLobbyStateTransition
app, validLobbyTransitions, skippedTransition, err = app.ApplyTransition(
previousFrame.FrameNumber,
e.stagedLobbyStateTransitions,
true,
)
if err != nil {
e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
e.stagedLobbyStateTransitionsMx.Unlock()
return nil, errors.Wrap(err, "prove")
}
e.stagedLobbyStateTransitions = skippedTransition
defer e.stagedLobbyStateTransitionsMx.Unlock()
lobbyState, err := app.MaterializeLobbyStateFromApplication()
if err != nil {
return nil, errors.Wrap(err, "prove")
}
executionOutput.Address = application.CEREMONY_ADDRESS
executionOutput.Output, err = proto.Marshal(lobbyState)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
executionOutput.Proof, err = proto.Marshal(validLobbyTransitions)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
data, err := proto.Marshal(executionOutput)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
e.logger.Debug("encoded execution output")
commitment, err := e.inclusionProver.Commit(
data,
protobufs.IntrinsicExecutionOutputType,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
e.logger.Debug("creating kzg proof")
proof, err := e.inclusionProver.ProveAggregate(
[]*qcrypto.InclusionCommitment{commitment},
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
e.logger.Debug("finalizing execution proof")
frame, err := e.frameProver.ProveDataClockFrame(
previousFrame,
[][]byte{proof.AggregateCommitment},
[]*protobufs.InclusionAggregateProof{
{
Filter: e.filter,
FrameNumber: previousFrame.FrameNumber + 1,
InclusionCommitments: []*protobufs.InclusionCommitment{
{
Filter: e.filter,
FrameNumber: previousFrame.FrameNumber + 1,
TypeUrl: proof.InclusionCommitments[0].TypeUrl,
Commitment: proof.InclusionCommitments[0].Commitment,
Data: data,
Position: 0,
},
},
Proof: proof.Proof,
},
},
e.provingKey,
time.Now().UnixMilli(),
e.difficulty,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
e.logger.Info(
"returning new proven frame",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("proof_count", len(frame.AggregateProofs)),
zap.Int("commitment_count", len(frame.Input[516:])/74),
)
return frame, nil
}
func (e *CeremonyDataClockConsensusEngine) GetMostAheadPeer(
frameNumber uint64,
) (
[]byte,
uint64,
error,
) {
e.logger.Info(
"checking peer list",
zap.Int("peers", len(e.peerMap)),
zap.Int("uncooperative_peers", len(e.uncooperativePeersMap)),
zap.Uint64("current_head_frame", frameNumber),
)
max := frameNumber
var peer []byte = nil
e.peerMapMx.RLock()
for _, v := range e.peerMap {
e.logger.Debug(
"checking peer info",
zap.Binary("peer_id", v.peerId),
zap.Uint64("max_frame_number", v.maxFrame),
zap.Int64("timestamp", v.timestamp),
zap.Binary("version", v.version),
)
_, ok := e.uncooperativePeersMap[string(v.peerId)]
if v.maxFrame > max &&
v.timestamp > config.GetMinimumVersionCutoff().UnixMilli() &&
bytes.Compare(v.version, config.GetMinimumVersion()) >= 0 && !ok {
peer = v.peerId
max = v.maxFrame
}
}
e.peerMapMx.RUnlock()
if peer == nil {
return nil, 0, p2p.ErrNoPeersAvailable
}
return peer, max, nil
}
func (e *CeremonyDataClockConsensusEngine) sync(
currentLatest *protobufs.ClockFrame,
maxFrame uint64,
peerId []byte,
) (*protobufs.ClockFrame, error) {
latest := currentLatest
e.logger.Info("polling peer for new frames", zap.Binary("peer_id", peerId))
cc, err := e.pubSub.GetDirectChannel(peerId, "")
if err != nil {
e.logger.Debug(
"could not establish direct channel",
zap.Error(err),
)
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return latest, errors.Wrap(err, "sync")
}
client := protobufs.NewCeremonyServiceClient(cc)
response, err := client.GetDataFrame(
context.TODO(),
&protobufs.GetDataFrameRequest{
FrameNumber: 0,
},
grpc.MaxCallRecvMsgSize(600*1024*1024),
)
if err != nil {
e.logger.Debug(
"could not get frame",
zap.Error(err),
)
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
return latest, errors.Wrap(err, "sync")
}
if response == nil {
e.logger.Debug("received no response from peer")
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
return latest, nil
}
e.logger.Info(
"received new leading frame",
zap.Uint64("frame_number", response.ClockFrame.FrameNumber),
)
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
e.dataTimeReel.Insert(response.ClockFrame, false)
return response.ClockFrame, nil
}
func (e *CeremonyDataClockConsensusEngine) collect(
currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
e.logger.Info("collecting vdf proofs")
latest := currentFramePublished
for {
peerId, maxFrame, err := e.GetMostAheadPeer(latest.FrameNumber)
if maxFrame > latest.FrameNumber {
e.syncingStatus = SyncStatusSynchronizing
if err != nil {
e.logger.Info("no peers available for sync, waiting")
time.Sleep(5 * time.Second)
} else if maxFrame > latest.FrameNumber {
masterHead, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
if masterHead.FrameNumber < maxFrame {
e.logger.Info(
"master frame synchronization needed to continue, waiting",
zap.Uint64("master_frame_head", masterHead.FrameNumber),
zap.Uint64("max_data_frame_target", maxFrame),
)
time.Sleep(30 * time.Second)
continue
}
latest, err = e.sync(latest, maxFrame, peerId)
if err == nil {
break
}
}
} else {
break
}
}
e.syncingStatus = SyncStatusNotSyncing
if latest.FrameNumber < currentFramePublished.FrameNumber {
latest = currentFramePublished
}
e.logger.Info(
"returning leader frame",
zap.Uint64("frame_number", latest.FrameNumber),
)
return latest, nil
}

View File

@ -1,88 +0,0 @@
package ceremony
import (
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
)
func (e *CeremonyDataClockConsensusEngine) RegisterExecutor(
exec execution.ExecutionEngine,
frame uint64,
) <-chan error {
logger := e.logger.With(zap.String("execution_engine_name", exec.GetName()))
logger.Info("registering execution engine")
errChan := make(chan error)
go func() {
for {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
logger.Info(
"awaiting frame",
zap.Uint64("current_frame", masterFrame.FrameNumber),
zap.Uint64("target_frame", frame),
)
newFrame := masterFrame.FrameNumber
if newFrame >= frame {
logger.Info(
"injecting execution engine at frame",
zap.Uint64("current_frame", newFrame),
)
e.engineMx.Lock()
e.executionEngines[exec.GetName()] = exec
e.engineMx.Unlock()
errChan <- nil
break
}
}
}()
return errChan
}
func (e *CeremonyDataClockConsensusEngine) UnregisterExecutor(
name string,
frame uint64,
force bool,
) <-chan error {
logger := e.logger.With(zap.String("execution_engine_name", name))
logger.Info("unregistering execution engine")
errChan := make(chan error)
go func() {
for {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
logger.Info(
"awaiting frame",
zap.Uint64("current_frame", masterFrame.FrameNumber),
zap.Uint64("target_frame", frame),
)
newFrame := masterFrame.FrameNumber
if newFrame >= frame {
logger.Info(
"removing execution engine at frame",
zap.Uint64("current_frame", newFrame),
)
e.engineMx.Lock()
delete(e.executionEngines, name)
e.engineMx.Unlock()
errChan <- nil
break
}
}
}()
return errChan
}

View File

@ -1,360 +0,0 @@
package ceremony
import (
"bytes"
"encoding/binary"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *CeremonyDataClockConsensusEngine) runMessageHandler() {
for {
select {
case message := <-e.messageProcessorCh:
e.logger.Debug("handling message")
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
continue
}
e.peerMapMx.RLock()
peer, ok := e.peerMap[string(message.From)]
e.peerMapMx.RUnlock()
if ok && bytes.Compare(peer.version, config.GetMinimumVersion()) >= 0 &&
bytes.Equal(
e.frameProverTrie.FindNearest(e.provingKeyAddress).External.Key,
e.provingKeyAddress,
) && e.syncingStatus == SyncStatusNotSyncing {
for name := range e.executionEngines {
name := name
go func() error {
messages, err := e.executionEngines[name].ProcessMessage(
msg.Address,
msg,
)
if err != nil {
e.logger.Debug(
"could not process message for engine",
zap.Error(err),
zap.String("engine_name", name),
)
return nil
}
for _, appMessage := range messages {
appMsg := &anypb.Any{}
err := proto.Unmarshal(appMessage.Payload, appMsg)
if err != nil {
e.logger.Error(
"could not unmarshal app message",
zap.Error(err),
zap.String("engine_name", name),
)
continue
}
switch appMsg.TypeUrl {
case protobufs.CeremonyLobbyStateTransitionType:
t := &protobufs.CeremonyLobbyStateTransition{}
err := proto.Unmarshal(appMsg.Value, t)
if err != nil {
continue
}
if err := e.handleCeremonyLobbyStateTransition(t); err != nil {
continue
}
}
}
return nil
}()
}
}
any := &anypb.Any{}
if err := proto.Unmarshal(msg.Payload, any); err != nil {
e.logger.Error("error while unmarshaling", zap.Error(err))
continue
}
go func() {
switch any.TypeUrl {
case protobufs.ClockFrameType:
if !ok || bytes.Compare(
peer.version,
config.GetMinimumVersion(),
) < 0 {
e.logger.Debug("received frame from unknown or outdated peer")
return
}
if err := e.handleClockFrameData(
message.From,
msg.Address,
any,
false,
); err != nil {
return
}
case protobufs.CeremonyPeerListAnnounceType:
if err := e.handleCeremonyPeerListAnnounce(
message.From,
msg.Address,
any,
); err != nil {
return
}
}
}()
}
}
}
func (e *CeremonyDataClockConsensusEngine) handleCeremonyPeerListAnnounce(
peerID []byte,
address []byte,
any *anypb.Any,
) error {
announce := &protobufs.CeremonyPeerListAnnounce{}
if err := any.UnmarshalTo(announce); err != nil {
return errors.Wrap(err, "handle ceremony peer list announce")
}
for _, p := range announce.PeerList {
if bytes.Equal(p.PeerId, e.pubSub.GetPeerID()) {
continue
}
if !bytes.Equal(p.PeerId, peerID) {
continue
}
if p.PublicKey == nil || p.Signature == nil || p.Version == nil {
continue
}
if p.PublicKey != nil && p.Signature != nil && p.Version != nil {
key, err := pcrypto.UnmarshalEd448PublicKey(p.PublicKey)
if err != nil {
e.logger.Warn(
"peer announcement contained invalid pubkey",
zap.Binary("public_key", p.PublicKey),
)
continue
}
if !(peer.ID(p.PeerId)).MatchesPublicKey(key) {
e.logger.Warn(
"peer announcement peer id does not match pubkey",
zap.Binary("peer_id", p.PeerId),
zap.Binary("public_key", p.PublicKey),
)
continue
}
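// The announce signature covers the max frame (big-endian uint64), the
// version bytes, and the timestamp, in that order.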
msg := binary.BigEndian.AppendUint64([]byte{}, p.MaxFrame)
msg = append(msg, p.Version...)
msg = binary.BigEndian.AppendUint64(msg, uint64(p.Timestamp))
b, err := key.Verify(msg, p.Signature)
if err != nil || !b {
e.logger.Warn(
"peer provided invalid signature",
zap.Binary("msg", msg),
zap.Binary("public_key", p.PublicKey),
zap.Binary("signature", p.Signature),
)
continue
}
if bytes.Compare(p.Version, config.GetMinimumVersion()) < 0 &&
p.Timestamp > config.GetMinimumVersionCutoff().UnixMilli() {
e.logger.Debug(
"peer provided outdated version, penalizing app score",
zap.Binary("peer_id", p.PeerId),
)
e.pubSub.SetPeerScore(p.PeerId, -10000)
continue
}
}
e.peerMapMx.RLock()
if _, ok := e.uncooperativePeersMap[string(p.PeerId)]; ok {
e.peerMapMx.RUnlock()
continue
}
e.peerMapMx.RUnlock()
multiaddr := e.pubSub.GetMultiaddrOfPeer(p.PeerId)
e.pubSub.SetPeerScore(p.PeerId, 10)
e.peerMapMx.RLock()
existing, ok := e.peerMap[string(p.PeerId)]
e.peerMapMx.RUnlock()
if ok {
if existing.signature != nil && p.Signature == nil {
continue
}
if existing.publicKey != nil && p.PublicKey == nil {
continue
}
if existing.version != nil && p.Version == nil {
continue
}
if existing.timestamp > p.Timestamp {
continue
}
}
e.peerMapMx.Lock()
e.peerMap[string(p.PeerId)] = &peerInfo{
peerId: p.PeerId,
multiaddr: multiaddr,
maxFrame: p.MaxFrame,
direct: bytes.Equal(p.PeerId, peerID),
lastSeen: time.Now().Unix(),
timestamp: p.Timestamp,
version: p.Version,
signature: p.Signature,
publicKey: p.PublicKey,
totalDistance: p.TotalDistance,
}
e.peerMapMx.Unlock()
}
return nil
}
func (e *CeremonyDataClockConsensusEngine) handleCeremonyLobbyStateTransition(
transition *protobufs.CeremonyLobbyStateTransition,
) error {
if len(transition.TransitionInputs) != len(transition.TypeUrls) {
return errors.Wrap(
errors.New("invalid state transition"),
"handle ceremony lobby state transition",
)
}
e.stagedLobbyStateTransitionsMx.Lock()
if e.stagedLobbyStateTransitions == nil {
e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
}
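// Deduplicate: only append the transition's inputs if none of them have
// already been staged.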
found := false
for _, ti := range e.stagedLobbyStateTransitions.TransitionInputs {
for _, nti := range transition.TransitionInputs {
if bytes.Equal(ti, nti) {
found = true
}
}
}
if !found {
for i := range transition.TransitionInputs {
e.stagedLobbyStateTransitions.TypeUrls = append(
e.stagedLobbyStateTransitions.TypeUrls,
transition.TypeUrls[i],
)
e.stagedLobbyStateTransitions.TransitionInputs = append(
e.stagedLobbyStateTransitions.TransitionInputs,
transition.TransitionInputs[i],
)
}
}
e.stagedLobbyStateTransitionsMx.Unlock()
return nil
}
func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
peerID []byte,
address []byte,
any *anypb.Any,
isSync bool,
) error {
frame := &protobufs.ClockFrame{}
if err := any.UnmarshalTo(frame); err != nil {
return errors.Wrap(err, "handle clock frame data")
}
if e.latestFrameReceived > frame.FrameNumber {
return nil
}
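// Derive the prover address from the frame's public key and require it to
// be present in the prover trie before accepting the frame.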
addr, err := poseidon.HashBytes(
frame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "handle clock frame data")
}
prover := e.frameProverTrie.FindNearest(addr.Bytes())
if !bytes.Equal(prover.External.Key, addr.Bytes()) {
e.logger.Info(
"prover not in trie at frame, address may be in fork",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
)
return nil
}
e.logger.Info(
"got clock frame",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("proof_count", len(frame.AggregateProofs)),
)
if err := e.frameProver.VerifyDataClockFrame(frame); err != nil {
e.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "handle clock frame data")
}
if err := e.inclusionProver.VerifyFrame(frame); err != nil {
e.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "handle clock frame data")
}
e.logger.Info(
"clock frame was valid",
zap.Binary("address", address),
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
)
if e.latestFrameReceived < frame.FrameNumber {
e.latestFrameReceived = frame.FrameNumber
go func() {
select {
case e.frameChan <- frame:
default:
}
}()
}
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if frame.FrameNumber > head.FrameNumber {
e.dataTimeReel.Insert(frame, e.latestFrameReceived < frame.FrameNumber)
}
return nil
}

View File

@ -1,402 +0,0 @@
package ceremony
import (
"bytes"
"context"
"time"
"github.com/mr-tron/base58"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
var ErrNoNewFrames = errors.New("peer reported no frames")
func (e *CeremonyDataClockConsensusEngine) GetDataFrame(
ctx context.Context,
request *protobufs.GetDataFrameRequest,
) (*protobufs.DataFrameResponse, error) {
e.logger.Debug(
"received frame request",
zap.Uint64("frame_number", request.FrameNumber),
)
var frame *protobufs.ClockFrame
var err error
if request.FrameNumber == 0 {
frame, err = e.dataTimeReel.Head()
if frame.FrameNumber == 0 {
return nil, errors.Wrap(
errors.New("not currently syncable"),
"get data frame",
)
}
} else {
frame, _, err = e.clockStore.GetDataClockFrame(
e.filter,
request.FrameNumber,
false,
)
}
if err != nil {
e.logger.Error(
"received error while fetching time reel head",
zap.Error(err),
)
return nil, errors.Wrap(err, "get data frame")
}
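// Pick an earlier frame index for a weak recursive proof and attach that
// proof to the response when the indexed frame is available.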
idx, err := e.frameProver.GenerateWeakRecursiveProofIndex(frame)
if err != nil {
return nil, errors.Wrap(err, "get data frame")
}
indexFrame, _, err := e.clockStore.GetDataClockFrame(e.filter, idx, false)
if err != nil {
return &protobufs.DataFrameResponse{
ClockFrame: frame,
}, nil
}
proof := e.frameProver.FetchRecursiveProof(indexFrame)
e.logger.Debug(
"sending frame response",
zap.Uint64("frame_number", frame.FrameNumber),
)
return &protobufs.DataFrameResponse{
ClockFrame: frame,
Proof: proof,
}, nil
}
func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
server protobufs.CeremonyService_NegotiateCompressedSyncFramesServer,
) error {
return nil
}
// Deprecated: Use NegotiateCompressedSyncFrames.
// GetCompressedSyncFrames implements protobufs.CeremonyServiceServer.
func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
request *protobufs.ClockFramesRequest,
server protobufs.CeremonyService_GetCompressedSyncFramesServer,
) error {
e.logger.Debug(
"received clock frame request",
zap.Uint64("from_frame_number", request.FromFrameNumber),
zap.Uint64("to_frame_number", request.ToFrameNumber),
)
if err := server.SendMsg(
&protobufs.ClockFramesResponse{
Filter: request.Filter,
FromFrameNumber: 0,
ToFrameNumber: 0,
ClockFrames: []*protobufs.ClockFrame{},
},
); err != nil {
return errors.Wrap(err, "get compressed sync frames")
}
return nil
}
func (e *CeremonyDataClockConsensusEngine) decompressAndStoreCandidates(
peerId []byte,
syncMsg *protobufs.CeremonyCompressedSync,
) (*protobufs.ClockFrame, error) {
if len(syncMsg.TruncatedClockFrames) == 0 {
return nil, ErrNoNewFrames
}
head, err := e.dataTimeReel.Head()
if err != nil {
panic(err)
}
if len(syncMsg.TruncatedClockFrames) < int(
syncMsg.ToFrameNumber-syncMsg.FromFrameNumber+1,
) {
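// A peer that returns fewer frames than the requested range is marked
// uncooperative and dropped from the peer map.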
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return nil, errors.New("invalid continuity for compressed sync response")
}
var final *protobufs.ClockFrame
for _, frame := range syncMsg.TruncatedClockFrames {
frame := frame
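// The frame input carries a 516-byte prefix followed by 74-byte aggregate
// commitments.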
commits := (len(frame.Input) - 516) / 74
e.logger.Info(
"processing frame",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("aggregate_commits", commits),
)
for j := 0; j < commits; j++ {
e.logger.Debug(
"processing commit",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
)
commit := frame.Input[516+(j*74) : 516+((j+1)*74)]
var aggregateProof *protobufs.InclusionProofsMap
for _, a := range syncMsg.Proofs {
a := a
if bytes.Equal(a.FrameCommit, commit) {
e.logger.Info(
"found matching proof",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
)
aggregateProof = a
break
}
}
if aggregateProof == nil {
e.logger.Error(
"could not find matching proof",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
zap.Binary("proof", aggregateProof.Proof),
)
return nil, errors.Wrap(
store.ErrInvalidData,
"decompress and store candidates",
)
}
inc := &protobufs.InclusionAggregateProof{
Filter: e.filter,
FrameNumber: frame.FrameNumber,
InclusionCommitments: []*protobufs.InclusionCommitment{},
Proof: aggregateProof.Proof,
}
for k, c := range aggregateProof.Commitments {
k := k
c := c
e.logger.Debug(
"adding inclusion commitment",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
zap.Int("inclusion_commit_index", k),
zap.String("type_url", c.TypeUrl),
)
incCommit := &protobufs.InclusionCommitment{
Filter: e.filter,
FrameNumber: frame.FrameNumber,
Position: uint32(k),
TypeUrl: c.TypeUrl,
Data: []byte{},
Commitment: c.Commitment,
}
var output *protobufs.IntrinsicExecutionOutput
if c.TypeUrl == protobufs.IntrinsicExecutionOutputType {
output = &protobufs.IntrinsicExecutionOutput{}
}
for l, h := range c.SegmentHashes {
l := l
h := h
for _, s := range syncMsg.Segments {
s := s
if bytes.Equal(s.Hash, h) {
if output != nil {
if l == 0 {
e.logger.Debug(
"found first half of matching segment data",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
zap.Int("inclusion_commit_index", k),
zap.String("type_url", c.TypeUrl),
)
output.Address = s.Data[:32]
output.Output = s.Data[32:]
} else {
e.logger.Debug(
"found second half of matching segment data",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
zap.Int("inclusion_commit_index", k),
zap.String("type_url", c.TypeUrl),
)
output.Proof = s.Data
b, err := proto.Marshal(output)
if err != nil {
return nil, errors.Wrap(
err,
"decompress and store candidates",
)
}
incCommit.Data = b
break
}
} else {
e.logger.Debug(
"found matching segment data",
zap.Uint64("frame_number", frame.FrameNumber),
zap.Int("commit_index", j),
zap.Int("inclusion_commit_index", k),
zap.String("type_url", c.TypeUrl),
)
incCommit.Data = append(incCommit.Data, s.Data...)
break
}
}
}
}
inc.InclusionCommitments = append(
inc.InclusionCommitments,
incCommit,
)
}
frame.AggregateProofs = append(
frame.AggregateProofs,
inc,
)
}
f, err := proto.Marshal(frame)
if err != nil {
return nil, errors.Wrap(err, "decompress and store candidates")
}
any := &anypb.Any{
TypeUrl: protobufs.ClockFrameType,
Value: f,
}
if err = e.handleClockFrameData(
e.syncingTarget,
append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
),
any,
// Tell the time reel to process the frame (isSync = false) only when it is
// at or ahead of our current head and its frame number is divisible by 100
// (limited to avoid thrash):
head.FrameNumber > frame.FrameNumber || frame.FrameNumber%100 != 0,
); err != nil {
return nil, errors.Wrap(err, "decompress and store candidates")
}
final = frame
}
e.logger.Info(
"decompressed and stored sync for range",
zap.Uint64("from", syncMsg.FromFrameNumber),
zap.Uint64("to", syncMsg.ToFrameNumber),
)
return final, nil
}
type svr struct {
protobufs.UnimplementedCeremonyServiceServer
svrChan chan protobufs.CeremonyService_GetPublicChannelServer
}
func (e *svr) GetCompressedSyncFrames(
request *protobufs.ClockFramesRequest,
server protobufs.CeremonyService_GetCompressedSyncFramesServer,
) error {
return errors.New("not supported")
}
func (e *svr) NegotiateCompressedSyncFrames(
server protobufs.CeremonyService_NegotiateCompressedSyncFramesServer,
) error {
return errors.New("not supported")
}
func (e *svr) GetPublicChannel(
server protobufs.CeremonyService_GetPublicChannelServer,
) error {
go func() {
e.svrChan <- server
}()
<-server.Context().Done()
return nil
}
func (e *CeremonyDataClockConsensusEngine) GetPublicChannelForProvingKey(
initiator bool,
peerID []byte,
provingKey []byte,
) (p2p.PublicChannelClient, error) {
if initiator {
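// As the initiator, host a temporary gRPC server over a direct channel and
// wait up to 20 seconds for the peer to connect.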
svrChan := make(
chan protobufs.CeremonyService_GetPublicChannelServer,
)
after := time.After(20 * time.Second)
go func() {
server := grpc.NewServer(
grpc.MaxSendMsgSize(600*1024*1024),
grpc.MaxRecvMsgSize(600*1024*1024),
)
s := &svr{
svrChan: svrChan,
}
protobufs.RegisterCeremonyServiceServer(server, s)
if err := e.pubSub.StartDirectChannelListener(
peerID,
base58.Encode(provingKey),
server,
); err != nil {
e.logger.Error(
"could not get public channel for proving key",
zap.Error(err),
)
svrChan <- nil
}
}()
select {
case s := <-svrChan:
return s, nil
case <-after:
return nil, errors.Wrap(
errors.New("timed out"),
"get public channel for proving key",
)
}
} else {
cc, err := e.pubSub.GetDirectChannel(peerID, base58.Encode(provingKey))
if err != nil {
e.logger.Error(
"could not get public channel for proving key",
zap.Error(err),
)
return nil, nil
}
client := protobufs.NewCeremonyServiceClient(cc)
s, err := client.GetPublicChannel(
context.Background(),
grpc.MaxCallSendMsgSize(600*1024*1024),
grpc.MaxCallRecvMsgSize(600*1024*1024),
)
return s, errors.Wrap(err, "get public channel for proving key")
}
}
// GetPublicChannel implements protobufs.CeremonyServiceServer.
func (e *CeremonyDataClockConsensusEngine) GetPublicChannel(
server protobufs.CeremonyService_GetPublicChannelServer,
) error {
return errors.New("not supported")
}

View File

@ -1,61 +0,0 @@
package ceremony
import (
"crypto"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
)
func (e *CeremonyDataClockConsensusEngine) GetProvingKey(
engineConfig *config.EngineConfig,
) (crypto.Signer, keys.KeyType, []byte, []byte) {
provingKey, err := e.keyManager.GetSigningKey(engineConfig.ProvingKeyId)
if errors.Is(err, keys.KeyNotFoundErr) {
e.logger.Info("could not get proving key, generating")
provingKey, err = e.keyManager.CreateSigningKey(
engineConfig.ProvingKeyId,
keys.KeyTypeEd448,
)
}
if err != nil {
e.logger.Error("could not get proving key", zap.Error(err))
panic(err)
}
rawKey, err := e.keyManager.GetRawKey(engineConfig.ProvingKeyId)
if err != nil {
e.logger.Error("could not get proving key type", zap.Error(err))
panic(err)
}
provingKeyType := rawKey.Type
h, err := poseidon.HashBytes(rawKey.PublicKey)
if err != nil {
e.logger.Error("could not hash proving key", zap.Error(err))
panic(err)
}
provingKeyAddress := h.Bytes()
provingKeyAddress = append(
make([]byte, 32-len(provingKeyAddress)),
provingKeyAddress...,
)
return provingKey, provingKeyType, rawKey.PublicKey, provingKeyAddress
}
func (e *CeremonyDataClockConsensusEngine) IsInProverTrie(key []byte) bool {
h, err := poseidon.HashBytes(key)
if err != nil {
return false
}
provingKeyAddress := h.Bytes()
return e.frameProverTrie.Contains(provingKeyAddress)
}

View File

@ -511,20 +511,37 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
for i := uint32(0); i < parallelism; i++ {
i := i
go func() {
resp, err :=
clients[i].CalculateChallengeProof(
context.Background(),
&protobufs.ChallengeProofRequest{
Challenge: challenge,
Core: i,
Increment: increment,
},
)
if err != nil {
panic(err)
}
for j := 3; j > 0; j-- {
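// Retry the proof call up to three times, reconnecting to the data worker
// between attempts when a worker list is configured.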
resp, err :=
clients[i].CalculateChallengeProof(
context.Background(),
&protobufs.ChallengeProofRequest{
Challenge: challenge,
Core: i,
Increment: increment,
},
)
if err != nil {
if j == 1 || len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
panic(err)
}
if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
e.logger.Error(
"client failed, reconnecting after 50ms",
zap.Uint32("client", i),
)
time.Sleep(50 * time.Millisecond)
clients[i], err = e.createParallelDataClientsFromListAndIndex(i)
if err != nil {
panic(err)
}
}
continue
}
proofs[i] = resp.Output
proofs[i] = resp.Output
break
}
wg.Done()
}()
}
@ -568,6 +585,45 @@ func (e *MasterClockConsensusEngine) PerformDataCommitment(
return output, nextInput, prevIndex
}
func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
index uint32,
) (
protobufs.DataIPCServiceClient,
error,
) {
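// Dial the data worker multiaddr configured at this index and return a
// gRPC client for its IPC service.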
ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
if err != nil {
panic(err)
}
_, addr, err := mn.DialArgs(ma)
if err != nil {
panic(err)
}
conn, err := grpc.Dial(
addr,
grpc.WithTransportCredentials(
insecure.NewCredentials(),
),
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(10*1024*1024),
grpc.MaxCallRecvMsgSize(10*1024*1024),
),
)
if err != nil {
panic(err)
}
client := protobufs.NewDataIPCServiceClient(conn)
e.logger.Info(
"connected to data worker process",
zap.Uint32("client", index),
)
return client, nil
}
func (e *MasterClockConsensusEngine) createParallelDataClientsFromList() (
[]protobufs.DataIPCServiceClient,
error,

View File

@ -1,7 +1,5 @@
package crypto
import "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
type InclusionCommitment struct {
TypeUrl string
Data []byte
@ -15,16 +13,16 @@ type InclusionAggregateProof struct {
}
type InclusionProver interface {
Commit(
data []byte,
typeUrl string,
) (*InclusionCommitment, error)
ProveAggregate(commits []*InclusionCommitment) (
*InclusionAggregateProof,
error,
)
VerifyAggregate(proof *InclusionAggregateProof) (bool, error)
VerifyFrame(frame *protobufs.ClockFrame) error
// Commit(
// data []byte,
// typeUrl string,
// ) (*InclusionCommitment, error)
// ProveAggregate(commits []*InclusionCommitment) (
// *InclusionAggregateProof,
// error,
// )
// VerifyAggregate(proof *InclusionAggregateProof) (bool, error)
// VerifyFrame(frame *protobufs.ClockFrame) error
CommitRaw(
data []byte,
polySize uint64,

View File

@ -9,12 +9,11 @@ import (
"hash"
"math/big"
"os"
"runtime"
"sync"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)
@ -92,13 +91,13 @@ func TestInit(file string) {
panic(err)
}
g1s := make([]curves.PairingPoint, 1024)
g1s := make([]curves.PairingPoint, 65536)
g2s := make([]curves.PairingPoint, 257)
g1ffts := make([]curves.PairingPoint, 1024)
g1ffts := make([]curves.PairingPoint, 65536)
wg := sync.WaitGroup{}
wg.Add(1024)
wg.Add(65536)
for i := 0; i < 1024; i++ {
for i := 0; i < 65536; i++ {
i := i
go func() {
b, err := hex.DecodeString(cs.PowersOfTau.G1Affines[i][2:])
@ -207,13 +206,13 @@ func TestInit(file string) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16, 128, 1024}
sizes := []int64{16, 32, 64, 128, 256, 512, 1024, 2048, 65536}
wg.Add(len(sizes))
root := make([]curves.PairingScalar, 3)
roots := make([][]curves.PairingScalar, 3)
reverseRoots := make([][]curves.PairingScalar, 3)
ffts := make([][]curves.PairingPoint, 3)
root := make([]curves.PairingScalar, 9)
roots := make([][]curves.PairingScalar, 9)
reverseRoots := make([][]curves.PairingScalar, 9)
ffts := make([][]curves.PairingPoint, 9)
for idx, i := range sizes {
i := i
@ -297,208 +296,7 @@ func TestInit(file string) {
var csBytes []byte
func Init() {
// start with phase 1 ceremony:
bls48581.Init()
cs := &CeremonyState{}
if err := json.Unmarshal(csBytes, cs); err != nil {
panic(err)
}
g1s := make([]curves.PairingPoint, 65536)
g2s := make([]curves.PairingPoint, 257)
g1ffts := make([]curves.PairingPoint, 65536)
wg := errgroup.Group{}
wg.SetLimit(runtime.NumCPU())
for i := 0; i < 65536; i++ {
i := i
wg.Go(func() error {
b, err := hex.DecodeString(cs.PowersOfTau.G1Affines[i][2:])
if err != nil {
panic(err)
}
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
g1s[i] = g1.(curves.PairingPoint)
f, err := hex.DecodeString(cs.PowersOfTau.G1FFT[i][2:])
if err != nil {
panic(err)
}
g1fft, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(f)
if err != nil {
panic(err)
}
g1ffts[i] = g1fft.(curves.PairingPoint)
if i < 257 {
b, err := hex.DecodeString(cs.PowersOfTau.G2Affines[i][2:])
if err != nil {
panic(err)
}
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(
b,
)
if err != nil {
panic(err)
}
g2s[i] = g2.(curves.PairingPoint)
}
return nil
})
}
CeremonyRunningProducts = make([]curves.PairingPoint, len(cs.Witness.RunningProducts))
for i, s := range cs.Witness.RunningProducts {
i, s := i, s
wg.Go(func() error {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
CeremonyRunningProducts[i] = g1.(curves.PairingPoint)
return nil
})
}
CeremonyPotPubKeys = make([]curves.PairingPoint, len(cs.Witness.PotPubKeys))
for i, s := range cs.Witness.PotPubKeys {
i, s := i, s
wg.Go(func() error {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
CeremonyPotPubKeys[i] = g2.(curves.PairingPoint)
return nil
})
}
CeremonySignatories = make([]curves.Point, len(cs.VoucherPubKeys))
for i, s := range cs.VoucherPubKeys {
i, s := i, s
wg.Go(func() error {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
CeremonySignatories[i], err = curves.ED448().Point.FromAffineCompressed(b)
if err != nil {
panic(err)
}
return nil
})
}
wg.Wait()
CeremonyBLS48581G1 = g1s
CeremonyBLS48581G2 = g2s
// Post-ceremony, precompute everything and put it in the finalized ceremony
// state
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16, 128, 1024, 2048, 65536}
wg = errgroup.Group{}
wg.SetLimit(runtime.NumCPU())
root := make([]curves.PairingScalar, 5)
roots := make([][]curves.PairingScalar, 5)
reverseRoots := make([][]curves.PairingScalar, 5)
ffts := make([][]curves.PairingPoint, 5)
for idx, i := range sizes {
i := i
idx := idx
wg.Go(func() error {
exp := new(big.Int).Quo(
new(big.Int).Sub(q, big.NewInt(1)),
big.NewInt(i),
)
rootOfUnity := new(big.Int).Exp(big.NewInt(int64(37)), exp, q)
roots[idx] = make([]curves.PairingScalar, i+1)
reverseRoots[idx] = make([]curves.PairingScalar, i+1)
wg2 := sync.WaitGroup{}
wg2.Add(int(i))
for j := int64(0); j < i; j++ {
j := j
go func() {
rev := big.NewInt(int64(j))
r := new(big.Int).Exp(
rootOfUnity,
rev,
q,
)
scalar, _ := (&curves.ScalarBls48581{}).SetBigInt(r)
if rev.Cmp(big.NewInt(1)) == 0 {
root[idx] = scalar.(curves.PairingScalar)
}
roots[idx][j] = scalar.(curves.PairingScalar)
reverseRoots[idx][i-j] = roots[idx][j]
wg2.Done()
}()
}
wg2.Wait()
roots[idx][i] = roots[idx][0]
reverseRoots[idx][0] = reverseRoots[idx][i]
return nil
})
}
wg.Wait()
wg = errgroup.Group{}
wg.SetLimit(runtime.NumCPU())
for i := range root {
i := i
RootOfUnityBLS48581[uint64(sizes[i])] = root[i]
RootsOfUnityBLS48581[uint64(sizes[i])] = roots[i]
ReverseRootsOfUnityBLS48581[uint64(sizes[i])] = reverseRoots[i]
wg.Go(func() error {
// We precomputed 65536; the others are cheap and will be fully precomputed
// post-ceremony
if sizes[i] < 65536 {
fftG1, err := FFTG1(
CeremonyBLS48581G1[:sizes[i]],
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
uint64(sizes[i]),
true,
)
if err != nil {
panic(err)
}
ffts[i] = fftG1
} else {
ffts[i] = g1ffts
}
return nil
})
}
wg.Wait()
for i := range root {
FFTBLS48581[uint64(sizes[i])] = ffts[i]
}
rbls48581.Init()
}
func NewKZGProver(

View File

@ -1,12 +1,9 @@
package crypto
import (
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type KZGInclusionProver struct {
@ -21,464 +18,451 @@ func NewKZGInclusionProver(logger *zap.Logger) *KZGInclusionProver {
}
}
// Commit implements InclusionProver.
func (k *KZGInclusionProver) Commit(
data []byte,
typeUrl string,
) (*InclusionCommitment, error) {
if typeUrl == protobufs.IntrinsicExecutionOutputType {
digest := sha3.NewShake256()
_, err := digest.Write(data)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// // Commit implements InclusionProver.
// func (k *KZGInclusionProver) Commit(
// data []byte,
// typeUrl string,
// ) (*InclusionCommitment, error) {
// if typeUrl == protobufs.IntrinsicExecutionOutputType {
// digest := sha3.NewShake256()
// _, err := digest.Write(data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
expand := make([]byte, 1024)
_, err = digest.Read(expand)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
poly, err := k.prover.BytesToPolynomial(expand)
if err != nil {
return nil, errors.Wrap(err, "commit")
}
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
k.logger.Debug("proving execution output for inclusion")
polys, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
false,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
// k.logger.Debug("proving execution output for inclusion")
// polys, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove")
// }
k.logger.Debug("converted execution output chunk to evaluation form")
// k.logger.Debug("converted execution output chunk to evaluation form")
k.logger.Debug("creating kzg commitment")
points, err := k.prover.Commit(polys)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
// k.logger.Debug("creating kzg commitment")
// points, err := k.prover.Commit(polys)
// if err != nil {
// return nil, errors.Wrap(err, "prove")
// }
return &InclusionCommitment{
TypeUrl: typeUrl,
Data: data,
Commitment: points.ToAffineCompressed(),
}, nil
}
// return &InclusionCommitment{
// TypeUrl: typeUrl,
// Data: data,
// Commitment: points.ToAffineCompressed(),
// }, nil
// }
poly, err := k.prover.BytesToPolynomial(data)
if err != nil {
return nil, errors.Wrap(err, "commit")
}
// poly, err := k.prover.BytesToPolynomial(data)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
points, err := k.prover.Commit(poly)
if err != nil {
return nil, errors.Wrap(err, "commit")
}
// points, err := k.prover.Commit(poly)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
return &InclusionCommitment{
TypeUrl: typeUrl,
Data: data,
Commitment: points.ToAffineCompressed(),
}, nil
}
// return &InclusionCommitment{
// TypeUrl: typeUrl,
// Data: data,
// Commitment: points.ToAffineCompressed(),
// }, nil
// }
// ProveAggregate implements InclusionProver.
func (k *KZGInclusionProver) ProveAggregate(
commits []*InclusionCommitment,
) (*InclusionAggregateProof, error) {
polys := [][]curves.PairingScalar{}
commitPoints := []curves.PairingPoint{}
for _, commit := range commits {
switch commit.TypeUrl {
case protobufs.IntrinsicExecutionOutputType:
k.logger.Debug("confirming inclusion in aggregate")
digest := sha3.NewShake256()
_, err := digest.Write(commit.Data)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// // ProveAggregate implements InclusionProver.
// func (k *KZGInclusionProver) ProveAggregate(
// commits []*InclusionCommitment,
// ) (*InclusionAggregateProof, error) {
// polys := [][]curves.PairingScalar{}
// commitPoints := []curves.PairingPoint{}
// for _, commit := range commits {
// switch commit.TypeUrl {
// case protobufs.IntrinsicExecutionOutputType:
// k.logger.Debug("confirming inclusion in aggregate")
// digest := sha3.NewShake256()
// _, err := digest.Write(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
expand := make([]byte, 1024)
_, err = digest.Read(expand)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
poly, err := k.prover.BytesToPolynomial(expand)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
false,
)
if err != nil {
k.logger.Error(
"error performing fast fourier transform on key bundle",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
k.logger.Debug(
"created fft of polynomial",
zap.Int("poly_size", len(evalPoly)),
)
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
polys = append(polys, evalPoly)
// polys = append(polys, evalPoly)
c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
commit.Commitment,
)
if err != nil {
return nil, errors.Wrap(err, "prove aggregate")
}
commitPoints = append(commitPoints, c.(curves.PairingPoint))
default:
k.logger.Debug("confirming inclusion in aggregate")
poly, err := k.prover.BytesToPolynomial(commit.Data)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove aggregate")
// }
// commitPoints = append(commitPoints, c.(curves.PairingPoint))
// default:
// k.logger.Debug("confirming inclusion in aggregate")
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
for i := 0; i < 1024-len(poly); i++ {
poly = append(
poly,
curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
)
}
// for i := 0; i < 1024-len(poly); i++ {
// poly = append(
// poly,
// curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
// )
// }
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
1024,
false,
)
if err != nil {
k.logger.Error(
"error performing fast fourier transform on key bundle",
zap.Error(err),
)
return nil, errors.Wrap(err, "prove aggregate")
}
k.logger.Debug(
"created fft of polynomial",
zap.Int("poly_size", len(evalPoly)),
)
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 1024,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
polys = append(polys, evalPoly)
// polys = append(polys, evalPoly)
c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
commit.Commitment,
)
if err != nil {
k.logger.Error("could not verify clock frame", zap.Error(err))
return nil, errors.Wrap(err, "prove aggregate")
}
commitPoints = append(commitPoints, c.(curves.PairingPoint))
}
}
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return nil, errors.Wrap(err, "prove aggregate")
// }
// commitPoints = append(commitPoints, c.(curves.PairingPoint))
// }
// }
proof, commitment, err := k.prover.ProveAggregate(
polys,
commitPoints,
)
if err != nil {
return nil, errors.Wrap(err, "prove aggregate")
}
// proof, commitment, err := k.prover.ProveAggregate(
// polys,
// commitPoints,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove aggregate")
// }
if proof.IsIdentity() {
return nil, errors.Wrap(errors.New("invalid proof"), "prove aggregate")
}
// if proof.IsIdentity() {
// return nil, errors.Wrap(errors.New("invalid proof"), "prove aggregate")
// }
return &InclusionAggregateProof{
InclusionCommitments: commits,
AggregateCommitment: commitment.ToAffineCompressed(),
Proof: proof.ToAffineCompressed(),
}, nil
}
// return &InclusionAggregateProof{
// InclusionCommitments: commits,
// AggregateCommitment: commitment.ToAffineCompressed(),
// Proof: proof.ToAffineCompressed(),
// }, nil
// }
// VerifyAggregate implements InclusionProver.
func (k *KZGInclusionProver) VerifyAggregate(
proof *InclusionAggregateProof,
) (bool, error) {
polys := [][]curves.PairingScalar{}
commitPoints := []curves.PairingPoint{}
for _, commit := range proof.InclusionCommitments {
poly, err := k.prover.BytesToPolynomial(commit.Data)
if err != nil {
return false, errors.Wrap(err, "verify aggregate")
}
// // VerifyAggregate implements InclusionProver.
// func (k *KZGInclusionProver) VerifyAggregate(
// proof *InclusionAggregateProof,
// ) (bool, error) {
// polys := [][]curves.PairingScalar{}
// commitPoints := []curves.PairingPoint{}
// for _, commit := range proof.InclusionCommitments {
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
polys = append(polys, poly)
// polys = append(polys, poly)
point, err := curves.BLS48581G1().Point.FromAffineCompressed(
commit.Commitment,
)
if err != nil {
return false, errors.Wrap(err, "verify aggregate")
}
// point, err := curves.BLS48581G1().Point.FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
commitPoints = append(commitPoints, point.(curves.PairingPoint))
}
// commitPoints = append(commitPoints, point.(curves.PairingPoint))
// }
aggregate, err := curves.BLS48581G1().Point.FromAffineCompressed(
proof.AggregateCommitment,
)
if err != nil {
return false, errors.Wrap(err, "verify aggregate")
}
// aggregate, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.AggregateCommitment,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
proofPoint, err := curves.BLS48581G1().Point.FromAffineCompressed(
proof.Proof,
)
if err != nil {
return false, errors.Wrap(err, "verify aggregate")
}
// proofPoint, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.Proof,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
verify, err := k.prover.VerifyAggregateProof(
polys,
commitPoints,
aggregate.(curves.PairingPoint),
proofPoint.(curves.PairingPoint),
)
return verify, errors.Wrap(err, "verify aggregate")
}
// verify, err := k.prover.VerifyAggregateProof(
// polys,
// commitPoints,
// aggregate.(curves.PairingPoint),
// proofPoint.(curves.PairingPoint),
// )
// return verify, errors.Wrap(err, "verify aggregate")
// }
func (k *KZGInclusionProver) VerifyFrame(
frame *protobufs.ClockFrame,
) error {
aggregateCommitments := []curves.PairingPoint{}
for i := 0; i < (len(frame.Input)-516)/74; i++ {
c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
frame.Input[516+(i*74) : 516+(i*74)+74],
)
if err != nil {
k.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "verify frame")
}
aggregateCommitments = append(aggregateCommitments, c.(curves.PairingPoint))
}
// func (k *KZGInclusionProver) VerifyFrame(
// frame *protobufs.ClockFrame,
// ) error {
// aggregateCommitments := []curves.PairingPoint{}
// for i := 0; i < (len(frame.Input)-516)/74; i++ {
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// frame.Input[516+(i*74) : 516+(i*74)+74],
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// aggregateCommitments = append(aggregateCommitments, c.(curves.PairingPoint))
// }
if len(aggregateCommitments) != len(frame.AggregateProofs) {
k.logger.Error(
"commit length mismatched proof for frame",
zap.Int("commit_length", len(aggregateCommitments)),
zap.Int("proof_length", len(frame.AggregateProofs)),
)
return errors.Wrap(
errors.New("commit length mismatched proof for frame"),
"verify frame",
)
}
// if len(aggregateCommitments) != len(frame.AggregateProofs) {
// k.logger.Error(
// "commit length mismatched proof for frame",
// zap.Int("commit_length", len(aggregateCommitments)),
// zap.Int("proof_length", len(frame.AggregateProofs)),
// )
// return errors.Wrap(
// errors.New("commit length mismatched proof for frame"),
// "verify frame",
// )
// }
for i, proof := range frame.AggregateProofs {
aggregatePoly := [][]curves.PairingScalar{}
commitments := []curves.PairingPoint{}
// for i, proof := range frame.AggregateProofs {
// aggregatePoly := [][]curves.PairingScalar{}
// commitments := []curves.PairingPoint{}
for _, commit := range proof.GetInclusionCommitments() {
switch commit.TypeUrl {
case protobufs.IntrinsicExecutionOutputType:
k.logger.Debug("confirming inclusion in aggregate")
digest := sha3.NewShake256()
_, err := digest.Write(commit.Data)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
// for _, commit := range proof.GetInclusionCommitments() {
// switch commit.TypeUrl {
// case protobufs.IntrinsicExecutionOutputType:
// k.logger.Debug("confirming inclusion in aggregate")
// digest := sha3.NewShake256()
// _, err := digest.Write(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
expand := make([]byte, 1024)
_, err = digest.Read(expand)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
poly, err := k.prover.BytesToPolynomial(expand)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
false,
)
if err != nil {
k.logger.Error(
"error performing fast fourier transform on key bundle",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
k.logger.Debug(
"created fft of polynomial",
zap.Int("poly_size", len(evalPoly)),
)
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
aggregatePoly = append(aggregatePoly, evalPoly)
// aggregatePoly = append(aggregatePoly, evalPoly)
c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
commit.Commitment,
)
if err != nil {
k.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "verify frame")
}
commitments = append(commitments, c.(curves.PairingPoint))
default:
k.logger.Debug("confirming inclusion in aggregate")
poly, err := k.prover.BytesToPolynomial(commit.Data)
if err != nil {
k.logger.Error(
"error converting key bundle to polynomial",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// commitments = append(commitments, c.(curves.PairingPoint))
// default:
// k.logger.Debug("confirming inclusion in aggregate")
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
for i := 0; i < 1024-len(poly); i++ {
poly = append(
poly,
curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
)
}
// for i := 0; i < 1024-len(poly); i++ {
// poly = append(
// poly,
// curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
// )
// }
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
1024,
false,
)
if err != nil {
k.logger.Error(
"error performing fast fourier transform on key bundle",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
k.logger.Debug(
"created fft of polynomial",
zap.Int("poly_size", len(evalPoly)),
)
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 1024,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
aggregatePoly = append(aggregatePoly, evalPoly)
// aggregatePoly = append(aggregatePoly, evalPoly)
c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
commit.Commitment,
)
if err != nil {
k.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "verify frame")
}
commitments = append(commitments, c.(curves.PairingPoint))
}
}
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// commitments = append(commitments, c.(curves.PairingPoint))
// }
// }
p, err := curves.BLS48581G1().Point.FromAffineCompressed(
proof.Proof,
)
if err != nil {
k.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(err, "verify frame")
}
// p, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.Proof,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
result, err := k.prover.VerifyAggregateProof(
aggregatePoly,
commitments,
aggregateCommitments[i],
p.(curves.PairingPoint),
)
if err != nil {
k.logger.Error(
"could not verify clock frame",
zap.Error(err),
)
return errors.Wrap(err, "verify frame")
}
// result, err := k.prover.VerifyAggregateProof(
// aggregatePoly,
// commitments,
// aggregateCommitments[i],
// p.(curves.PairingPoint),
// )
// if err != nil {
// k.logger.Error(
// "could not verify clock frame",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
if !result {
k.logger.Error("could not verify clock frame", zap.Error(err))
return errors.Wrap(
errors.New("invalid proof"),
"verify frame",
)
}
}
// if !result {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(
// errors.New("invalid proof"),
// "verify frame",
// )
// }
// }
return nil
}
// return nil
// }
func (k *KZGInclusionProver) CommitRaw(
data []byte,
polySize uint64,
) ([]byte, error) {
poly, err := k.prover.BytesToPolynomial(data)
if err != nil {
return nil, errors.Wrap(err, "commit raw")
}
for i := len(poly); i < int(polySize); i++ {
poly = append(poly, curves.BLS48581G1().NewScalar().(curves.PairingScalar))
}
commit, err := k.prover.Commit(poly)
if err != nil {
return nil, errors.Wrap(err, "commit raw")
}
return commit.ToAffineCompressed(), nil
return rbls48581.CommitRaw(data, polySize), nil
}
func (k *KZGInclusionProver) ProveRaw(
@ -486,63 +470,7 @@ func (k *KZGInclusionProver) ProveRaw(
index int,
polySize uint64,
) ([]byte, error) {
poly, err := k.prover.BytesToPolynomial(data)
if err != nil {
return nil, errors.Wrap(err, "prove raw")
}
for i := len(poly); i < int(polySize); i++ {
poly = append(poly, curves.BLS48581G1().NewScalar().(curves.PairingScalar))
}
z := kzg.RootsOfUnityBLS48581[polySize][index]
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
polySize,
true,
)
if err != nil {
return nil, errors.Wrap(err, "prove raw")
}
divisors := make([]curves.PairingScalar, 2)
divisors[0] = (&curves.ScalarBls48581{}).Zero().Sub(z).(*curves.ScalarBls48581)
divisors[1] = (&curves.ScalarBls48581{}).One().(*curves.ScalarBls48581)
a := make([]curves.PairingScalar, len(evalPoly))
for i := 0; i < len(a); i++ {
a[i] = evalPoly[i].Clone().(*curves.ScalarBls48581)
}
// Adapted from Feist's amortized proofs:
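// Synthetic division of the polynomial by (x - z); the quotient
// coefficients are then combined with the G1 powers below to form the
// opening proof.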
aPos := len(a) - 1
bPos := len(divisors) - 1
diff := aPos - bPos
out := make([]curves.PairingScalar, diff+1, diff+1)
for diff >= 0 {
out[diff] = a[aPos].Div(divisors[bPos]).(*curves.ScalarBls48581)
for i := bPos; i >= 0; i-- {
a[diff+i] = a[diff+i].Sub(
out[diff].Mul(divisors[i]),
).(*curves.ScalarBls48581)
}
aPos -= 1
diff -= 1
}
proof, err := k.prover.PointLinearCombination(
kzg.CeremonyBLS48581G1[:polySize-1],
out,
)
if err != nil {
return nil, errors.Wrap(err, "prove raw")
}
return proof.ToAffineCompressed(), nil
return rbls48581.ProveRaw(data, uint64(index), polySize), nil
}
func (k *KZGInclusionProver) VerifyRaw(
@ -552,29 +480,7 @@ func (k *KZGInclusionProver) VerifyRaw(
proof []byte,
polySize uint64,
) (bool, error) {
z := kzg.RootsOfUnityBLS48581[polySize][index]
y, err := curves.BLS48581G1().NewScalar().SetBytes(data)
if err != nil {
return false, errors.Wrap(err, "verify raw")
}
c, err := curves.BLS48581G1().Point.FromAffineCompressed(commit)
if err != nil {
return false, errors.Wrap(err, "verify raw")
}
p, err := curves.BLS48581G1().Point.FromAffineCompressed(proof)
if err != nil {
return false, errors.Wrap(err, "verify raw")
}
return k.prover.Verify(
c.(curves.PairingPoint),
z,
y.(curves.PairingScalar),
p.(curves.PairingPoint),
), nil
return rbls48581.VerifyRaw(data, commit, uint64(index), proof, polySize), nil
}
var _ InclusionProver = (*KZGInclusionProver)(nil)

View File

@ -1,74 +1,88 @@
package crypto_test
import (
"bytes"
"crypto/rand"
"encoding/hex"
"testing"
"time"
"github.com/cloudflare/circl/sign/ed448"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func TestKZGVerifyFrame(t *testing.T) {
// func TestKZGVerifyFrame(t *testing.T) {
// kzg.TestInit("./kzg/ceremony.json")
// data := make([]byte, 1024)
// rand.Read(data)
// l, _ := zap.NewProduction()
// inclusionProver := crypto.NewKZGInclusionProver(l)
// commitment, err := inclusionProver.Commit(
// data,
// protobufs.IntrinsicExecutionOutputType,
// )
// assert.NoError(t, err)
// proof, err := inclusionProver.ProveAggregate(
// []*crypto.InclusionCommitment{commitment},
// )
// assert.NoError(t, err)
// frame := &protobufs.ClockFrame{
// Filter: []byte{0x00},
// FrameNumber: 1,
// Input: bytes.Repeat([]byte{0x00}, 516),
// Output: bytes.Repeat([]byte{0x00}, 516),
// }
// _, priv, _ := ed448.GenerateKey(rand.Reader)
// w := crypto.NewWesolowskiFrameProver(l)
// frame, err = w.ProveDataClockFrame(
// frame,
// [][]byte{proof.AggregateCommitment},
// []*protobufs.InclusionAggregateProof{
// {
// Filter: []byte{0x00},
// FrameNumber: 1,
// InclusionCommitments: []*protobufs.InclusionCommitment{
// {
// Filter: []byte{0x00},
// FrameNumber: 1,
// TypeUrl: proof.InclusionCommitments[0].TypeUrl,
// Commitment: proof.InclusionCommitments[0].Commitment,
// Data: data,
// Position: 0,
// },
// },
// Proof: proof.Proof,
// },
// },
// priv,
// time.Now().UnixMilli(),
// 100,
// )
// err = inclusionProver.VerifyFrame(frame)
// assert.NoError(t, err)
// }
func TestKZGInclusionProverRawFuncs(t *testing.T) {
kzg.TestInit("./kzg/ceremony.json")
data := make([]byte, 1024)
rand.Read(data)
data, _ := hex.DecodeString("408f9f0a63a1c463579a1fdaf82b37e0f397476e87c524915870ce7f5ede9c248493ea4ffefae154b8a55f10add4d75846b273a7f57433b438ae72880a29ab7cab6c3187a14651bac085329778526ebb31d14c9beb7b0983ff5e71a47c96ed9e7149e9e896cd4d604191583a282bdb5a92ea71334f296fd06498323b0c5d0e60c04180a7141813f6f9a6c766c450898ffc437ebed07a2fbd9201207171a0a8f5006a83d9e2430687952dd42237b7d77de61c0655b91bb1943ed4b9337449ded69ef8f2f83fba58827be7b7082db048b799f1bb590f61c558976910e77357562eb4d66fc97636c26ea562fe18b4cc397e679acad23cfd003ae93efe2903534ce1fe475eba3c82fef71554b4d63b593f2da3fea3b1b3f91379c6ff1989c91eaab70e336d96f3c46de987ef7165d111f692fe8205f7df0eb854fc550aa0d10942049dec4c60d99a51b0a7cde49a6d5e9364d0162cb86af1a51efeffacf7935f796f18cb868756e693aa967339efb8e45071da835ff8b6897fe56dc14edb49352edc88d3a6866873ecfa2bf968907e86c0dd139ab9a23bae341ec6aa5f1fbac2390a9d7f5ef9346d5c433268bf85e34e98295233f5e0d2ceb35c47b33b93e8ae9445c3b9f6ec32d8e3a1a1bc95b013dd36a84d803e468e873420c71b6473e44300f4d2702ccb452146c675d5ac1511a0b0a61a857b58ed3365ecdc1cafafbdfe5f0f2420389ae5f54d2fb9d12de314b416fdb12786fb66d0517229347ecc347eb8207a88abeffbdb9acfc582047a9343efae6c21cf67566e2d949920bdff1f4cea376332dd503c9dcd72a776744724c29a25038ef582f1103b406321e14d0f232c709b3d5a3568c75a1bc244b65e18d9ca7c53e2e13bb5638c325f6d43601de131aa2e3b7ffcc23accf6c69e9c6360cf8f4d48de3f11354855ec281f8a9c85caec0b8284c99c66a43ed0c37d6ce0f5c349e4551da6a1d9edcfa02f6be27ed037c5ec79c0519ba60725f89b3fe7826ca1a7b157ef9360bc2007bc2b9dd2ba8fdc225047a9f66b832e2da1dc6019f480e3aadb46ba93cccbd1e7b221a5d36e0fc96cbf497bfb40ff0276f14b7d45c4738a1b755e2754c5c352ac4af96c1a9be1d92942200b325cc3c53e9b3099c99a466bdc6c001179f6c63f828936b1c33f651a150c080b2eac8ed7cb9cfe599daee477f9ba88a6d1cbdeb08995c3c7bcce18ee2946c2beb138b8c797f61c6c33800ffeda74b77dab186cc4c7e91e9aca954d4863de6b04a82ef563a6eefbedec8fdc9284fb33e15197d2512e4928019fc29aa9c0a199797ef02c8daeb8706dd21a0e6b25b0e73795bac18dfaac2abc1defddf530f6a14046c2a918fa581b7ab0240bbd4f2e570a527581cb0a39bb544ceeabeedf891bc2417ac1e1fa558c09a9ceffef108a5778ff99a8575b4fb69cbbfb2c474d58")
l, _ := zap.NewProduction()
inclusionProver := crypto.NewKZGInclusionProver(l)
commitment, err := inclusionProver.Commit(
data,
protobufs.IntrinsicExecutionOutputType,
)
c, err := inclusionProver.CommitRaw(data, 1024)
assert.NoError(t, err)
proof, err := inclusionProver.ProveAggregate(
[]*crypto.InclusionCommitment{commitment},
)
p, err := inclusionProver.ProveRaw(data, 0, 1024)
assert.NoError(t, err)
frame := &protobufs.ClockFrame{
Filter: []byte{0x00},
FrameNumber: 1,
Input: bytes.Repeat([]byte{0x00}, 516),
Output: bytes.Repeat([]byte{0x00}, 516),
}
_, priv, _ := ed448.GenerateKey(rand.Reader)
w := crypto.NewWesolowskiFrameProver(l)
frame, err = w.ProveDataClockFrame(
frame,
[][]byte{proof.AggregateCommitment},
[]*protobufs.InclusionAggregateProof{
{
Filter: []byte{0x00},
FrameNumber: 1,
InclusionCommitments: []*protobufs.InclusionCommitment{
{
Filter: []byte{0x00},
FrameNumber: 1,
TypeUrl: proof.InclusionCommitments[0].TypeUrl,
Commitment: proof.InclusionCommitments[0].Commitment,
Data: data,
Position: 0,
},
},
Proof: proof.Proof,
},
},
priv,
time.Now().UnixMilli(),
100,
)
err = inclusionProver.VerifyFrame(frame)
v, err := inclusionProver.VerifyRaw(data[64*0:64*1], c, 0, p, 1024)
assert.NoError(t, err)
assert.False(t, v)
}
func TestKZGInclusionProverRawFuncs(t *testing.T) {

View File

@ -1,248 +0,0 @@
package application
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (a *CeremonyApplication) applyTranscriptShare(
share *protobufs.CeremonyTranscriptShare,
) error {
if len(share.AdditiveG1Powers) != len(a.LatestTranscript.G1Powers)-1 {
return errors.Wrap(errors.New("invalid g1s"), "apply transcript share")
}
if len(share.AdditiveG2Powers) != len(a.LatestTranscript.G2Powers)-1 {
return errors.Wrap(errors.New("invalid g2s"), "apply transcript share")
}
if share.AdditiveG1_256Witness == nil ||
share.AdditiveG1_256Witness.KeyValue == nil {
return errors.Wrap(
errors.New("invalid g1 witness"),
"apply transcript share",
)
}
if _, err := curves.BLS48581G1().Point.FromAffineCompressed(
share.AdditiveG1_256Witness.KeyValue,
); err != nil {
return errors.Wrap(
errors.Wrap(err, "invalid g1 witness"),
"apply transcript share",
)
}
if share.AdditiveG2_256Witness == nil ||
share.AdditiveG2_256Witness.KeyValue == nil {
return errors.Wrap(
errors.New("invalid g2 witness"),
"apply transcript share",
)
}
for _, s := range a.TranscriptShares {
if bytes.Equal(
s.AdditiveG1Powers[0].KeyValue,
share.AdditiveG1Powers[0].KeyValue,
) {
return nil
}
}
matchFound := false
for _, c := range a.FinalCommits {
if bytes.Equal(
share.ProverSignature.PublicKey.KeyValue,
c.ProverSignature.PublicKey.KeyValue,
) {
matchFound = true
break
}
}
if !matchFound {
return errors.Wrap(
errors.New(
fmt.Sprintf(
"no corresponding commit in commit set (size %d)",
len(a.FinalCommits),
),
),
"apply transcript share",
)
}
if err := share.VerifySignature(); err != nil {
return errors.Wrap(err, "apply transcript share")
}
for i, g1 := range a.LatestTranscript.G1Powers {
i := i
g1 := g1
if _, err := curves.BLS48581G1().Point.FromAffineCompressed(
g1.KeyValue,
); err != nil {
return errors.Wrap(
errors.Wrap(err, fmt.Sprintf("invalid g1 at position %d", i)),
"apply transcript share",
)
}
}
for i, g2 := range a.LatestTranscript.G2Powers {
i := i
g2 := g2
if _, err := curves.BLS48581G2().Point.FromAffineCompressed(
g2.KeyValue,
); err != nil {
return errors.Wrap(
errors.Wrap(err, fmt.Sprintf("invalid g2 at position %d", i)),
"apply transcript share",
)
}
}
exists := false
for _, s := range a.TranscriptShares {
exists = bytes.Equal(
s.ProverSignature.Signature,
share.ProverSignature.Signature,
)
if exists {
break
}
}
if !exists {
a.TranscriptShares = append(a.TranscriptShares, share)
}
return nil
}
func (a *CeremonyApplication) finalizeTranscript() error {
a.UpdatedTranscript = &protobufs.CeremonyTranscript{
G1Powers: make(
[]*protobufs.BLS48581G1PublicKey,
len(a.LatestTranscript.G1Powers),
),
G2Powers: make(
[]*protobufs.BLS48581G2PublicKey,
len(a.LatestTranscript.G2Powers),
),
RunningG1_256Witnesses: a.LatestTranscript.RunningG1_256Witnesses,
RunningG2_256Powers: a.LatestTranscript.RunningG2_256Powers,
}
a.UpdatedTranscript.G1Powers[0] = a.LatestTranscript.G1Powers[0]
a.UpdatedTranscript.G2Powers[0] = a.LatestTranscript.G2Powers[0]
for i := range a.UpdatedTranscript.G1Powers[1:] {
g1, err := curves.BLS48581G1().Point.FromAffineCompressed(
a.TranscriptShares[0].AdditiveG1Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
if len(a.TranscriptShares) > 1 {
for _, share := range a.TranscriptShares[1:] {
ag1, err := curves.BLS48581G1().Point.FromAffineCompressed(
share.AdditiveG1Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
g1 = g1.Add(ag1)
}
}
if !g1.IsOnCurve() || g1.IsIdentity() {
return errors.Wrap(
errors.New("invalid g1 power"),
"finalize transcript",
)
}
a.UpdatedTranscript.G1Powers[i+1] = &protobufs.BLS48581G1PublicKey{
KeyValue: g1.ToAffineCompressed(),
}
}
for i := range a.UpdatedTranscript.G2Powers[1:] {
g2, err := curves.BLS48581G2().Point.FromAffineCompressed(
a.TranscriptShares[0].AdditiveG2Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
if len(a.TranscriptShares) > 1 {
for _, share := range a.TranscriptShares[1:] {
ag2, err := curves.BLS48581G2().Point.FromAffineCompressed(
share.AdditiveG2Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
g2 = g2.Add(ag2)
}
}
if !g2.IsOnCurve() || g2.IsIdentity() {
return errors.Wrap(
errors.New("invalid g2 power"),
"finalize transcript",
)
}
a.UpdatedTranscript.G2Powers[i+1] = &protobufs.BLS48581G2PublicKey{
KeyValue: g2.ToAffineCompressed(),
}
}
g1Witness, err := curves.BLS48581G1().Point.FromAffineCompressed(
a.TranscriptShares[0].AdditiveG1_256Witness.KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
if len(a.TranscriptShares) > 1 {
for _, share := range a.TranscriptShares[1:] {
ag1, err := curves.BLS48581G1().Point.FromAffineCompressed(
share.AdditiveG1_256Witness.KeyValue,
)
if err != nil {
return errors.Wrap(err, "finalize transcript")
}
g1Witness = g1Witness.Add(ag1)
}
}
if !g1Witness.IsOnCurve() || g1Witness.IsIdentity() {
return errors.Wrap(
errors.New("invalid witness"),
"finalize transcript",
)
}
a.UpdatedTranscript.RunningG1_256Witnesses = append(
a.UpdatedTranscript.RunningG1_256Witnesses,
&protobufs.BLS48581G1PublicKey{
KeyValue: g1Witness.ToAffineCompressed(),
},
)
a.UpdatedTranscript.RunningG2_256Powers = append(
a.UpdatedTranscript.RunningG2_256Powers,
a.UpdatedTranscript.G2Powers[len(a.UpdatedTranscript.G2Powers)-1],
)
return nil
}

View File

@ -1,257 +0,0 @@
package application
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
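// applySeenProverAttestation records a "this prover is still online"
// attestation: the attested key must belong to an active participant and the
// signature must cover "lastseen" || frame || key. At most one attestation is
// kept per (seen prover, attesting prover) pair.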
func (a *CeremonyApplication) applySeenProverAttestation(
seenProverAttestation *protobufs.CeremonySeenProverAttestation,
) error {
if seenProverAttestation.SeenProverKey == nil ||
seenProverAttestation.SeenProverKey.KeyValue == nil {
return errors.Wrap(
errors.New("signature is nil"),
"apply seen prover attestation",
)
}
inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
seenProverAttestation.SeenProverKey.KeyValue,
) {
inParticipantList = true
break
}
}
if !inParticipantList {
return errors.Wrap(
errors.New("prover not in active participant list"),
"apply seen prover attestation",
)
}
b := binary.BigEndian.AppendUint64(
[]byte("lastseen"),
seenProverAttestation.LastSeenFrame,
)
b = append(b, seenProverAttestation.SeenProverKey.KeyValue...)
signature := seenProverAttestation.GetProverSignature()
if signature == nil {
return errors.Wrap(
errors.New("signature is nil"),
"apply seen prover attestation",
)
}
if err := signature.Verify(b); err != nil {
return errors.Wrap(err, "apply seen prover attestation")
}
replaced := false
for i, att := range a.LatestSeenProverAttestations {
att := att
if bytes.Equal(
att.SeenProverKey.KeyValue,
seenProverAttestation.SeenProverKey.KeyValue,
) &&
bytes.Equal(
att.ProverSignature.PublicKey.KeyValue,
seenProverAttestation.ProverSignature.PublicKey.KeyValue,
) && att.LastSeenFrame < seenProverAttestation.LastSeenFrame {
a.LatestSeenProverAttestations[i] = seenProverAttestation
replaced = true
break
}
}
if !replaced {
a.LatestSeenProverAttestations = append(
a.LatestSeenProverAttestations,
seenProverAttestation,
)
}
return nil
}
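// applyDroppedProverAttestation mirrors the seen-prover path for provers
// reported as dropped, with the signature bound to the "dropped" domain
// prefix instead of "lastseen".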
func (a *CeremonyApplication) applyDroppedProverAttestation(
droppedProverAttestation *protobufs.CeremonyDroppedProverAttestation,
) error {
if droppedProverAttestation.DroppedProverKey == nil ||
droppedProverAttestation.DroppedProverKey.KeyValue == nil {
return errors.Wrap(
errors.New("signature is nil"),
"apply dropped prover attestation",
)
}
inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
droppedProverAttestation.DroppedProverKey.KeyValue,
) {
inParticipantList = true
break
}
}
if !inParticipantList {
return errors.Wrap(
errors.New("prover not in active participant list"),
"apply dropped prover attestation",
)
}
b := binary.BigEndian.AppendUint64(
[]byte("dropped"),
droppedProverAttestation.LastSeenFrame,
)
b = append(b, droppedProverAttestation.DroppedProverKey.KeyValue...)
signature := droppedProverAttestation.GetProverSignature()
if signature == nil {
return errors.Wrap(
errors.New("signature is nil"),
"apply dropped prover attestation",
)
}
if err := signature.Verify(b); err != nil {
return errors.Wrap(err, "apply dropped prover attestation")
}
replaced := false
for i, att := range a.DroppedParticipantAttestations {
att := att
if bytes.Equal(
att.DroppedProverKey.KeyValue,
droppedProverAttestation.DroppedProverKey.KeyValue,
) &&
bytes.Equal(
att.ProverSignature.PublicKey.KeyValue,
droppedProverAttestation.ProverSignature.PublicKey.KeyValue,
) && att.LastSeenFrame < droppedProverAttestation.LastSeenFrame {
a.DroppedParticipantAttestations[i] = droppedProverAttestation
replaced = true
break
}
}
if !replaced {
a.DroppedParticipantAttestations = append(
a.DroppedParticipantAttestations,
droppedProverAttestation,
)
}
return nil
}
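// applyTranscriptCommit admits a round-advance commit: the BLS contribution
// key must sign the prover's Ed448 key and the prover key must sign the BLS
// public key, binding the two together; the prover must be an active
// participant; and commits are bucketed per round, starting a new round once
// every active participant has committed, up to 2^(participants-1) rounds.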
func (a *CeremonyApplication) applyTranscriptCommit(
transcriptCommit *protobufs.CeremonyTranscriptCommit,
) error {
if transcriptCommit.ContributionSignature == nil ||
transcriptCommit.ProverSignature == nil ||
transcriptCommit.ContributionSignature.PublicKey == nil ||
transcriptCommit.ProverSignature.PublicKey == nil {
return errors.Wrap(
errors.New("signature is nil"),
"apply transcript commit",
)
}
point, err := curves.BLS48581G2().Point.FromAffineCompressed(
transcriptCommit.ContributionSignature.PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "apply transcript commit")
}
if err := VerifySignatureOfProverKey(
transcriptCommit.ProverSignature.PublicKey.KeyValue,
transcriptCommit.ContributionSignature.Signature,
point,
); err != nil {
return errors.Wrap(err, "apply transcript commit")
}
if err := transcriptCommit.ProverSignature.Verify(
transcriptCommit.ContributionSignature.PublicKey.KeyValue,
); err != nil {
return errors.Wrap(err, "apply transcript commit")
}
inParticipantList := false
for _, p := range a.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
transcriptCommit.ProverSignature.PublicKey.KeyValue,
) {
inParticipantList = true
break
}
}
if !inParticipantList {
return errors.Wrap(
errors.New("prover not in active participant list"),
"apply transcript commit",
)
}
maxRounds := uint64(1)
for i := 0; i < len(a.ActiveParticipants)-1; i++ {
maxRounds = maxRounds << 1
}
if len(a.TranscriptRoundAdvanceCommits) == 0 {
a.TranscriptRoundAdvanceCommits = []*protobufs.CeremonyAdvanceRound{
{
Commits: []*protobufs.CeremonyTranscriptCommit{},
},
}
}
if maxRounds < a.RoundCount-1 {
return errors.Wrap(
errors.New("round limit exceeded"),
"apply transcript commit",
)
}
if len(a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits) ==
len(a.ActiveParticipants) {
a.TranscriptRoundAdvanceCommits = append(
a.TranscriptRoundAdvanceCommits,
&protobufs.CeremonyAdvanceRound{
Commits: []*protobufs.CeremonyTranscriptCommit{
transcriptCommit,
},
},
)
a.RoundCount++
} else {
for _, c := range a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits {
if bytes.Equal(
c.ProverSignature.PublicKey.KeyValue,
transcriptCommit.ProverSignature.PublicKey.KeyValue,
) {
return nil
}
}
a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits = append(
a.TranscriptRoundAdvanceCommits[a.RoundCount-1].Commits,
transcriptCommit,
)
}
return nil
}

View File

@ -1,101 +0,0 @@
package application
import (
"bytes"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
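// applyLobbyJoin admits a prover into the open lobby: the identity key and
// signed pre-key must decompress on Ed448, the join signature must verify,
// the lobby is capped at 256 entries, and duplicate prover keys are ignored.
// Provers carried over from the previous round's preferred list are moved to
// the front of the join queue.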
func (a *CeremonyApplication) applyLobbyJoin(
join *protobufs.CeremonyLobbyJoin,
) error {
signature := join.GetPublicKeySignatureEd448()
if signature == nil {
return errors.Wrap(errors.New("signature is nil"), "apply lobby join")
}
if join.IdentityKey == nil || join.IdentityKey.KeyValue == nil {
return errors.Wrap(errors.New("identity key is nil"), "apply lobby join")
}
if join.SignedPreKey == nil || join.SignedPreKey.KeyValue == nil {
return errors.Wrap(errors.New("signed prekey is nil"), "apply lobby join")
}
if _, err := curves.ED448().Point.FromAffineCompressed(
join.IdentityKey.KeyValue,
); err != nil {
return errors.Wrap(err, "apply lobby join")
}
if _, err := curves.ED448().Point.FromAffineCompressed(
join.SignedPreKey.KeyValue,
); err != nil {
return errors.Wrap(err, "apply lobby join")
}
if err := join.VerifySignature(); err != nil {
return errors.Wrap(err, "apply lobby join")
}
if len(a.LobbyJoins) == 256 {
return nil
}
for _, p := range a.LobbyJoins {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
signature.PublicKey.KeyValue,
) {
return nil
}
}
prepend := false
nextRoundPreferredParticipants := []*protobufs.Ed448PublicKey{}
for _, p := range a.NextRoundPreferredParticipants {
p := p
if !bytes.Equal(p.KeyValue, signature.PublicKey.KeyValue) {
nextRoundPreferredParticipants = append(
nextRoundPreferredParticipants,
p,
)
}
}
if len(a.NextRoundPreferredParticipants) !=
len(nextRoundPreferredParticipants) {
prepend = true
}
a.NextRoundPreferredParticipants = nextRoundPreferredParticipants
if prepend {
a.LobbyJoins = append(
append([]*protobufs.CeremonyLobbyJoin{}, join),
a.LobbyJoins...,
)
} else {
a.LobbyJoins = append(a.LobbyJoins, join)
}
return nil
}
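// finalizeParticipantSet truncates the join queue to the largest power of two
// that fits (e.g. 11 joins yield 8 active participants), so the pairing
// rounds always operate on 2^k provers.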
func (a *CeremonyApplication) finalizeParticipantSet() error {
power := uint64(1)
for uint64(len(a.LobbyJoins)) > power {
power = power << 1
}
if power != uint64(len(a.LobbyJoins)) {
power = power >> 1
}
a.ActiveParticipants = []*protobufs.CeremonyLobbyJoin{}
for i := 0; i < int(power); i++ {
a.ActiveParticipants = append(
a.ActiveParticipants,
a.LobbyJoins[i],
)
}
return nil
}

View File

@ -1,288 +0,0 @@
package application
import (
"bytes"
"crypto"
"crypto/rand"
"testing"
"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
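// TestCeremonyTransitions drives a single-participant ceremony through its
// full lifecycle: lobby join, the open-state countdown, a round-advance
// commit, an additive transcript share, validation of the combined
// transcript, and finally the reward credited to the prover's address in the
// reward trie.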
func TestCeremonyTransitions(t *testing.T) {
bls48581.Init()
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
old2 := old.Mul(old)
old3 := old2.Mul(old)
proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
require.NoError(t, err)
idk := curves.ED448().Scalar.Random(rand.Reader)
idkPub := curves.ED448().Point.Generator().Mul(idk).ToAffineCompressed()
spk := curves.ED448().Scalar.Random(rand.Reader)
spkPub := curves.ED448().Point.Generator().Mul(spk).ToAffineCompressed()
require.NoError(t, err)
trie := &tries.RewardCritbitTrie{}
a := &CeremonyApplication{
RewardTrie: trie,
LatestTranscript: &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old2,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old3,
).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
},
}
join := &protobufs.CeremonyLobbyJoin{
FrameNumber: 0,
IdentityKey: &protobufs.X448PublicKey{
KeyValue: idkPub,
},
SignedPreKey: &protobufs.X448PublicKey{
KeyValue: spkPub,
},
PeerId: []byte{},
}
sig, err := join.SignWithProverKey(proverKey)
require.NoError(t, err)
join.PublicKeySignatureEd448 = &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
}
joinBytes, err := proto.Marshal(join)
require.NoError(t, err)
a, _, _, err = a.ApplyTransition(0, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{protobufs.CeremonyLobbyJoinType},
TransitionInputs: [][]byte{joinBytes},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)
for i := uint64(0); i < 10; i++ {
a, _, _, err = a.ApplyTransition(i+1, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{},
TransitionInputs: [][]byte{},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)
}
a, _, _, err = a.ApplyTransition(12, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{},
TransitionInputs: [][]byte{},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_IN_PROGRESS)
require.True(t, bytes.Equal(
a.ActiveParticipants[0].PublicKeySignatureEd448.PublicKey.KeyValue,
proverPubKey,
))
tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
tau2 := tau.Mul(tau)
tau3 := tau2.Mul(tau)
tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
proverSig, err := proverKey.Sign(
rand.Reader,
tauPubG2.ToAffineCompressed(),
crypto.Hash(0),
)
require.NoError(t, err)
blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
key := tau.Bytes()
if bls48581.Core_Sign(blsSignature, proverPubKey, key) != bls48581.BLS_OK {
require.Fail(t, "could not sign")
}
blsSig := blsSignature[:]
advanceRound := &protobufs.CeremonyTranscriptCommit{
ProverSignature: &protobufs.Ed448Signature{
Signature: proverSig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
},
ContributionSignature: &protobufs.BLS48581Signature{
Signature: blsSig,
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: tauPubG2.ToAffineCompressed(),
},
},
}
advanceRoundBytes, err := proto.Marshal(advanceRound)
require.NoError(t, err)
a, _, _, err = a.ApplyTransition(13, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{protobufs.CeremonyTranscriptCommitType},
TransitionInputs: [][]byte{advanceRoundBytes},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_FINALIZING)
g1 := curves.BLS48581G1().Point.Generator()
g2 := curves.BLS48581G2().Point.Generator()
transcriptShare := &protobufs.CeremonyTranscriptShare{
AdditiveG1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: g1.Mul(old.Mul(tau)).ToAffineCompressed(),
},
{
KeyValue: g1.Mul(old2.Mul(tau2)).ToAffineCompressed(),
},
{
KeyValue: g1.Mul(old3.Mul(tau3)).ToAffineCompressed(),
},
},
AdditiveG2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: g2.Mul(old.Mul(tau)).ToAffineCompressed(),
},
},
AdditiveG1_256Witness: &protobufs.BLS48581G1PublicKey{
KeyValue: g1.Mul(tau).ToAffineCompressed(),
},
AdditiveG2_256Witness: &protobufs.BLS48581G2PublicKey{
KeyValue: g2.Mul(old.Mul(tau)).ToAffineCompressed(),
},
}
sig, err = transcriptShare.SignWithProverKey(proverKey)
require.NoError(t, err)
transcriptShare.ProverSignature = &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
}
shareBytes, err := proto.Marshal(transcriptShare)
require.NoError(t, err)
a, _, _, err = a.ApplyTransition(14, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{protobufs.CeremonyTranscriptShareType},
TransitionInputs: [][]byte{shareBytes},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_VALIDATING)
updatedTranscript := &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old2,
).Mul(tau2).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old3,
).Mul(tau3).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau,
).ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
},
}
transcriptBytes, err := proto.Marshal(updatedTranscript)
require.NoError(t, err)
a, _, _, err = a.ApplyTransition(15, &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{protobufs.CeremonyTranscriptType},
TransitionInputs: [][]byte{transcriptBytes},
}, false)
require.NoError(t, err)
require.Equal(t, a.LobbyState, CEREMONY_APPLICATION_STATE_OPEN)
bi, err := poseidon.HashBytes(proverPubKey)
require.NoError(t, err)
addr := bi.FillBytes(make([]byte, 32))
_, f, reward := a.RewardTrie.Get(addr)
require.Equal(t, f, uint64(15))
require.Equal(t, reward, uint64(161))
}

View File

@ -1,237 +0,0 @@
package application
import (
"bytes"
"crypto/rand"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
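// applyTranscript accepts an announced transcript only if it matches the
// locally finalized UpdatedTranscript byte-for-byte and passes the pairing
// checks below; on success it becomes the new LatestTranscript.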
func (a *CeremonyApplication) applyTranscript(
transcript *protobufs.CeremonyTranscript,
) error {
if a.UpdatedTranscript == nil {
return errors.Wrap(errors.New("invalid transcript"), "apply transcript")
}
if len(a.UpdatedTranscript.G1Powers) != len(transcript.G1Powers) {
return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
}
if len(a.UpdatedTranscript.G2Powers) != len(transcript.G2Powers) {
return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
}
if len(a.UpdatedTranscript.RunningG1_256Witnesses) !=
len(transcript.RunningG1_256Witnesses) ||
len(transcript.RunningG1_256Witnesses) !=
len(a.LatestTranscript.RunningG1_256Witnesses)+1 {
return errors.Wrap(
errors.New("invalid witnesses"),
"apply transcript",
)
}
if len(a.UpdatedTranscript.RunningG2_256Powers) !=
len(transcript.RunningG2_256Powers) ||
len(transcript.RunningG2_256Powers) !=
len(a.LatestTranscript.RunningG2_256Powers)+1 {
return errors.Wrap(
errors.New("invalid g2^256 powers"),
"apply transcript",
)
}
g1s := make([]curves.Point, len(a.UpdatedTranscript.G1Powers))
for i := range a.UpdatedTranscript.G1Powers {
i := i
if !bytes.Equal(
a.UpdatedTranscript.G1Powers[i].KeyValue,
transcript.G1Powers[i].KeyValue,
) {
return errors.Wrap(errors.New("invalid g1s"), "apply transcript")
}
g1 := &curves.PointBls48581G1{}
x, err := g1.FromAffineCompressed(
a.UpdatedTranscript.G1Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "apply transcript")
}
g1s[i] = x
}
g2s := make([]curves.Point, len(a.UpdatedTranscript.G2Powers))
for i := range a.UpdatedTranscript.G2Powers {
i := i
if !bytes.Equal(
a.UpdatedTranscript.G2Powers[i].KeyValue,
transcript.G2Powers[i].KeyValue,
) {
return errors.Wrap(errors.New("invalid g2s"), "apply transcript")
}
g2 := &curves.PointBls48581G2{}
x, err := g2.FromAffineCompressed(
a.UpdatedTranscript.G2Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "apply transcript")
}
g2s[i] = x
}
g1Witnesses := []*curves.PointBls48581G1{}
for i := range a.UpdatedTranscript.RunningG1_256Witnesses {
if !bytes.Equal(
a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
transcript.RunningG1_256Witnesses[i].KeyValue,
) {
return errors.Wrap(errors.New("invalid g1 witnesses"), "apply transcript")
}
g1w := &curves.PointBls48581G1{}
w, err := g1w.FromAffineCompressed(
a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "apply transcript")
}
g1w, _ = w.(*curves.PointBls48581G1)
g1Witnesses = append(g1Witnesses, g1w)
}
g2Powers := []*curves.PointBls48581G2{}
for i := range a.UpdatedTranscript.RunningG2_256Powers {
if !bytes.Equal(
a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
transcript.RunningG2_256Powers[i].KeyValue,
) {
return errors.Wrap(
errors.New("invalid g2^256 powers"),
"apply transcript",
)
}
g2w := &curves.PointBls48581G2{}
w, err := g2w.FromAffineCompressed(
a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
)
if err != nil {
return errors.Wrap(err, "apply transcript")
}
g2w, _ = w.(*curves.PointBls48581G2)
g2Powers = append(g2Powers, g2w)
}
if !g2Powers[len(g2Powers)-1].Equal(g2s[len(g2s)-1]) {
return errors.Wrap(
errors.New("invalid running g2^256 power"),
"apply transcript",
)
}
for i := 0; i < len(a.LatestTranscript.RunningG1_256Witnesses); i++ {
if !bytes.Equal(
a.LatestTranscript.RunningG1_256Witnesses[i].KeyValue,
a.UpdatedTranscript.RunningG1_256Witnesses[i].KeyValue,
) {
return errors.Wrap(
errors.New("running witness mismatch"),
"apply transcript",
)
}
}
for i := 0; i < len(a.LatestTranscript.RunningG2_256Powers); i++ {
if !bytes.Equal(
a.LatestTranscript.RunningG2_256Powers[i].KeyValue,
a.UpdatedTranscript.RunningG2_256Powers[i].KeyValue,
) {
return errors.Wrap(
errors.New("running g2^256 power mismatch"),
"apply transcript",
)
}
}
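// Randomized batch checks over the powers: with random scalars r_i (and their
// sum used as a common blinding factor), the first multi-pairing enforces
// e(sum_i r_i*[x^i]_1, [x]_2) == e(sum_i r_i*[x^{i+1}]_1, [1]_2), so every
// consecutive pair of G1 powers is related by the same secret committed in
// [x]_2; the second applies the mirrored check to the G2 powers.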
mpg2 := curves.BLS48581G2().Point.Generator().(curves.PairingPoint)
mpg2n := g2s[1].Neg().(curves.PairingPoint)
mpg1 := curves.BLS48581G1().Point.Generator().(curves.PairingPoint)
mpg1n := g1s[1].Neg().(curves.PairingPoint)
randoms := []curves.Scalar{}
sum := curves.BLS48581G1().Scalar.Zero()
for i := 0; i < len(g1s)-1; i++ {
randoms = append(randoms, curves.BLS48581G1().Scalar.Random(rand.Reader))
sum = sum.Add(randoms[i])
}
g1CheckR := g1s[0].SumOfProducts(g1s[1:], randoms)
g1CheckL := g1s[0].SumOfProducts(g1s[:len(g1s)-1], randoms)
if !mpg2.MultiPairing(
g1CheckL.(curves.PairingPoint),
mpg2n.Mul(sum).(curves.PairingPoint),
g1CheckR.(curves.PairingPoint),
mpg2.Mul(sum).(curves.PairingPoint),
).IsOne() {
return errors.Wrap(
errors.New("pairing check failed for g1s"),
"apply transcript",
)
}
var g2CheckL, g2CheckR curves.Point
g2Sum := curves.BLS48581G1().Scalar.Zero()
for i := 0; i < len(g2s)-1; i++ {
g2Sum = g2Sum.Add(randoms[i])
if g2CheckL == nil {
g2CheckL = g2s[0].Mul(randoms[0])
g2CheckR = g2s[1].Mul(randoms[0])
} else {
g2CheckL = g2CheckL.Add(g2s[i].Mul(randoms[i]))
g2CheckR = g2CheckR.Add(g2s[i+1].Mul(randoms[i]))
}
}
if !mpg2.MultiPairing(
mpg1n.Mul(g2Sum).(curves.PairingPoint),
g2CheckL.(curves.PairingPoint),
mpg1.Mul(g2Sum).(curves.PairingPoint),
g2CheckR.(curves.PairingPoint),
).IsOne() {
return errors.Wrap(
errors.New("pairing check failed for g2s"),
"apply transcript",
)
}
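// The running 256-power checkpoints are then tied to their witnesses: for
// every step the multi-pairing enforces e([1]_1, P_{i+1}) == e(w_{i+1}, P_i),
// so each new checkpoint is the previous one scaled by the secret whose G1
// witness is w_{i+1}.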
mp3 := make([]curves.PairingPoint, (len(g2Powers)-1)*4)
for i := 0; i < len(g2Powers)-1; i++ {
i := i
mp3[i*4+0] = g1Witnesses[i+1].Neg().(curves.PairingPoint)
mp3[i*4+1] = g2Powers[i]
mp3[i*4+2] = mpg1
mp3[i*4+3] = g2Powers[i+1]
}
l := mp3[0].MultiPairing(mp3...)
if !l.IsOne() {
return errors.Wrap(
errors.New("pairing check failed for witnesses"),
"apply transcript",
)
}
a.LatestTranscript = a.UpdatedTranscript
a.UpdatedTranscript = nil
return nil
}

View File

@ -1,488 +0,0 @@
package application
import (
"crypto"
"crypto/rand"
"fmt"
"testing"
"time"
"github.com/cloudflare/circl/sign/ed448"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// This does a full test of all 65536 powers; run it only if you are prepared
// to wait a long time.
func TestApplyTranscript_Slow(t *testing.T) {
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
olds := []*curves.ScalarBls48581{
curves.BLS48581G1().Scalar.One().(*curves.ScalarBls48581),
}
tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
taus := []*curves.ScalarBls48581{
curves.BLS48581G1().Scalar.One().(*curves.ScalarBls48581),
}
fmt.Println(time.Now().Unix())
fmt.Println("generate taus")
for i := 0; i < 65536; i++ {
olds = append(olds, olds[i].Mul(old).(*curves.ScalarBls48581))
taus = append(taus, taus[i].Mul(tau).(*curves.ScalarBls48581))
}
tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
fmt.Println(time.Now().Unix())
fmt.Println("taus generated")
proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
require.NoError(t, err)
proverSig, err := proverKey.Sign(
rand.Reader,
tauPubG2.ToAffineCompressed(),
crypto.Hash(0),
)
require.NoError(t, err)
fmt.Println(time.Now().Unix())
fmt.Println("prover signature generated")
blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
key := tau.Bytes()
for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
key[i], key[j] = key[j], key[i]
}
if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
require.Fail(t, "could not sign")
}
fmt.Println(time.Now().Unix())
fmt.Println("bls signature generated")
blsSig := blsSignature[:]
oldTranscript := &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{},
G2Powers: []*protobufs.BLS48581G2PublicKey{},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
olds[256],
).ToAffineCompressed(),
},
},
}
updatedTranscript := &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{},
G2Powers: []*protobufs.BLS48581G2PublicKey{},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
taus[256],
).ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
olds[256],
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
olds[256],
).Mul(taus[256]).ToAffineCompressed(),
},
},
}
for i, o := range olds {
oldTranscript.G1Powers = append(
oldTranscript.G1Powers,
&protobufs.BLS48581G1PublicKey{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
o,
).ToAffineCompressed(),
},
)
updatedTranscript.G1Powers = append(
updatedTranscript.G1Powers,
&protobufs.BLS48581G1PublicKey{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
o,
).Mul(taus[i]).ToAffineCompressed(),
},
)
if i < 257 {
oldTranscript.G2Powers = append(
oldTranscript.G2Powers,
&protobufs.BLS48581G2PublicKey{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
o,
).ToAffineCompressed(),
},
)
updatedTranscript.G2Powers = append(
updatedTranscript.G2Powers,
&protobufs.BLS48581G2PublicKey{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
o,
).Mul(taus[i]).ToAffineCompressed(),
},
)
}
}
fmt.Println(time.Now().Unix())
fmt.Println("transcripts generated")
a := &CeremonyApplication{
StateCount: 0,
RoundCount: 0,
LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
FinalCommits: []*protobufs.CeremonyTranscriptCommit{
{
ProverSignature: &protobufs.Ed448Signature{
Signature: proverSig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
},
ContributionSignature: &protobufs.BLS48581Signature{
Signature: blsSig,
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: tauPubG2.ToAffineCompressed(),
},
},
},
},
LatestTranscript: oldTranscript,
UpdatedTranscript: updatedTranscript,
}
err = a.applyTranscript(updatedTranscript)
require.NoError(t, err)
}
func TestApplyTranscript(t *testing.T) {
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
old2 := old.Mul(old)
old3 := old2.Mul(old)
tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
tau2 := tau.Mul(tau)
tau3 := tau2.Mul(tau)
tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
require.NoError(t, err)
proverSig, err := proverKey.Sign(
rand.Reader,
tauPubG2.ToAffineCompressed(),
crypto.Hash(0),
)
require.NoError(t, err)
blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
key := tau.Bytes()
for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
key[i], key[j] = key[j], key[i]
}
if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
require.Fail(t, "could not sign")
}
blsSig := blsSignature[:]
updatedTranscript := &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old2,
).Mul(tau2).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old3,
).Mul(tau3).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau,
).ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
},
}
a := &CeremonyApplication{
StateCount: 0,
RoundCount: 0,
LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
FinalCommits: []*protobufs.CeremonyTranscriptCommit{
{
ProverSignature: &protobufs.Ed448Signature{
Signature: proverSig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
},
ContributionSignature: &protobufs.BLS48581Signature{
Signature: blsSig,
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: tauPubG2.ToAffineCompressed(),
},
},
},
},
LatestTranscript: &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old2,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old3,
).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
},
UpdatedTranscript: updatedTranscript,
}
err = a.applyTranscript(updatedTranscript)
require.NoError(t, err)
}
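// A contribution that replaces the accumulated powers outright (g^tau^i
// instead of multiplying tau onto the existing powers) must be rejected,
// since it would erase earlier participants' entropy.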
func TestApplyRewritingTranscriptFails(t *testing.T) {
old := curves.BLS48581G1().Scalar.Random(rand.Reader)
old2 := old.Mul(old)
old3 := old2.Mul(old)
tau := curves.BLS48581G1().Scalar.Random(rand.Reader)
tau2 := tau.Mul(tau)
tau3 := tau2.Mul(tau)
tauPubG2 := curves.BLS48581G2().Point.Generator().Mul(tau)
proverPubKey, proverKey, err := ed448.GenerateKey(rand.Reader)
require.NoError(t, err)
proverSig, err := proverKey.Sign(
rand.Reader,
tauPubG2.ToAffineCompressed(),
crypto.Hash(0),
)
require.NoError(t, err)
blsSignature := make([]byte, int(bls48581.MODBYTES)+1)
key := tau.Bytes()
for i, j := 0, len(key)-1; i < j; i, j = i+1, j-1 {
key[i], key[j] = key[j], key[i]
}
if bls48581.Core_Sign(blsSignature, proverKey, key) != bls48581.BLS_OK {
require.Fail(t, "could not sign")
}
blsSig := blsSignature[:]
updatedTranscript := &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau2,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau3,
).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
tau,
).ToAffineCompressed(),
},
},
// Pretend we're accumulating still
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
tau,
).ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).Mul(tau).ToAffineCompressed(),
},
},
}
a := &CeremonyApplication{
StateCount: 0,
RoundCount: 0,
LobbyState: CEREMONY_APPLICATION_STATE_VALIDATING,
FinalCommits: []*protobufs.CeremonyTranscriptCommit{
{
ProverSignature: &protobufs.Ed448Signature{
Signature: proverSig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: proverPubKey,
},
},
ContributionSignature: &protobufs.BLS48581Signature{
Signature: blsSig,
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: tauPubG2.ToAffineCompressed(),
},
},
},
},
LatestTranscript: &protobufs.CeremonyTranscript{
G1Powers: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old2,
).ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
old3,
).ToAffineCompressed(),
},
},
G2Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().ToAffineCompressed(),
},
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
RunningG1_256Witnesses: []*protobufs.BLS48581G1PublicKey{
{
KeyValue: curves.BLS48581G1().Point.Generator().ToAffineCompressed(),
},
},
RunningG2_256Powers: []*protobufs.BLS48581G2PublicKey{
{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
old,
).ToAffineCompressed(),
},
},
},
UpdatedTranscript: updatedTranscript,
}
err = a.applyTranscript(updatedTranscript)
require.Error(t, err)
}

View File

@ -1,583 +0,0 @@
package application
import (
"encoding/binary"
"encoding/json"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/base/simplest"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/extension/kos"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/tecdsa/dkls/v1/sign"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/zkp/schnorr"
)
type MultiplyReceiverRound int
type MultiplySenderRound int
const (
MULTIPLY_RECEIVER_ROUND_UNINITIALIZED = MultiplyReceiverRound(iota)
MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY
MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER
MULTIPLY_RECEIVER_ROUND_3_VERIFY
MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT
MULTIPLY_RECEIVER_ROUND_5_MULTIPLY
MULTIPLY_RECEIVER_ROUND_6_DONE
)
const (
MULTIPLY_SENDER_ROUND_UNINITIALIZED = MultiplySenderRound(iota)
MULTIPLY_SENDER_ROUND_1_INITIALIZED
MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER
MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE
MULTIPLY_SENDER_ROUND_4_VERIFY
MULTIPLY_SENDER_ROUND_5_MULTIPLY
MULTIPLY_SENDER_ROUND_6_DONE
)
type Iterator interface {
Init() error
Next(message []byte) ([]byte, error)
IsDone() bool
GetPoints() []curves.Point
GetScalars() []curves.Scalar
}
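// MultiplySender and MultiplyReceiver wrap nekryptology's dkls two-party
// multiplication: a simplest-OT base exchange bootstraps a KOS correlated-OT
// extension, and for each (alpha_i, beta_i) input pair the two sides finish
// holding additive shares that sum to alpha_i * beta_i.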
type MultiplySender struct {
seed [32]byte
alphas []curves.Scalar
curve *curves.Curve
simplestReceiver *simplest.Receiver
sender []*sign.MultiplySender
step MultiplySenderRound
}
type MultiplyReceiver struct {
seed [32]byte
betas []curves.Scalar
curve *curves.Curve
simplestSender *simplest.Sender
receiver []*sign.MultiplyReceiver
step MultiplyReceiverRound
}
var _ Iterator = (*MultiplySender)(nil)
var _ Iterator = (*MultiplyReceiver)(nil)
type SchnorrProof struct {
C []byte
S []byte
Statement []byte
}
type KOSRound2Output struct {
Tau [][][]byte
}
type MultiplyRound2Output struct {
COTRound2Output *KOSRound2Output
R [][]byte
U []byte
}
func NewMultiplySender(
alphas []curves.Scalar,
curve *curves.Curve,
seed [32]byte,
) *MultiplySender {
return &MultiplySender{
seed: seed,
alphas: alphas,
curve: curve,
simplestReceiver: nil,
sender: []*sign.MultiplySender{},
step: MULTIPLY_SENDER_ROUND_UNINITIALIZED,
}
}
func NewMultiplyReceiver(
betas []curves.Scalar,
curve *curves.Curve,
seed [32]byte,
) *MultiplyReceiver {
return &MultiplyReceiver{
seed: seed,
betas: betas,
curve: curve,
simplestSender: nil,
receiver: []*sign.MultiplyReceiver{},
step: MULTIPLY_RECEIVER_ROUND_UNINITIALIZED,
}
}
func (s *MultiplySender) Init() error {
seed := sha3.Sum256(append(append([]byte{}, s.seed[:]...), []byte("OT")...))
var err error
s.simplestReceiver, err = simplest.NewReceiver(s.curve, 584, seed)
s.step = MULTIPLY_SENDER_ROUND_1_INITIALIZED
return err
}
func (r *MultiplyReceiver) Init() error {
seed := sha3.Sum256(append(append([]byte{}, r.seed[:]...), []byte("OT")...))
var err error
r.simplestSender, err = simplest.NewSender(r.curve, 584, seed)
r.step = MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY
return err
}
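// Next advances the sender one step of the message-driven state machine: it
// consumes the receiver's latest message, runs the matching base-OT or
// multiplication round, and returns the reply to send (nil when no reply is
// needed for that step).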
func (s *MultiplySender) Next(message []byte) ([]byte, error) {
switch s.step {
case MULTIPLY_SENDER_ROUND_1_INITIALIZED:
s.step = MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER
return nil, nil
case MULTIPLY_SENDER_ROUND_2_VERIFY_SCHNORR_AND_PAD_TRANSFER:
proof := &SchnorrProof{}
err := json.Unmarshal([]byte(message), proof)
if err != nil {
return nil, errors.Wrap(err, "next")
}
schnorrC, err := s.curve.Scalar.SetBytes(proof.C)
if err != nil {
return nil, errors.Wrap(err, "next")
}
schnorrS, err := s.curve.Scalar.SetBytes(proof.S)
if err != nil {
return nil, errors.Wrap(err, "next")
}
schnorrStatement, err := s.curve.Point.FromAffineCompressed(proof.Statement)
if err != nil {
return nil, errors.Wrap(err, "next")
}
schnorrProof := &schnorr.Proof{
C: schnorrC,
S: schnorrS,
Statement: schnorrStatement,
}
receiversMaskedChoice, err :=
s.simplestReceiver.Round2VerifySchnorrAndPadTransfer(schnorrProof)
if err != nil {
return nil, errors.Wrap(err, "next")
}
marshaledReceiversMaskedChoice, err := json.Marshal(receiversMaskedChoice)
if err != nil {
return nil, errors.Wrap(err, "next")
}
s.step = MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE
return marshaledReceiversMaskedChoice, nil
case MULTIPLY_SENDER_ROUND_3_RESPOND_TO_CHALLENGE:
challenge := [][32]byte{}
err := json.Unmarshal([]byte(message), &challenge)
if err != nil {
return nil, errors.Wrap(err, "next")
}
challengeResponse, err := s.simplestReceiver.Round4RespondToChallenge(
challenge,
)
if err != nil {
return nil, errors.Wrap(err, "next")
}
marshaledChallengeResponse, err := json.Marshal(challengeResponse)
if err != nil {
return nil, errors.Wrap(err, "next")
}
s.step = MULTIPLY_SENDER_ROUND_4_VERIFY
return marshaledChallengeResponse, errors.Wrap(err, "next")
case MULTIPLY_SENDER_ROUND_4_VERIFY:
challengeOpenings := [][2][32]byte{}
err := json.Unmarshal([]byte(message), &challengeOpenings)
if err != nil {
return nil, errors.Wrap(err, "next")
}
err = s.simplestReceiver.Round6Verify(challengeOpenings)
if err != nil {
return nil, errors.Wrap(err, "next")
}
baseOtReceiverOutput := s.simplestReceiver.Output
for i := 0; i < len(s.alphas); i++ {
seed := sha3.Sum256(
append(
append(
append([]byte{}, s.seed[:]...),
[]byte("MUL")...,
),
binary.BigEndian.AppendUint64([]byte{}, uint64(i))...,
),
)
sender, err := sign.NewMultiplySender(
584,
160,
baseOtReceiverOutput,
s.curve,
seed,
)
if err != nil {
return nil, errors.Wrap(err, "next")
}
s.sender = append(s.sender, sender)
}
s.step = MULTIPLY_SENDER_ROUND_5_MULTIPLY
return nil, nil
case MULTIPLY_SENDER_ROUND_5_MULTIPLY:
round1Outputs := []*kos.Round1Output{}
err := json.Unmarshal([]byte(message), &round1Outputs)
if err != nil {
return nil, errors.Wrap(err, "next")
}
if len(round1Outputs) != len(s.alphas) {
return nil, errors.Wrap(errors.New("incorrect number of outputs"), "next")
}
outputs := []*MultiplyRound2Output{}
for i := 0; i < len(s.alphas); i++ {
round2Output, err := s.sender[i].Round2Multiply(
s.alphas[i],
round1Outputs[i],
)
if err != nil {
return nil, errors.Wrap(err, "next")
}
tau := [][][]byte{}
for _, t := range round2Output.COTRound2Output.Tau {
tBytes := [][]byte{}
for _, ts := range t {
tBytes = append(tBytes, ts.Bytes())
}
tau = append(tau, tBytes)
}
r := [][]byte{}
for _, rs := range round2Output.R {
r = append(r, rs.Bytes())
}
outputs = append(outputs, &MultiplyRound2Output{
COTRound2Output: &KOSRound2Output{
Tau: tau,
},
R: r,
U: round2Output.U.Bytes(),
})
}
marshaledOutputs, err := json.Marshal(outputs)
if err != nil {
return nil, errors.Wrap(err, "next")
}
s.step = MULTIPLY_SENDER_ROUND_6_DONE
return marshaledOutputs, nil
}
return nil, nil
}
func (r *MultiplyReceiver) Next(message []byte) ([]byte, error) {
switch r.step {
case MULTIPLY_RECEIVER_ROUND_1_COMPUTE_AND_ZKP_TO_PUBKEY:
proof, err := r.simplestSender.Round1ComputeAndZkpToPublicKey()
if err != nil {
return nil, errors.Wrap(err, "next")
}
schnorrProof := &SchnorrProof{
C: proof.C.Bytes(),
S: proof.S.Bytes(),
Statement: proof.Statement.ToAffineCompressed(),
}
marshaledProof, err := json.Marshal(schnorrProof)
if err != nil {
return nil, errors.Wrap(err, "next")
}
r.step = MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER
return marshaledProof, nil
case MULTIPLY_RECEIVER_ROUND_2_PAD_TRANSFER:
receiversMaskedChoice := [][]byte{}
err := json.Unmarshal([]byte(message), &receiversMaskedChoice)
if err != nil {
return nil, errors.Wrap(err, "next")
}
challenge, err := r.simplestSender.Round3PadTransfer(receiversMaskedChoice)
if err != nil {
return nil, errors.Wrap(err, "next")
}
marshaledChallenge, err := json.Marshal(challenge)
if err != nil {
return nil, errors.Wrap(err, "next")
}
r.step = MULTIPLY_RECEIVER_ROUND_3_VERIFY
return marshaledChallenge, nil
case MULTIPLY_RECEIVER_ROUND_3_VERIFY:
challengeResponse := [][32]byte{}
err := json.Unmarshal([]byte(message), &challengeResponse)
if err != nil {
return nil, errors.Wrap(err, "next")
}
challengeOpenings, err := r.simplestSender.Round5Verify(challengeResponse)
if err != nil {
return nil, errors.Wrap(err, "next")
}
marshaledChallengeOpenings, err := json.Marshal(challengeOpenings)
if err != nil {
return nil, errors.Wrap(err, "next")
}
r.step = MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT
return marshaledChallengeOpenings, nil
case MULTIPLY_RECEIVER_ROUND_4_MULTIPLY_INIT:
baseOtSenderOutput := r.simplestSender.Output
outputs := []*kos.Round1Output{}
for i := 0; i < len(r.betas); i++ {
seed := sha3.Sum256(
append(
append(
append([]byte{}, r.seed[:]...),
[]byte("MUL")...,
),
binary.BigEndian.AppendUint64([]byte{}, uint64(i))...,
),
)
receiver, err := sign.NewMultiplyReceiver(
584,
160,
baseOtSenderOutput,
r.curve,
seed,
)
if err != nil {
return nil, errors.Wrap(err, "next")
}
r.receiver = append(r.receiver, receiver)
round1Output, err := r.receiver[i].Round1Initialize(r.betas[i])
if err != nil {
return nil, errors.Wrap(err, "next")
}
outputs = append(outputs, round1Output)
}
marshaledOutputs, err := json.Marshal(outputs)
if err != nil {
return nil, errors.Wrap(err, "next")
}
r.step = MULTIPLY_RECEIVER_ROUND_5_MULTIPLY
return marshaledOutputs, nil
case MULTIPLY_RECEIVER_ROUND_5_MULTIPLY:
round2Output := []*MultiplyRound2Output{}
err := json.Unmarshal([]byte(message), &round2Output)
if err != nil {
return nil, errors.Wrap(err, "next")
}
if len(round2Output) != len(r.betas) {
return nil, errors.Wrap(errors.New("incorrect number of outputs"), "next")
}
for i := 0; i < len(r.betas); i++ {
rawRound2Output := &sign.MultiplyRound2Output{
COTRound2Output: &kos.Round2Output{
Tau: [][]curves.Scalar{},
},
R: []curves.Scalar{},
U: nil,
}
for _, t := range round2Output[i].COTRound2Output.Tau {
tScalars := []curves.Scalar{}
for _, ts := range t {
sc, err := r.curve.Scalar.SetBytes(ts)
if err != nil {
return nil, errors.Wrap(err, "next")
}
tScalars = append(tScalars, sc)
}
rawRound2Output.COTRound2Output.Tau = append(
rawRound2Output.COTRound2Output.Tau,
tScalars,
)
}
for _, rs := range round2Output[i].R {
sc, err := r.curve.Scalar.SetBytes(rs)
if err != nil {
return nil, errors.Wrap(err, "next")
}
rawRound2Output.R = append(rawRound2Output.R, sc)
}
rawRound2Output.U, err = r.curve.Scalar.SetBytes(round2Output[i].U)
if err != nil {
return nil, errors.Wrap(err, "next")
}
err := r.receiver[i].Round3Multiply(rawRound2Output)
if err != nil {
return nil, errors.Wrap(err, "next")
}
}
r.step = MULTIPLY_RECEIVER_ROUND_6_DONE
return nil, nil
}
return nil, nil
}
func (s *MultiplySender) IsDone() bool {
return s.step == MULTIPLY_SENDER_ROUND_6_DONE
}
func (r *MultiplyReceiver) IsDone() bool {
return r.step == MULTIPLY_RECEIVER_ROUND_6_DONE
}
func (s *MultiplySender) GetPoints() []curves.Point {
points := []curves.Point{}
for i := 0; i < len(s.alphas); i++ {
points = append(
points,
s.curve.NewGeneratorPoint().Mul(
s.sender[i].OutputAdditiveShare,
),
)
}
return points
}
func (r *MultiplyReceiver) GetPoints() []curves.Point {
points := []curves.Point{}
for i := 0; i < len(r.betas); i++ {
points = append(
points,
r.curve.NewGeneratorPoint().Mul(
r.receiver[i].OutputAdditiveShare,
),
)
}
return points
}
func (s *MultiplySender) GetScalars() []curves.Scalar {
scalars := []curves.Scalar{}
for i := 0; i < len(s.alphas); i++ {
scalars = append(
scalars,
s.sender[i].OutputAdditiveShare,
)
}
return scalars
}
func (r *MultiplyReceiver) GetScalars() []curves.Scalar {
scalars := []curves.Scalar{}
for i := 0; i < len(r.betas); i++ {
scalars = append(
scalars,
r.receiver[i].OutputAdditiveShare,
)
}
return scalars
}
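// The first output additive share doubles as a BLS48-581 signing key: each
// side signs the prover's public key bytes with its share, and
// VerifySignatureOfProverKey later checks such a signature against the
// corresponding public point in G2.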
func (s *MultiplySender) GetSignatureOfProverKey(
proverKey []byte,
) ([]byte, error) {
signature := make([]byte, int(bls48581.MODBYTES)+1)
key := s.sender[0].OutputAdditiveShare.Bytes()
if bls48581.Core_Sign(signature, proverKey, key) != bls48581.BLS_OK {
return nil, errors.Wrap(
errors.New("could not sign"),
"get signature of prover key",
)
}
return signature[:], nil
}
func (r *MultiplyReceiver) GetSignatureOfProverKey(
proverKey []byte,
) ([]byte, error) {
signature := make([]byte, int(bls48581.MODBYTES)+1)
key := r.receiver[0].OutputAdditiveShare.Bytes()
if bls48581.Core_Sign(signature, proverKey, key) != bls48581.BLS_OK {
return nil, errors.Wrap(
errors.New("could not sign"),
"get signature of prover key",
)
}
return signature[:], nil
}
func SignProverKeyForCommit(
proverKey []byte,
commitKey curves.Scalar,
) ([]byte, error) {
signature := make([]byte, int(bls48581.MODBYTES)+1)
key := commitKey.Bytes()
if bls48581.Core_Sign(signature, proverKey, key) != bls48581.BLS_OK {
return nil, errors.Wrap(
errors.New("could not sign"),
"sign prover key for commit",
)
}
return signature[:], nil
}
func VerifySignatureOfProverKey(
proverKey []byte,
signature []byte,
publicPointG2 curves.Point,
) error {
w := publicPointG2.ToAffineCompressed()
if bls48581.Core_Verify(signature, proverKey, w) != bls48581.BLS_OK {
return errors.Wrap(
errors.New("could not verify"),
"verify signature of prover key",
)
}
return nil
}

View File

@ -1,169 +0,0 @@
package application
import (
"bytes"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
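// ProcessRound executes one bracket round of the pairwise multiplication
// protocol: for each peer in the complementary block it derives a shared seed
// from the ECDH of the identity keys, drives a MultiplySender or
// MultiplyReceiver to completion over the send/recv callbacks, and returns,
// for every input secret, the sum of the additive shares produced with all
// counterparties.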
func ProcessRound(
i []byte,
idkKey curves.Scalar,
round int,
peers [][]byte,
peerIdks []curves.Point,
secrets []curves.Scalar,
curve *curves.Curve,
send func(int, []byte, []byte) error,
recv func(int, []byte) ([]byte, error),
seed []byte,
) ([]curves.Scalar, error) {
roundPeers, roundIdks, isReceiver := GetPairings(i, round, peers, peerIdks)
if roundPeers == nil {
return nil, nil
}
var participants []Iterator
if isReceiver {
for _, roundIdk := range roundIdks {
hashKeySeed := sha3.Sum256(
append(
roundIdk.Mul(idkKey).ToAffineCompressed(),
seed...,
),
)
participant := NewMultiplyReceiver(secrets, curve, hashKeySeed)
participants = append(participants, participant)
if err := participant.Init(); err != nil {
return nil, errors.Wrap(err, "process round")
}
}
} else {
for _, roundIdk := range roundIdks {
hashKeySeed := sha3.Sum256(
append(
roundIdk.Mul(idkKey).ToAffineCompressed(),
seed...,
),
)
participant := NewMultiplySender(secrets, curve, hashKeySeed)
participants = append(participants, participant)
if err := participant.Init(); err != nil {
return nil, errors.Wrap(err, "process round")
}
}
}
eg := errgroup.Group{}
eg.SetLimit(len(participants))
for j := range participants {
j := j
eg.Go(func() error {
var msg []byte
seq := 0
for !participants[j].IsDone() {
var err error
if isReceiver {
msg, err = recv(seq, roundPeers[j])
if err != nil {
return err
}
}
next, err := participants[j].Next(msg)
if err != nil {
return err
}
err = send(seq, roundPeers[j], next)
if err != nil {
return err
}
if !isReceiver {
msg, err = recv(seq, roundPeers[j])
if err != nil {
return err
}
}
seq++
}
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, errors.Wrap(err, "process round")
}
sums := make([]curves.Scalar, len(secrets))
for j := range sums {
sums[j] = curve.Scalar.Zero()
}
for _, participant := range participants {
scalars := participant.GetScalars()
for j := range sums {
sums[j] = sums[j].Add(scalars[j])
}
}
return sums, nil
}
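// GetPairings implements a binary-bracket schedule: in round r, peer i is
// paired with the complementary block of 2^(r-1) peers, so over log2(n)
// rounds (for a power-of-two peer count) each peer interacts with every other
// peer exactly once. The returned bool is true for peers in the lower-indexed
// half of the pairing, which ProcessRound treats as the receiving side.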
func GetPairings(i []byte, round int, peers [][]byte, peerIdks []curves.Point) (
[][]byte,
[]curves.Point,
bool,
) {
n := len(peers)
index := -1
for j := 0; j < n; j++ {
if bytes.Equal([]byte(peers[j]), []byte(i)) {
index = j + 1
break
}
}
if index < 1 || index > n {
return nil, nil, false // invalid input
}
power := uint64(n) >> round
if power == 0 {
return nil, nil, false // rounds exceeded
}
// Find the size of the subset for this round
subsetSize := 1 << (round - 1)
// Determine the subset that i belongs to
subsetIndex := (index - 1) / subsetSize
// If subsetIndex is odd, i's pairings are in the subset before it
// If subsetIndex is even, i's pairings are in the subset after it
complementarySubsetStart := 0
if subsetIndex%2 == 0 {
complementarySubsetStart = (subsetIndex+1)*subsetSize + 1
} else {
complementarySubsetStart = subsetIndex*subsetSize - subsetSize + 1
}
// Generate the pairings
pairings := make([][]byte, subsetSize)
idks := make([]curves.Point, subsetSize)
for j := 0; j < subsetSize; j++ {
pairings[j] = peers[complementarySubsetStart+j-1]
idks[j] = peerIdks[complementarySubsetStart+j-1]
}
return pairings, idks, (index - 1) < complementarySubsetStart
}

View File

@ -1,442 +0,0 @@
package application_test
import (
"crypto/rand"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/syncmap"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
bls48581 "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/ot/base/simplest"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/channel"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
)
func TestPairings(t *testing.T) {
a := []byte{0x01}
b := []byte{0x02}
c := []byte{0x03}
d := []byte{0x04}
e := []byte{0x05}
f := []byte{0x06}
g := []byte{0x07}
h := []byte{0x08}
peers := [][]byte{a, b, c, d, e, f, g, h}
idks := []curves.Point{
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
curves.ED448().Point.Generator(),
}
a1pairing, _, isABob := application.GetPairings(a, 1, peers, idks)
b1pairing, _, isBBob := application.GetPairings(b, 1, peers, idks)
c1pairing, _, isCBob := application.GetPairings(c, 1, peers, idks)
d1pairing, _, isDBob := application.GetPairings(d, 1, peers, idks)
e1pairing, _, isEBob := application.GetPairings(e, 1, peers, idks)
f1pairing, _, isFBob := application.GetPairings(f, 1, peers, idks)
g1pairing, _, isGBob := application.GetPairings(g, 1, peers, idks)
h1pairing, _, isHBob := application.GetPairings(h, 1, peers, idks)
require.ElementsMatch(t, a1pairing, [][]byte{b})
require.ElementsMatch(t, b1pairing, [][]byte{a})
require.ElementsMatch(t, c1pairing, [][]byte{d})
require.ElementsMatch(t, d1pairing, [][]byte{c})
require.ElementsMatch(t, e1pairing, [][]byte{f})
require.ElementsMatch(t, f1pairing, [][]byte{e})
require.ElementsMatch(t, g1pairing, [][]byte{h})
require.ElementsMatch(t, h1pairing, [][]byte{g})
require.ElementsMatch(t,
[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
[]bool{false, true, false, true, false, true, false, true},
)
a2pairing, _, isABob := application.GetPairings(a, 2, peers, idks)
b2pairing, _, isBBob := application.GetPairings(b, 2, peers, idks)
c2pairing, _, isCBob := application.GetPairings(c, 2, peers, idks)
d2pairing, _, isDBob := application.GetPairings(d, 2, peers, idks)
e2pairing, _, isEBob := application.GetPairings(e, 2, peers, idks)
f2pairing, _, isFBob := application.GetPairings(f, 2, peers, idks)
g2pairing, _, isGBob := application.GetPairings(g, 2, peers, idks)
h2pairing, _, isHBob := application.GetPairings(h, 2, peers, idks)
require.ElementsMatch(t, a2pairing, [][]byte{c, d})
require.ElementsMatch(t, b2pairing, [][]byte{c, d})
require.ElementsMatch(t, c2pairing, [][]byte{a, b})
require.ElementsMatch(t, d2pairing, [][]byte{a, b})
require.ElementsMatch(t, e2pairing, [][]byte{g, h})
require.ElementsMatch(t, f2pairing, [][]byte{g, h})
require.ElementsMatch(t, g2pairing, [][]byte{e, f})
require.ElementsMatch(t, h2pairing, [][]byte{e, f})
require.ElementsMatch(t,
[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
[]bool{false, false, true, true, false, false, true, true},
)
a3pairing, _, isABob := application.GetPairings(a, 3, peers, idks)
b3pairing, _, isBBob := application.GetPairings(b, 3, peers, idks)
c3pairing, _, isCBob := application.GetPairings(c, 3, peers, idks)
d3pairing, _, isDBob := application.GetPairings(d, 3, peers, idks)
e3pairing, _, isEBob := application.GetPairings(e, 3, peers, idks)
f3pairing, _, isFBob := application.GetPairings(f, 3, peers, idks)
g3pairing, _, isGBob := application.GetPairings(g, 3, peers, idks)
h3pairing, _, isHBob := application.GetPairings(h, 3, peers, idks)
require.ElementsMatch(t, a3pairing, [][]byte{e, f, g, h})
require.ElementsMatch(t, b3pairing, [][]byte{e, f, g, h})
require.ElementsMatch(t, c3pairing, [][]byte{e, f, g, h})
require.ElementsMatch(t, d3pairing, [][]byte{e, f, g, h})
require.ElementsMatch(t, e3pairing, [][]byte{a, b, c, d})
require.ElementsMatch(t, f3pairing, [][]byte{a, b, c, d})
require.ElementsMatch(t, g3pairing, [][]byte{a, b, c, d})
require.ElementsMatch(t, h3pairing, [][]byte{a, b, c, d})
require.ElementsMatch(t,
[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
[]bool{false, false, false, false, true, true, true, true},
)
a4pairing, _, isABob := application.GetPairings(a, 4, peers, idks)
b4pairing, _, isBBob := application.GetPairings(b, 4, peers, idks)
c4pairing, _, isCBob := application.GetPairings(c, 4, peers, idks)
d4pairing, _, isDBob := application.GetPairings(d, 4, peers, idks)
e4pairing, _, isEBob := application.GetPairings(e, 4, peers, idks)
f4pairing, _, isFBob := application.GetPairings(f, 4, peers, idks)
g4pairing, _, isGBob := application.GetPairings(g, 4, peers, idks)
h4pairing, _, isHBob := application.GetPairings(h, 4, peers, idks)
require.ElementsMatch(t, a4pairing, [][]byte{})
require.ElementsMatch(t, b4pairing, [][]byte{})
require.ElementsMatch(t, c4pairing, [][]byte{})
require.ElementsMatch(t, d4pairing, [][]byte{})
require.ElementsMatch(t, e4pairing, [][]byte{})
require.ElementsMatch(t, f4pairing, [][]byte{})
require.ElementsMatch(t, g4pairing, [][]byte{})
require.ElementsMatch(t, h4pairing, [][]byte{})
require.ElementsMatch(t,
[]bool{isABob, isBBob, isCBob, isDBob, isEBob, isFBob, isGBob, isHBob},
[]bool{false, false, false, false, false, false, false, false},
)
}
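
// TestProcessRound runs application.ProcessRound over three rounds for eight
// peers, each starting with a random scalar x and its square x^2, and checks
// that the per-peer outputs are additive shares whose point sums equal
// G * prod(x_j) and G * prod(x_j^2) over all peers.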
func TestProcessRound(t *testing.T) {
a := []byte{0x01}
aKey := curves.ED448().Scalar.Random(rand.Reader)
aPoint := curves.ED448().Point.Generator().Mul(aKey)
b := []byte{0x02}
bKey := curves.ED448().Scalar.Random(rand.Reader)
bPoint := curves.ED448().Point.Generator().Mul(bKey)
c := []byte{0x03}
cKey := curves.ED448().Scalar.Random(rand.Reader)
cPoint := curves.ED448().Point.Generator().Mul(cKey)
d := []byte{0x04}
dKey := curves.ED448().Scalar.Random(rand.Reader)
dPoint := curves.ED448().Point.Generator().Mul(dKey)
e := []byte{0x05}
eKey := curves.ED448().Scalar.Random(rand.Reader)
ePoint := curves.ED448().Point.Generator().Mul(eKey)
f := []byte{0x06}
fKey := curves.ED448().Scalar.Random(rand.Reader)
fPoint := curves.ED448().Point.Generator().Mul(fKey)
g := []byte{0x07}
gKey := curves.ED448().Scalar.Random(rand.Reader)
gPoint := curves.ED448().Point.Generator().Mul(gKey)
h := []byte{0x08}
hKey := curves.ED448().Scalar.Random(rand.Reader)
hPoint := curves.ED448().Point.Generator().Mul(hKey)
peerKeys := []curves.Scalar{aKey, bKey, cKey, dKey, eKey, fKey, gKey, hKey}
peerPoints := [][]byte{
aPoint.ToAffineCompressed(),
bPoint.ToAffineCompressed(),
cPoint.ToAffineCompressed(),
dPoint.ToAffineCompressed(),
ePoint.ToAffineCompressed(),
fPoint.ToAffineCompressed(),
gPoint.ToAffineCompressed(),
hPoint.ToAffineCompressed(),
}
idkPoints := []curves.Point{
aPoint,
bPoint,
cPoint,
dPoint,
ePoint,
fPoint,
gPoint,
hPoint,
}
peers := [][]byte{a, b, c, d, e, f, g, h}
peerSecrets := [][]curves.Scalar{}
originalPeerSecrets := [][]curves.Scalar{}
for i := range peers {
fmt.Printf("generating secrets for peer %d\n", i)
x := curves.BLS48581G1().Scalar.Random(rand.Reader)
xs := x.Clone()
secrets := []curves.Scalar{x}
originalSecrets := []curves.Scalar{x}
fmt.Printf("secret %d(%d): %+x\n", i, 0, xs.Bytes())
for j := 0; j < 1; j++ {
xs = xs.Mul(x)
secrets = append(secrets, xs)
fmt.Printf("secret %d(%d): %+x\n", i, 1, xs.Bytes())
originalSecrets = append(originalSecrets, xs)
}
peerSecrets = append(peerSecrets, secrets)
originalPeerSecrets = append(originalPeerSecrets, originalSecrets)
}
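// messages acts as an in-memory mailbox keyed by (sequence byte, sender,
// receiver) so the eight concurrent peers can exchange round messages without
// a real transport; recv polls until the expected message arrives.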
messages := syncmap.Map{}
send := func(peer []byte) func(seq int, dst, msg []byte) error {
return func(seq int, dst, msg []byte) error {
fmt.Printf("send %d bytes for seq %d to %+x\n", len(msg), seq, dst)
b := byte(seq)
dst = append(append(append([]byte{}, b), peer...), dst...)
if msg == nil {
msg = []byte{0x01}
}
messages.Store(string(dst), string(msg))
return nil
}
}
recv := func(peer []byte) func(seq int, src []byte) ([]byte, error) {
return func(seq int, src []byte) ([]byte, error) {
fmt.Printf("recv %d from %+x\n", seq, src)
b := byte(seq)
bsrc := append(append(append([]byte{}, b), src...), peer...)
msg, ok := messages.LoadAndDelete(string(bsrc))
for !ok {
fmt.Printf("no message yet, waiting for recv %d from %+x\n", seq, src)
time.Sleep(100 * time.Millisecond)
msg, ok = messages.LoadAndDelete(string(bsrc))
}
return []byte(msg.(string)), nil
}
}
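// Run rounds 1 through 3 for all eight peers concurrently; each peer replaces
// its shares with the output of ProcessRound before the next round begins.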
for j := 1; j < 4; j++ {
eg := errgroup.Group{}
eg.SetLimit(8)
for i := range peers {
i := i
eg.Go(func() error {
fmt.Printf("running round %d for %d\n", j, i)
newSecrets, err := application.ProcessRound(
peerPoints[i],
peerKeys[i],
j,
peerPoints,
idkPoints,
peerSecrets[i],
curves.BLS48581G1(),
send(peerPoints[i]),
recv(peerPoints[i]),
[]byte{0x01},
)
require.NoError(t, err)
for s := range newSecrets {
fmt.Printf("secret %d(%d): %+x\n", i, s, newSecrets[s].Bytes())
}
peerSecrets[i] = newSecrets
return err
})
}
err := eg.Wait()
require.NoError(t, err)
}
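// Recombine: for each share index, the product of all original scalars times
// the generator must equal the sum of the final additive shares times the
// generator.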
checks := []curves.Point{}
for i := 0; i < len(originalPeerSecrets[0]); i++ {
mul := curves.BLS48581G1().Scalar.One()
for j := 0; j < len(originalPeerSecrets); j++ {
mul = mul.Mul(originalPeerSecrets[j][i])
}
checks = append(checks, curves.BLS48581G1().Point.Generator().Mul(mul))
}
result := []curves.Point{}
for i := 0; i < len(peerSecrets[0]); i++ {
var add curves.Point = nil
for j := 0; j < len(peerSecrets); j++ {
if add == nil {
add = curves.BLS48581G1().Point.Generator().Mul(peerSecrets[j][i])
} else {
add = add.Add(
curves.BLS48581G1().Point.Generator().Mul(peerSecrets[j][i]),
)
}
}
result = append(result, add)
}
for i := range checks {
require.Equal(t, true, checks[i].Equal(result[i]))
}
}
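
// TestCompositeConstructionOfBLS exercises the multiply sender/receiver pair on
// two scalar pairs, (alpha, beta) and (alpha^2, beta^2), tunneling every
// protocol message through an X3DH-derived double ratchet channel, then
// verifies both the resulting additive shares and the signatures over the
// prover key.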
func TestCompositeConstructionOfBLS(t *testing.T) {
// needed to verify signatures
bls48581.Init()
curve := curves.BLS48581G1()
hashKeySeed := [simplest.DigestSize]byte{}
_, err := rand.Read(hashKeySeed[:])
require.NoError(t, err)
alpha := curve.Scalar.Random(rand.Reader)
beta := curve.Scalar.Random(rand.Reader)
alpha2 := alpha.Mul(alpha)
beta2 := beta.Mul(beta)
sender := application.NewMultiplySender([]curves.Scalar{alpha, alpha2}, curve, hashKeySeed)
receiver := application.NewMultiplyReceiver([]curves.Scalar{beta, beta2}, curve, hashKeySeed)
var senderMsg []byte = nil
var receiverMsg []byte = nil
sErr := sender.Init()
require.NoError(t, sErr)
rErr := receiver.Init()
require.NoError(t, rErr)
x448SendingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingEphemeralPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingSignedPrePrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingIdentityPrivateKey)
x448SendingEphemeralKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingEphemeralPrivateKey)
x448ReceivingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingIdentityPrivateKey)
x448ReceivingSignedPreKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingSignedPrePrivateKey)
senderResult := channel.SenderX3DH(
x448SendingIdentityPrivateKey,
x448SendingEphemeralPrivateKey,
x448ReceivingIdentityKey,
x448ReceivingSignedPreKey,
96,
)
receiverResult := channel.ReceiverX3DH(
x448ReceivingIdentityPrivateKey,
x448ReceivingSignedPrePrivateKey,
x448SendingIdentityKey,
x448SendingEphemeralKey,
96,
)
drSender, err := channel.NewDoubleRatchetParticipant(
senderResult[:32],
senderResult[32:64],
senderResult[64:],
true,
x448SendingEphemeralPrivateKey,
x448ReceivingSignedPreKey,
curves.ED448(),
nil,
)
require.NoError(t, err)
drReceiver, err := channel.NewDoubleRatchetParticipant(
receiverResult[:32],
receiverResult[32:64],
receiverResult[64:],
false,
x448ReceivingSignedPrePrivateKey,
x448SendingEphemeralKey,
curves.ED448(),
nil,
)
require.NoError(t, err)
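// Drive the multiplication protocol to completion, encrypting each message in
// both directions with the double ratchet before handing it to the other side.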
for !sender.IsDone() && !receiver.IsDone() {
senderMsg, err = sender.Next(receiverMsg)
require.NoError(t, err)
senderEnvelope, err := drSender.RatchetEncrypt(senderMsg)
require.NoError(t, err)
senderMsg, err = drReceiver.RatchetDecrypt(senderEnvelope)
require.NoError(t, err)
receiverMsg, err = receiver.Next(senderMsg)
require.NoError(t, err)
receiverEnvelope, err := drReceiver.RatchetEncrypt(receiverMsg)
require.NoError(t, err)
receiverMsg, err = drSender.RatchetDecrypt(receiverEnvelope)
require.NoError(t, err)
}
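// G * alpha * beta must equal the sum of the sender and receiver share points,
// and likewise G * alpha^2 * beta^2 for the squared pair.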
senderPoints := sender.GetPoints()
receiverPoints := receiver.GetPoints()
generator := alpha.Point().Generator()
product := generator.Mul(alpha).Mul(beta)
sum := senderPoints[0].Add(receiverPoints[0])
product2 := generator.Mul(alpha2).Mul(beta2)
sum2 := senderPoints[1].Add(receiverPoints[1])
fmt.Println(alpha.Bytes())
fmt.Println(beta.Bytes())
fmt.Println(curves.BLS48581G1().Point.Generator().ToAffineCompressed())
fmt.Println(sum.ToAffineCompressed())
fmt.Println(product.ToAffineCompressed())
require.Equal(t, true, product.Equal(sum))
require.Equal(t, true, product2.Equal(sum2))
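// Each party signs the supplied prover key bytes; verification succeeds against
// the matching G2 public key and fails when the prover key bytes and signature
// are swapped between the two parties.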
sendSig, err := sender.GetSignatureOfProverKey([]byte{0x01})
require.NoError(t, err)
require.Equal(t, len(sendSig), 74)
recvSig, err := receiver.GetSignatureOfProverKey([]byte{0x02})
require.NoError(t, err)
require.Equal(t, len(recvSig), 74)
require.NoError(t, application.VerifySignatureOfProverKey(
[]byte{0x01},
sendSig,
curves.BLS48581G2().Point.Generator().Mul(
sender.GetScalars()[0],
),
))
require.NoError(t, application.VerifySignatureOfProverKey(
[]byte{0x02},
recvSig,
curves.BLS48581G2().Point.Generator().Mul(
receiver.GetScalars()[0],
),
))
require.Error(t, application.VerifySignatureOfProverKey(
[]byte{0x02},
sendSig,
curves.BLS48581G2().Point.Generator().Mul(
sender.GetScalars()[0],
),
))
require.Error(t, application.VerifySignatureOfProverKey(
[]byte{0x01},
recvSig,
curves.BLS48581G2().Point.Generator().Mul(
receiver.GetScalars()[0],
),
))
}

File diff suppressed because it is too large

View File

@ -7,6 +7,8 @@ toolchain go1.22.1
// A necessary hack until source.quilibrium.com is open to all
replace source.quilibrium.com/quilibrium/monorepo/nekryptology => ../nekryptology
replace source.quilibrium.com/quilibrium/monorepo/bls48581 => ../bls48581
replace source.quilibrium.com/quilibrium/monorepo/vdf => ../vdf
replace github.com/libp2p/go-libp2p => ../go-libp2p
@ -25,6 +27,7 @@ require (
github.com/libp2p/go-libp2p-kad-dht v0.23.0
google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v2 v2.4.0
source.quilibrium.com/quilibrium/monorepo/bls48581 v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/vdf v0.0.0-00010101000000-000000000000

View File

@ -576,6 +576,8 @@ golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -592,8 +594,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -621,6 +623,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -636,6 +640,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -668,6 +674,7 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -678,6 +685,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -713,8 +722,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -27,10 +27,7 @@ import (
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
"github.com/cloudflare/circl/sign/ed448"
@ -214,7 +211,7 @@ func main() {
count++
}
if count < len(signatories)/2 {
if count < len(signatories)/2+len(signatories)%2 {
fmt.Printf("Quorum on signatures not met")
os.Exit(1)
}
@ -420,6 +417,7 @@ func main() {
} else {
node, err = app.NewNode(nodeConfig, report)
}
if err != nil {
panic(err)
}
@ -509,24 +507,54 @@ func stopDataWorkers() {
}
}
// Reintroduce at a later date
func RunCompaction(clockStore *store.PebbleClockStore) {
intrinsicFilter := append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
)
fmt.Println("running compaction")
func RunMigrationIfNeeded(
configDir string,
nodeConfig *config.Config,
) {
shouldMigrate := false
migrationInfo := []byte{0x00, 0x00, 0x00}
_, err := os.Stat(filepath.Join(configDir, "MIGRATIONS"))
if err != nil && os.IsNotExist(err) {
fmt.Println("Migrations file not found, will perform migration...")
shouldMigrate = true
}
if err := clockStore.Compact(
intrinsicFilter,
); err != nil {
if errors.Is(err, store.ErrNotFound) {
fmt.Println("missing compaction data, skipping for now", zap.Error(err))
} else {
if !shouldMigrate {
migrationInfo, err = os.ReadFile(filepath.Join(configDir, "MIGRATIONS"))
if err != nil {
panic(err)
}
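// The MIGRATIONS marker holds three version bytes; 0x01 0x04 0x13 (1.4.19) is
// the current value, so any other content forces the migration below.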
if len(migrationInfo) < 3 ||
!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x13}) {
fmt.Println("Migrations file outdated, will perform migration...")
shouldMigrate = true
}
}
// If subsequent migrations arise, we will need to distinguish by version
if shouldMigrate {
fmt.Println("Running migration...")
// Easiest migration in the world.
err := os.RemoveAll(filepath.Join(configDir, "store"))
if err != nil {
fmt.Println("ERROR: Could not remove store, please be sure to do this before restarting the node.")
panic(err)
}
err = os.WriteFile(
filepath.Join(configDir, "MIGRATIONS"),
[]byte{0x01, 0x04, 0x13},
fs.FileMode(0600),
)
if err != nil {
fmt.Println("ERROR: Could not save migration file.")
panic(err)
}
fmt.Println("Migration completed.")
}
fmt.Println("compaction complete")
}
func RunMigrationIfNeeded(
@ -664,16 +692,11 @@ func RunSelfTestIfNeeded(
rand.Read(p128bytes)
rand.Read(p1024bytes)
rand.Read(p65536bytes)
kzgProver := kzg.DefaultKZGProver()
p16, _ := kzgProver.BytesToPolynomial(p16bytes)
p128, _ := kzgProver.BytesToPolynomial(p128bytes)
p1024, _ := kzgProver.BytesToPolynomial(p1024bytes)
p65536, _ := kzgProver.BytesToPolynomial(p65536bytes)
kzgProver := qcrypto.NewKZGInclusionProver(logger)
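// The inclusion prover's raw interface takes the byte slices and degree
// directly, replacing the previous BytesToPolynomial/Commit/Prove sequence.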
logger.Info("generating 16 degree commitment metric")
start = time.Now().UnixMilli()
c16, err := kzgProver.Commit(p16)
_, err = kzgProver.CommitRaw(p16bytes, 16)
if err != nil {
panic(err)
}
@ -682,7 +705,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 128 degree commitment metric")
start = time.Now().UnixMilli()
c128, err := kzgProver.Commit(p128)
_, err = kzgProver.CommitRaw(p128bytes, 128)
if err != nil {
panic(err)
}
@ -691,7 +714,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 1024 degree commitment metric")
start = time.Now().UnixMilli()
c1024, err := kzgProver.Commit(p1024)
_, err = kzgProver.CommitRaw(p1024bytes, 1024)
if err != nil {
panic(err)
}
@ -700,7 +723,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 65536 degree commitment metric")
start = time.Now().UnixMilli()
c65536, err := kzgProver.Commit(p65536)
_, err = kzgProver.CommitRaw(p65536bytes, 65536)
if err != nil {
panic(err)
}
@ -709,7 +732,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 16 degree proof metric")
start = time.Now().UnixMilli()
_, err = kzgProver.Prove(p16, c16, p16[0])
_, err = kzgProver.ProveRaw(p16bytes, 0, 16)
if err != nil {
panic(err)
}
@ -718,7 +741,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 128 degree proof metric")
start = time.Now().UnixMilli()
_, err = kzgProver.Prove(p128, c128, p128[0])
_, err = kzgProver.ProveRaw(p128bytes, 0, 128)
if err != nil {
panic(err)
}
@ -727,7 +750,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 1024 degree proof metric")
start = time.Now().UnixMilli()
_, err = kzgProver.Prove(p1024, c1024, p1024[0])
_, err = kzgProver.ProveRaw(p1024bytes, 0, 1024)
if err != nil {
panic(err)
}
@ -736,7 +759,7 @@ func RunSelfTestIfNeeded(
logger.Info("generating 65536 degree proof metric")
start = time.Now().UnixMilli()
_, err = kzgProver.Prove(p65536, c65536, p65536[0])
_, err = kzgProver.ProveRaw(p65536bytes, 0, 65536)
if err != nil {
panic(err)
}

BIN
node/node-1.4.19.1-darwin-arm64 Executable file

Binary file not shown.

View File

@ -0,0 +1 @@
SHA3-256(node-1.4.19.1-darwin-arm64)= acf272b6bf328b118dc52c1c5673c4c2587efcb9c2c264a54fc966a851ebe06a

Binary files not shown (9 files).

BIN
node/node-1.4.19.1-linux-amd64 Executable file

Binary file not shown.

View File

@ -0,0 +1 @@
SHA3-256(node-1.4.19.1-linux-amd64)= 871a0d4fe1c654dd65cfb2dd708fdd7c806cc60ccdf845dafba16b692082d492

Some files were not shown because too many files have changed in this diff