Compare commits

..

1 Commits

Author SHA1 Message Date
Collin Jackson
df1dd8cd5c
docs: Update Discord link text 2024-10-25 17:37:39 -07:00
8 changed files with 71 additions and 234 deletions

View File

@@ -1,64 +0,0 @@
# CI workflow for the Nexus CLI client.
# NOTE(review): indentation below is reconstructed to standard GitHub Actions
# nesting — the extracted copy had all leading whitespace stripped, which is
# invalid YAML. Structure (keys, order, values) is unchanged.
name: ci
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
jobs:
  build:
    name: Lint CLI
    runs-on: ubuntu-latest
    steps:
      # Sparse checkout: only the CLI client and its proto definitions are needed.
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          sparse-checkout: |
            clients/cli
            proto
      - name: Set up Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Install protoc
        uses: arduino/setup-protoc@v3
      - name: Set up Rust cache
        uses: Swatinem/rust-cache@v2
        with:
          workspaces: ./clients/cli
      - name: Format
        working-directory: clients/cli
        run: |
          rustfmt src/**/*.rs --check --edition 2021
      - name: Build
        working-directory: clients/cli
        run: |
          cargo build --profile=ci-build
      - name: Run cargo clippy
        working-directory: clients/cli
        run: |
          cargo clippy --profile=ci-build --no-deps --all-targets --workspace -- -D warnings
      - name: Test
        working-directory: clients/cli
        run: |
          cargo test --profile=ci-build --tests
      # Fails the build if codegen (e.g. protobuf output) left the tree dirty,
      # i.e. checked-in generated files are stale.
      - name: Ensure checked in generated files are up to date
        run: |
          if [ -n "$(git status --porcelain)" ]; then \
            echo "There are uncommitted changes in working tree after building."; \
            git status; \
            git --no-pager diff; \
            exit 1; \
          else \
            echo "Git working tree is clean"; \
          fi;

View File

@ -1567,7 +1567,7 @@ dependencies = [
[[package]] [[package]]
name = "nexus-network" name = "nexus-network"
version = "0.3.4" version = "0.1.0"
dependencies = [ dependencies = [
"ark-bn254", "ark-bn254",
"ark-crypto-primitives", "ark-crypto-primitives",

View File

@ -1,6 +1,6 @@
[package] [package]
name = "nexus-network" name = "nexus-network"
version = "0.3.4" version = "0.3.1"
edition = "2021" edition = "2021"
[[bin]] [[bin]]
@ -10,23 +10,6 @@ path = "src/prover.rs"
[build-dependencies] [build-dependencies]
prost-build = "0.13" prost-build = "0.13"
[profile.dev]
opt-level = 1
[profile.release]
lto = "fat"
strip = true
codegen-units = 1
[profile.ci-build]
inherits = "dev"
opt-level = 0
debug = 0
strip = "none"
lto = false
codegen-units = 256
incremental = true
[dependencies] [dependencies]
async-stream = "0.3" async-stream = "0.3"
clap = { version = "4.5", features = ["derive"] } clap = { version = "4.5", features = ["derive"] }
@ -100,3 +83,8 @@ ark-vesta = { git = "https://github.com/arkworks-rs/curves/", rev = "8c0256a" }
ark-bls12-381 = { git = "https://github.com/arkworks-rs/curves/", rev = "3fded1f" } ark-bls12-381 = { git = "https://github.com/arkworks-rs/curves/", rev = "3fded1f" }
zstd-sys = { git = "https://github.com/gyscos/zstd-rs" } zstd-sys = { git = "https://github.com/gyscos/zstd-rs" }
[profile.release]
strip = true
lto = true
codegen-units = 1

View File

@ -48,7 +48,6 @@ add `NONINTERACTIVE=1` before `sh`.
* Only the latest version of the CLI is currently supported. * Only the latest version of the CLI is currently supported.
* Prebuilt binaries are not yet available. * Prebuilt binaries are not yet available.
* Linking email to prover id is currently available on the web version only.
* Counting cycles proved is not yet available in the CLI. * Counting cycles proved is not yet available in the CLI.
* Only proving is supported. Submitting programs to the network is in private beta. * Only proving is supported. Submitting programs to the network is in private beta.
To request an API key, contact us at growth@nexus.xyz. To request an API key, contact us at growth@nexus.xyz.

View File

@ -1,4 +1,4 @@
use crate::config::{analytics_id, analytics_api_key}; use crate::config::analytics_token;
use chrono::Datelike; use chrono::Datelike;
use chrono::Timelike; use chrono::Timelike;
use reqwest::header::{ACCEPT, CONTENT_TYPE}; use reqwest::header::{ACCEPT, CONTENT_TYPE};
@ -16,27 +16,14 @@ pub fn track(
) { ) {
println!("{}", description); println!("{}", description);
let firebase_app_id = analytics_id(ws_addr_string); let token = analytics_token(ws_addr_string);
let firebase_api_key = analytics_api_key(ws_addr_string); if token.is_empty() {
if firebase_app_id.is_empty() {
return; return;
} }
let local_now = chrono::offset::Local::now(); let local_now = chrono::offset::Local::now();
// For tracking events, we use the Firebase Measurement Protocol
// Firebase is mostly designed for mobile and web apps, but for our use case of a CLI,
// we can use the Measurement Protocol to track events by POST to a URL.
// The only thing that may be unexpected is that the URL we use includes a firebase key
// Firebase format for properties for Measurement protocol:
// https://developers.google.com/analytics/devguides/collection/protocol/ga4/reference?client_type=firebase#payload
// https://developers.google.com/analytics/devguides/collection/protocol/ga4/reference?client_type=firebase#payload_query_parameters
let mut properties = json!({ let mut properties = json!({
"token": token,
"time": SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis(), "time": SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis(),
// app_instance_id is the standard key Firebase uses this key to track the same user across sessions
// It is a bit redundant, but I wanted to keep the recommended format Firebase uses to minimize surprises
// I still left the distinct_id key as well for backwards compatibility
"app_instance_id": event_properties["prover_id"],
"distinct_id": event_properties["prover_id"], "distinct_id": event_properties["prover_id"],
"prover_type": "volunteer", "prover_type": "volunteer",
"client_type": "cli", "client_type": "cli",
@ -49,27 +36,15 @@ pub fn track(
for (k, v) in event_properties.as_object().unwrap() { for (k, v) in event_properties.as_object().unwrap() {
properties[k] = v.clone(); properties[k] = v.clone();
} }
// Firebase format for events
let body = json!({ let body = json!({
"app_instance_id": event_properties["prover_id"], "event": event_name,
"events": [{ "properties": properties
"name": event_name,
"params": properties
}],
}); });
tokio::spawn(async move { tokio::spawn(async move {
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let _ = client let _ = client
// URL is the Google Analytics endpoint for Firebase: https://stackoverflow.com/questions/50355752/firebase-analytics-from-remote-rest-api .post("https://api.mixpanel.com/track?ip=1")
.post(format!( .body(format!("[{}]", body.to_string()))
"https://www.google-analytics.com/mp/collect?firebase_app_id={}&api_secret={}",
firebase_app_id,
firebase_api_key
))
.body(format!("[{}]", body))
.header(ACCEPT, "text/plain") .header(ACCEPT, "text/plain")
.header(CONTENT_TYPE, "application/json") .header(CONTENT_TYPE, "application/json")
.send() .send()

View File

@ -1,78 +1,18 @@
// Debug version of analytics_id
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
pub fn analytics_id(_ws_addr_string: &str) -> String { pub fn analytics_token(_ws_addr_string: &str) -> String {
// Use one of the tokens in the release version if debugging analytics // Use one of the tokens in the release version if debugging analytics
"".into() return "".into();
} }
// Debug version of analytics_api_key
#[cfg(debug_assertions)]
pub fn analytics_api_key(_ws_addr_string: &str) -> String {
// Use one of the tokens in the release version if debugging analytics
"".into()
}
// The following enum is used to determine the environment from the web socket string
#[derive(Debug)]
#[cfg(not(debug_assertions))] #[cfg(not(debug_assertions))]
enum Environment { pub fn analytics_token(ws_addr_string: &str) -> String {
Dev, if ws_addr_string.starts_with("wss://dev.orchestrator.nexus.xyz:443/") {
Staging, return "504d4d443854f2cd10e2e385aca81aa4".into();
Beta, } else if ws_addr_string.starts_with("wss://staging.orchestrator.nexus.xyz:443/") {
Unknown, return "30bcb58893992aabc5aec014e7b903d2".into();
} } else if ws_addr_string.starts_with("wss://beta.orchestrator.nexus.xyz:443/") {
return "3c16d3853f4258414c9c9109bbbdef0e".into();
// The web socket addresses for the different environments } else {
#[cfg(not(debug_assertions))] return "".into();
mod web_socket_urls {
pub const DEV: &str = "wss://dev.orchestrator.nexus.xyz:443/";
pub const STAGING: &str = "wss://staging.orchestrator.nexus.xyz:443/";
pub const BETA: &str = "wss://beta.orchestrator.nexus.xyz:443/";
}
// the firebase APP IDS by environment
#[cfg(not(debug_assertions))]
mod firebase {
pub const DEV_APP_ID: &str = "1:954530464230:web:f0a14de14ef7bcdaa99627";
pub const STAGING_APP_ID: &str = "1:222794630996:web:1758d64a85eba687eaaac1";
pub const BETA_APP_ID: &str = "1:279395003658:web:04ee2c524474d683d75ef3";
// Analytics keys for the different environments
// These are keys that allow the measurement protocol to write to the analytics database
// They are not sensitive. Worst case, if a malicious actor obtains the secret, they could potentially send false or misleading data to your GA4 property
pub const DEV_API_SECRET: &str = "8ySxiKrtT8a76zClqqO8IQ";
pub const STAGING_API_SECRET: &str = "OI7H53soRMSDWfJf1ittHQ";
pub const BETA_API_SECRET: &str = "gxxzKAQLSl-uYI0eKbIi_Q";
}
// Release versions (existing code)
#[cfg(not(debug_assertions))]
pub fn analytics_id(ws_addr_string: &str) -> String {
// Determine the environment from the web socket string (ws_addr_string)
let env = match ws_addr_string {
web_socket_urls::DEV => Environment::Dev,
web_socket_urls::STAGING => Environment::Staging,
web_socket_urls::BETA => Environment::Beta,
_ => Environment::Unknown,
}; };
// Return the appropriate Firebase App ID based on the environment
match env {
Environment::Dev => firebase::DEV_APP_ID.to_string(),
Environment::Staging => firebase::STAGING_APP_ID.to_string(),
Environment::Beta => firebase::BETA_APP_ID.to_string(),
Environment::Unknown => String::new(),
}
} }
#[cfg(not(debug_assertions))]
pub fn analytics_api_key(ws_addr_string: &str) -> String {
match ws_addr_string {
web_socket_urls::DEV => firebase::DEV_API_SECRET.to_string(),
web_socket_urls::STAGING => firebase::STAGING_API_SECRET.to_string(),
web_socket_urls::BETA => firebase::BETA_API_SECRET.to_string(),
_ => String::new(),
}
}

View File

@ -32,8 +32,8 @@ use nexus_core::{
init_circuit_trace, key::CanonicalSerialize, pp::gen_vm_pp, prove_seq_step, types::*, init_circuit_trace, key::CanonicalSerialize, pp::gen_vm_pp, prove_seq_step, types::*,
}, },
}; };
use rand::RngCore;
use zstd::stream::Encoder; use zstd::stream::Encoder;
use rand::{ RngCore };
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
struct Args { struct Args {
@ -118,7 +118,7 @@ async fn main() {
contents: Some(prover_request::Contents::Registration( contents: Some(prover_request::Contents::Registration(
ProverRequestRegistration { ProverRequestRegistration {
prover_type: ProverType::Volunteer.into(), prover_type: ProverType::Volunteer.into(),
prover_id: prover_id.clone(), prover_id: prover_id.clone().into(),
estimated_proof_cycles_hertz: None, estimated_proof_cycles_hertz: None,
}, },
)), )),
@ -127,16 +127,9 @@ async fn main() {
let mut retries = 0; let mut retries = 0;
let max_retries = 5; let max_retries = 5;
while let Err(e) = client loop {
.send(Message::Binary(registration.encode_to_vec())) if let Err(e) = client.send(Message::Binary(registration.encode_to_vec())).await {
.await eprintln!("Failed to send message: {:?}, attempt {}/{}", e, retries + 1, max_retries);
{
eprintln!(
"Failed to send message: {:?}, attempt {}/{}",
e,
retries + 1,
max_retries
);
retries += 1; retries += 1;
if retries >= max_retries { if retries >= max_retries {
@ -146,6 +139,9 @@ async fn main() {
// Add a delay before retrying // Add a delay before retrying
tokio::time::sleep(tokio::time::Duration::from_secs(u64::pow(2, retries))).await; tokio::time::sleep(tokio::time::Duration::from_secs(u64::pow(2, retries))).await;
} else {
break;
}
} }
track( track(
@ -154,6 +150,9 @@ async fn main() {
&ws_addr_string, &ws_addr_string,
json!({"ws_addr_string": ws_addr_string, "prover_id": prover_id}), json!({"ws_addr_string": ws_addr_string, "prover_id": prover_id}),
); );
println!(
"Network stats are available at https://beta.nexus.xyz/."
);
loop { loop {
let program_message = match client.next().await.unwrap().unwrap() { let program_message = match client.next().await.unwrap().unwrap() {
Message::Binary(b) => b, Message::Binary(b) => b,
@ -184,7 +183,7 @@ async fn main() {
); );
let mut vm: NexusVM<MerkleTrie> = let mut vm: NexusVM<MerkleTrie> =
parse_elf(elf_bytes.as_ref()).expect("error loading and parsing RISC-V instruction"); parse_elf(&elf_bytes.as_ref()).expect("error loading and parsing RISC-V instruction");
vm.syscalls.set_input(&input); vm.syscalls.set_input(&input);
// TODO(collinjackson): Get outputs // TODO(collinjackson): Get outputs
@ -248,10 +247,10 @@ async fn main() {
completed_fraction = steps_proven as f32 / steps_to_prove as f32; completed_fraction = steps_proven as f32 / steps_to_prove as f32;
let progress = ProverRequest { let progress = ProverRequest {
contents: Some(prover_request::Contents::Progress(Progress { contents: Some(prover_request::Contents::Progress(Progress {
completed_fraction, completed_fraction: completed_fraction,
steps_in_trace: total_steps as i32, steps_in_trace: total_steps as i32,
steps_to_prove: steps_to_prove as i32, steps_to_prove: steps_to_prove as i32,
steps_proven, steps_proven: steps_proven as i32,
})), })),
}; };
let progress_duration = SystemTime::now().duration_since(progress_time).unwrap(); let progress_duration = SystemTime::now().duration_since(progress_time).unwrap();
@ -259,10 +258,7 @@ async fn main() {
let proof_cycles_hertz = k as f64 * 1000.0 / progress_duration.as_millis() as f64; let proof_cycles_hertz = k as f64 * 1000.0 / progress_duration.as_millis() as f64;
track( track(
"progress".into(), "progress".into(),
format!( format!("Proved step {} at {:.2} proof cycles/sec.", step, proof_cycles_hertz),
"Proved step {} at {:.2} proof cycles/sec.",
step, proof_cycles_hertz
),
&ws_addr_string, &ws_addr_string,
json!({ json!({
"completed_fraction": completed_fraction, "completed_fraction": completed_fraction,
@ -280,13 +276,9 @@ async fn main() {
let mut retries = 0; let mut retries = 0;
let max_retries = 5; let max_retries = 5;
while let Err(e) = client.send(Message::Binary(progress.encode_to_vec())).await { loop {
eprintln!( if let Err(e) = client.send(Message::Binary(progress.encode_to_vec())).await {
"Failed to send message: {:?}, attempt {}/{}", eprintln!("Failed to send message: {:?}, attempt {}/{}", e, retries + 1, max_retries);
e,
retries + 1,
max_retries
);
retries += 1; retries += 1;
if retries >= max_retries { if retries >= max_retries {
@ -296,6 +288,9 @@ async fn main() {
// Add a delay before retrying // Add a delay before retrying
tokio::time::sleep(tokio::time::Duration::from_secs(u64::pow(2, retries))).await; tokio::time::sleep(tokio::time::Duration::from_secs(u64::pow(2, retries))).await;
} else {
break;
}
} }
if step == end - 1 { if step == end - 1 {
@ -313,18 +308,14 @@ async fn main() {
})), })),
}; };
let duration = SystemTime::now().duration_since(start_time).unwrap(); let duration = SystemTime::now().duration_since(start_time).unwrap();
let proof_cycles_hertz = let proof_cycles_hertz = cycles_proven as f64 * 1000.0 / duration.as_millis() as f64;
cycles_proven as f64 * 1000.0 / duration.as_millis() as f64;
client client
.send(Message::Binary(response.encode_to_vec())) .send(Message::Binary(response.encode_to_vec()))
.await .await
.unwrap(); .unwrap();
track( track(
"proof".into(), "proof".into(),
format!( format!("Proof sent! Overall speed was {:.2} proof cycles/sec.", proof_cycles_hertz),
"Proof sent! Overall speed was {:.2} proof cycles/sec.",
proof_cycles_hertz
),
&ws_addr_string, &ws_addr_string,
json!({ json!({
"proof_duration_sec": duration.as_secs(), "proof_duration_sec": duration.as_secs(),

8
proto/generate_protobufs.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
# Regenerate Dart protobuf/gRPC bindings from proto/orchestrator.proto,
# then rebuild each client so stale-binding compile errors surface immediately.
mkdir -p clients/flutter/lib/src/generated
protoc --experimental_allow_proto3_optional --dart_out=grpc:clients/flutter/lib/src/generated -Iproto proto/orchestrator.proto
dart format clients/flutter/lib/src/generated
# Builds are best-effort: a sparse checkout may omit any of these directories,
# so a missing directory is reported but does not fail the script.
(cd clients/dummy_client && cargo build || echo clients/dummy_client not found, possibly due to a sparse checkout.)
(cd clients/cli && cargo build || echo clients/cli not found, possibly due to a sparse checkout.)
(cd orchestrator && cargo build || echo orchestrator/ not found, possibly due to a sparse checkout.)