Compare commits

...

20 Commits

Author SHA1 Message Date
Helen Grachtz
284f8ac5f7
Merge 14226dbddc into 6a26c336e7 2025-02-18 02:19:38 +01:00
comfsrt
6a26c336e7
Update config.rs (#331)
2025-02-18 09:19:10 +08:00
Tronica
a915766840
Fix spelling errors and correct minor errors (#337)
* fix typo lib.rs

* fix typos mod.rs

* fix typo config.rs
2025-02-18 09:11:13 +08:00
SamiAlHassan
3d9aa8c940
docs: fix broken figure image paths across docs (#335)
* patch figure links on proof-of-random-access.md

* patch figure links on transaction-processing.md

* patch figure links on k-v-store.md

* patch figure links on architecture.md
2025-02-18 09:10:51 +08:00
0g-peterzhb
538afb00e1
add gas auto adjustment (#330)
* add gas auto adjustment

* refactor config
2025-02-18 09:09:50 +08:00
0g-peterzhb
7ad3f717b4
refactor submit pora loop (#325)
2025-02-11 18:23:19 +08:00
Helen Grachtz
26cc19b92d
Fix code comments and bug report errors (#319)
fix typos
2025-02-11 16:56:40 +08:00
Brawn
a3335eed82
fix: Fix incorrect -in-place='' syntax Update update_config.sh (#320)
Fix incorrect -in-place='' syntax in sed command
2025-02-11 16:55:18 +08:00
Dmitry
2272b5dbfd
fix: Fix path handling in script execution (#318) 2025-02-11 16:53:28 +08:00
witty
760d4b4a53
docs: Fix imperative mood in documentation instructions Update onebox-test.md (#316) 2025-02-11 16:50:39 +08:00
Ocenka
91680f2e33
Fix spelling errors (#314)
* fix spelling lib.rs

* fix spelling error environment.rs
2025-02-11 16:48:24 +08:00
Radovenchyk
c9bca86add
Update README.md (#312) 2025-02-11 16:46:40 +08:00
udhaykumarbala
93f587c407
replaced broken links (#311) 2025-02-11 16:46:00 +08:00
Akaonetwo
1f71aadeec
Typo fixed in: Update README.md (#310)
fix typos
2025-02-11 16:43:32 +08:00
dashangcun
656a092cf8
chore: fix some comments (#313)
Signed-off-by: dashangcun <907225865@qq.com>
2025-02-11 16:40:55 +08:00
crStiv
8014f51b6d
Update introduction.md (#309) 2025-02-11 16:40:31 +08:00
Dmitry
b0a9a415f7
docs: Fixing Documentation Typos (#308)
* docs: typo fix Update transaction-processing.md

* docs: typo fix Update run.md

* docs: typo fix Update onebox-test.md
2025-02-11 16:37:33 +08:00
Maxim Evtush
bc6bcf857c
Update LICENSE (#321) 2025-02-11 16:36:50 +08:00
peilun-conflux
d15ef5ba3d
Add tokio console for debug. (#317)
It requires `tokio_unstable` in rust flags, so a `tokio-console`
feature is added to be compatible with the default config.
To compile with the tokio console enabled, compile with
``RUSTFLAGS="--cfg tokio_unstable" cargo build --release --features tokio-console``
The usage of the tokio console can be found in https://github.com/tokio-rs/console.
2025-02-11 16:36:08 +08:00
0g-peterzhb
9ce215b919
add dynamic gas price adjustment when submitting pora (#324) 2025-02-11 16:34:58 +08:00
36 changed files with 695 additions and 136 deletions

Cargo.lock (generated)

@ -453,6 +453,28 @@ dependencies = [
"trust-dns-resolver", "trust-dns-resolver",
] ]
[[package]]
name = "async-stream"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite 0.2.14",
]
[[package]]
name = "async-stream-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.68",
]
[[package]] [[package]]
name = "async-task" name = "async-task"
version = "4.7.1" version = "4.7.1"
@ -506,7 +528,7 @@ version = "0.16.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247"
dependencies = [ dependencies = [
"http", "http 0.2.12",
"log", "log",
"url", "url",
"wildmatch", "wildmatch",
@ -529,6 +551,53 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "axum"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
dependencies = [
"async-trait",
"axum-core",
"bytes",
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
"http-body-util",
"itoa",
"matchit",
"memchr",
"mime",
"percent-encoding",
"pin-project-lite 0.2.14",
"rustversion",
"serde",
"sync_wrapper 1.0.2",
"tower",
"tower-layer",
"tower-service",
]
[[package]]
name = "axum-core"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
dependencies = [
"async-trait",
"bytes",
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
"http-body-util",
"mime",
"pin-project-lite 0.2.14",
"rustversion",
"sync_wrapper 1.0.2",
"tower-layer",
"tower-service",
]
[[package]] [[package]]
name = "backtrace" name = "backtrace"
version = "0.3.73" version = "0.3.73"
@ -568,6 +637,12 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]] [[package]]
name = "base64ct" name = "base64ct"
version = "1.6.0" version = "1.6.0"
@ -1104,6 +1179,45 @@ dependencies = [
"yaml-rust", "yaml-rust",
] ]
[[package]]
name = "console-api"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857"
dependencies = [
"futures-core",
"prost 0.13.4",
"prost-types 0.13.4",
"tonic",
"tracing-core",
]
[[package]]
name = "console-subscriber"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01"
dependencies = [
"console-api",
"crossbeam-channel",
"crossbeam-utils",
"futures-task",
"hdrhistogram",
"humantime",
"hyper-util",
"prost 0.13.4",
"prost-types 0.13.4",
"serde",
"serde_json",
"thread_local",
"tokio",
"tokio-stream",
"tonic",
"tracing",
"tracing-core",
"tracing-subscriber",
]
[[package]] [[package]]
name = "const-hex" name = "const-hex"
version = "1.12.0" version = "1.12.0"
@ -1157,6 +1271,17 @@ dependencies = [
"serde_json", "serde_json",
] ]
[[package]]
name = "contract-wrapper"
version = "0.1.0"
dependencies = [
"ethers",
"serde",
"serde_json",
"tokio",
"tracing",
]
[[package]] [[package]]
name = "convert_case" name = "convert_case"
version = "0.6.0" version = "0.6.0"
@ -2286,7 +2411,7 @@ dependencies = [
"futures-timer", "futures-timer",
"futures-util", "futures-util",
"hashers", "hashers",
"http", "http 0.2.12",
"instant", "instant",
"jsonwebtoken", "jsonwebtoken",
"once_cell", "once_cell",
@ -2905,7 +3030,26 @@ dependencies = [
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"futures-util", "futures-util",
"http", "http 0.2.12",
"indexmap 2.2.6",
"slab",
"tokio",
"tokio-util 0.7.11",
"tracing",
]
[[package]]
name = "h2"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
dependencies = [
"atomic-waker",
"bytes",
"fnv",
"futures-core",
"futures-sink",
"http 1.2.0",
"indexmap 2.2.6", "indexmap 2.2.6",
"slab", "slab",
"tokio", "tokio",
@ -3004,6 +3148,19 @@ dependencies = [
"tokio-util 0.6.10", "tokio-util 0.6.10",
] ]
[[package]]
name = "hdrhistogram"
version = "7.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
dependencies = [
"base64 0.21.7",
"byteorder",
"flate2",
"nom",
"num-traits",
]
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.3.3" version = "0.3.3"
@ -3125,6 +3282,17 @@ dependencies = [
"itoa", "itoa",
] ]
[[package]]
name = "http"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]] [[package]]
name = "http-body" name = "http-body"
version = "0.4.6" version = "0.4.6"
@ -3132,7 +3300,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http 0.2.12",
"pin-project-lite 0.2.14",
]
[[package]]
name = "http-body"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [
"bytes",
"http 1.2.0",
]
[[package]]
name = "http-body-util"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
dependencies = [
"bytes",
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
"pin-project-lite 0.2.14", "pin-project-lite 0.2.14",
] ]
@ -3164,9 +3355,9 @@ dependencies = [
"futures-channel", "futures-channel",
"futures-core", "futures-core",
"futures-util", "futures-util",
"h2", "h2 0.3.26",
"http", "http 0.2.12",
"http-body", "http-body 0.4.6",
"httparse", "httparse",
"httpdate", "httpdate",
"itoa", "itoa",
@ -3178,14 +3369,35 @@ dependencies = [
"want", "want",
] ]
[[package]]
name = "hyper"
version = "1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0"
dependencies = [
"bytes",
"futures-channel",
"futures-util",
"h2 0.4.7",
"http 1.2.0",
"http-body 1.0.1",
"httparse",
"httpdate",
"itoa",
"pin-project-lite 0.2.14",
"smallvec",
"tokio",
"want",
]
[[package]] [[package]]
name = "hyper-rustls" name = "hyper-rustls"
version = "0.23.2" version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
dependencies = [ dependencies = [
"http", "http 0.2.12",
"hyper", "hyper 0.14.29",
"log", "log",
"rustls 0.20.9", "rustls 0.20.9",
"rustls-native-certs", "rustls-native-certs",
@ -3201,8 +3413,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
dependencies = [ dependencies = [
"futures-util", "futures-util",
"http", "http 0.2.12",
"hyper", "hyper 0.14.29",
"rustls 0.21.12", "rustls 0.21.12",
"tokio", "tokio",
"tokio-rustls 0.24.1", "tokio-rustls 0.24.1",
@ -3216,12 +3428,25 @@ checksum = "6eea26c5d0b6ab9d72219f65000af310f042a740926f7b2fa3553e774036e2e7"
dependencies = [ dependencies = [
"derive_builder", "derive_builder",
"dns-lookup", "dns-lookup",
"hyper", "hyper 0.14.29",
"tokio", "tokio",
"tower-service", "tower-service",
"tracing", "tracing",
] ]
[[package]]
name = "hyper-timeout"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
dependencies = [
"hyper 1.5.2",
"hyper-util",
"pin-project-lite 0.2.14",
"tokio",
"tower-service",
]
[[package]] [[package]]
name = "hyper-tls" name = "hyper-tls"
version = "0.5.0" version = "0.5.0"
@ -3229,12 +3454,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [ dependencies = [
"bytes", "bytes",
"hyper", "hyper 0.14.29",
"native-tls", "native-tls",
"tokio", "tokio",
"tokio-native-tls", "tokio-native-tls",
] ]
[[package]]
name = "hyper-util"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
dependencies = [
"bytes",
"futures-channel",
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
"hyper 1.5.2",
"pin-project-lite 0.2.14",
"socket2 0.5.7",
"tokio",
"tower-service",
"tracing",
]
[[package]] [[package]]
name = "iana-time-zone" name = "iana-time-zone"
version = "0.1.60" version = "0.1.60"
@ -3586,7 +3830,7 @@ dependencies = [
"futures-timer", "futures-timer",
"futures-util", "futures-util",
"gloo-net", "gloo-net",
"http", "http 0.2.12",
"jsonrpsee-core", "jsonrpsee-core",
"jsonrpsee-types", "jsonrpsee-types",
"pin-project 1.1.5", "pin-project 1.1.5",
@ -3615,7 +3859,7 @@ dependencies = [
"futures-timer", "futures-timer",
"futures-util", "futures-util",
"globset", "globset",
"hyper", "hyper 0.14.29",
"jsonrpsee-types", "jsonrpsee-types",
"lazy_static", "lazy_static",
"parking_lot 0.12.3", "parking_lot 0.12.3",
@ -3638,7 +3882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fc1d8c0e4f455c47df21f8a29f4bbbcb75eb71bfee919b92e92502b48358392" checksum = "5fc1d8c0e4f455c47df21f8a29f4bbbcb75eb71bfee919b92e92502b48358392"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"hyper", "hyper 0.14.29",
"hyper-rustls 0.23.2", "hyper-rustls 0.23.2",
"jsonrpsee-core", "jsonrpsee-core",
"jsonrpsee-types", "jsonrpsee-types",
@ -3658,7 +3902,7 @@ checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03"
dependencies = [ dependencies = [
"futures-channel", "futures-channel",
"futures-util", "futures-util",
"hyper", "hyper 0.14.29",
"jsonrpsee-core", "jsonrpsee-core",
"jsonrpsee-types", "jsonrpsee-types",
"serde", "serde",
@ -4713,6 +4957,12 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
[[package]]
name = "matchit"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
[[package]] [[package]]
name = "md-5" name = "md-5"
version = "0.10.6" version = "0.10.6"
@ -4778,6 +5028,7 @@ dependencies = [
"async-trait", "async-trait",
"blake2", "blake2",
"contract-interface", "contract-interface",
"contract-wrapper",
"ethereum-types 0.14.1", "ethereum-types 0.14.1",
"ethers", "ethers",
"hex", "hex",
@ -6025,6 +6276,16 @@ dependencies = [
"prost-derive 0.10.1", "prost-derive 0.10.1",
] ]
[[package]]
name = "prost"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec"
dependencies = [
"bytes",
"prost-derive 0.13.4",
]
[[package]] [[package]]
name = "prost-build" name = "prost-build"
version = "0.9.0" version = "0.9.0"
@ -6106,6 +6367,19 @@ dependencies = [
"syn 1.0.109", "syn 1.0.109",
] ]
[[package]]
name = "prost-derive"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3"
dependencies = [
"anyhow",
"itertools 0.13.0",
"proc-macro2",
"quote",
"syn 2.0.68",
]
[[package]] [[package]]
name = "prost-types" name = "prost-types"
version = "0.9.0" version = "0.9.0"
@ -6126,6 +6400,15 @@ dependencies = [
"prost 0.10.4", "prost 0.10.4",
] ]
[[package]]
name = "prost-types"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc"
dependencies = [
"prost 0.13.4",
]
[[package]] [[package]]
name = "protobuf" name = "protobuf"
version = "2.28.0" version = "2.28.0"
@ -6159,8 +6442,8 @@ dependencies = [
"dns-lookup", "dns-lookup",
"futures-core", "futures-core",
"futures-util", "futures-util",
"http", "http 0.2.12",
"hyper", "hyper 0.14.29",
"hyper-system-resolver", "hyper-system-resolver",
"pin-project-lite 0.2.14", "pin-project-lite 0.2.14",
"thiserror", "thiserror",
@ -6403,10 +6686,10 @@ dependencies = [
"encoding_rs", "encoding_rs",
"futures-core", "futures-core",
"futures-util", "futures-util",
"h2", "h2 0.3.26",
"http", "http 0.2.12",
"http-body", "http-body 0.4.6",
"hyper", "hyper 0.14.29",
"hyper-rustls 0.24.2", "hyper-rustls 0.24.2",
"hyper-tls", "hyper-tls",
"ipnet", "ipnet",
@ -6422,7 +6705,7 @@ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
"serde_urlencoded", "serde_urlencoded",
"sync_wrapper", "sync_wrapper 0.1.2",
"system-configuration", "system-configuration",
"tokio", "tokio",
"tokio-native-tls", "tokio-native-tls",
@ -7497,6 +7780,12 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "sync_wrapper"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
[[package]] [[package]]
name = "synstructure" name = "synstructure"
version = "0.12.6" version = "0.12.6"
@ -7729,6 +8018,7 @@ dependencies = [
"signal-hook-registry", "signal-hook-registry",
"socket2 0.5.7", "socket2 0.5.7",
"tokio-macros", "tokio-macros",
"tracing",
"windows-sys 0.48.0", "windows-sys 0.48.0",
] ]
@ -7786,9 +8076,9 @@ dependencies = [
[[package]] [[package]]
name = "tokio-stream" name = "tokio-stream"
version = "0.1.15" version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
dependencies = [ dependencies = [
"futures-core", "futures-core",
"pin-project-lite 0.2.14", "pin-project-lite 0.2.14",
@ -7895,6 +8185,62 @@ dependencies = [
"winnow 0.6.13", "winnow 0.6.13",
] ]
[[package]]
name = "tonic"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
dependencies = [
"async-stream",
"async-trait",
"axum",
"base64 0.22.1",
"bytes",
"h2 0.4.7",
"http 1.2.0",
"http-body 1.0.1",
"http-body-util",
"hyper 1.5.2",
"hyper-timeout",
"hyper-util",
"percent-encoding",
"pin-project 1.1.5",
"prost 0.13.4",
"socket2 0.5.7",
"tokio",
"tokio-stream",
"tower",
"tower-layer",
"tower-service",
"tracing",
]
[[package]]
name = "tower"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
dependencies = [
"futures-core",
"futures-util",
"indexmap 1.9.3",
"pin-project 1.1.5",
"pin-project-lite 0.2.14",
"rand 0.8.5",
"slab",
"tokio",
"tokio-util 0.7.11",
"tower-layer",
"tower-service",
"tracing",
]
[[package]]
name = "tower-layer"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
[[package]] [[package]]
name = "tower-service" name = "tower-service"
version = "0.3.2" version = "0.3.2"
@ -8124,7 +8470,7 @@ dependencies = [
"byteorder", "byteorder",
"bytes", "bytes",
"data-encoding", "data-encoding",
"http", "http 0.2.12",
"httparse", "httparse",
"log", "log",
"rand 0.8.5", "rand 0.8.5",
@ -8922,6 +9268,8 @@ dependencies = [
"chunk_pool", "chunk_pool",
"clap", "clap",
"config", "config",
"console-subscriber",
"contract-wrapper",
"ctrlc", "ctrlc",
"duration-str", "duration-str",
"error-chain", "error-chain",


@ -16,7 +16,7 @@ Across the two lanes, 0G Storage supports the following features:
* **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types. * **General Purpose Design**: Supports atomic transactions, mutable key-value stores, and archive log systems, enabling a wide range of applications with various data types.
* **Validated Incentivization**: Utilizes the PoRA (Proof of Random Access) mining algorithm to mitigate the data outsourcing issue and to ensure rewards are distributed to nodes who contribute to the storage network. * **Validated Incentivization**: Utilizes the PoRA (Proof of Random Access) mining algorithm to mitigate the data outsourcing issue and to ensure rewards are distributed to nodes who contribute to the storage network.
For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/og-storage). For in-depth technical details about 0G Storage, please read our [Intro to 0G Storage](https://docs.0g.ai/0g-storage).
## Documentation ## Documentation


@ -0,0 +1,17 @@
[package]
name = "contract-wrapper"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1.28", features = ["macros"] }
ethers = "2.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tracing = "0.1.35"
# or `tracing` if you prefer
[features]
dev = []


@ -0,0 +1,204 @@
use ethers::{
abi::Detokenize,
contract::ContractCall,
providers::{Middleware, ProviderError},
types::{TransactionReceipt, U256},
};
use serde::Deserialize;
use std::{sync::Arc, time::Duration};
use tokio::time::sleep;
use tracing::{debug, info};
/// The result of a single submission attempt.
#[derive(Debug)]
pub enum SubmissionAction {
Success(TransactionReceipt),
/// Generic "retry" signal, but we still need to know if it's "mempool/timeout" or something else.
/// We'll parse the error string or have a separate reason in a real app.
Retry(String),
Error(String),
}
/// Configuration for submission retries, gas price, etc.
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct SubmitConfig {
/// If `Some`, use this gas price for the first attempt.
/// If `None`, fetch the current network gas price.
pub(crate) initial_gas_price: Option<U256>,
/// If `Some`, clamp increased gas price to this limit.
/// If `None`, do not bump gas for mempool/timeout errors.
pub(crate) max_gas_price: Option<U256>,
/// Gas limit of the transaction
pub(crate) max_gas: Option<U256>,
/// Factor by which to multiply the gas price on each mempool/timeout error.
/// E.g. if factor=11 => a 10% bump => newGas = (gas * factor) / 10
pub(crate) gas_increase_factor: Option<u64>,
/// The maximum number of gas bumps (for mempool/timeout). If `max_gas_price` is set,
/// we typically rely on clamping. But you can still cap the number of bumps if you want.
pub(crate) max_retries: Option<usize>,
/// Seconds to wait between attempts.
pub(crate) interval_secs: Option<u64>,
}
const DEFAULT_INTERVAL_SECS: u64 = 2;
const DEFAULT_MAX_RETRIES: usize = 5;
impl Default for SubmitConfig {
fn default() -> Self {
Self {
initial_gas_price: None,
max_gas_price: None,
max_gas: None,
gas_increase_factor: Some(11), // implies 10% bump if we do (gas*11)/10
max_retries: Some(DEFAULT_MAX_RETRIES),
interval_secs: Some(DEFAULT_INTERVAL_SECS),
}
}
}
/// A simple function to detect if the retry is from a mempool or timeout error.
/// Right now, we rely on `submit_once` returning `SubmissionAction::Retry` for ANY error
/// that is "retryable," so we must parse the error string from `submit_once`, or
/// store that string. Another approach is to return an enum with a reason from `submit_once`.
fn is_mempool_or_timeout_error(error_str: String) -> bool {
let lower = error_str.to_lowercase();
lower.contains("mempool") || lower.contains("timeout")
}
/// A function that performs a single submission attempt:
/// - Sends the transaction
/// - Awaits the receipt with limited internal retries
/// - Returns a `SubmissionAction` indicating success, retry, or error.
pub async fn submit_once<M, T>(call: ContractCall<M, T>) -> SubmissionAction
where
M: Middleware + 'static,
T: Detokenize,
{
let pending_tx = match call.send().await {
Ok(tx) => tx,
Err(e) => {
let msg = e.to_string();
if is_mempool_or_timeout_error(msg.clone()) {
return SubmissionAction::Retry(format!("mempool/timeout: {:?}", e));
}
debug!("Error sending transaction: {:?}", msg);
return SubmissionAction::Error(format!("Transaction failed: {}", msg));
}
};
debug!("Signed tx hash: {:?}", pending_tx.tx_hash());
let receipt_result = pending_tx.await;
match receipt_result {
Ok(Some(receipt)) => {
info!("Transaction mined, receipt: {:?}", receipt);
SubmissionAction::Success(receipt)
}
Ok(None) => {
debug!("Transaction probably timed out; retrying");
SubmissionAction::Retry("timeout, receipt is none".to_string())
}
Err(ProviderError::HTTPError(e)) => {
debug!("HTTP error retrieving receipt: {:?}", e);
SubmissionAction::Retry(format!("http error: {:?}", e))
}
Err(e) => SubmissionAction::Error(format!("Transaction unrecoverable: {:?}", e)),
}
}
/// Increase gas price using integer arithmetic: (gp * factor_num) / factor_den
fn increase_gas_price_u256(gp: U256, factor_num: u64, factor_den: u64) -> U256 {
let num = U256::from(factor_num);
let den = U256::from(factor_den);
gp.checked_mul(num).unwrap_or(U256::MAX) / den
}
/// A higher-level function that wraps `submit_once` in a gas-price adjustment loop,
/// plus a global timeout, plus distinct behavior for mempool/timeout vs other errors.
pub async fn submit_with_retry<M, T>(
mut call: ContractCall<M, T>,
config: &SubmitConfig,
middleware: Arc<M>,
) -> Result<TransactionReceipt, String>
where
M: Middleware + 'static,
T: Detokenize,
{
if let Some(max_gas) = config.max_gas {
call = call.gas(max_gas);
}
let mut gas_price = if let Some(gp) = config.initial_gas_price {
gp
} else {
middleware
.get_gas_price()
.await
.map_err(|e| format!("Failed to fetch gas price: {:?}", e))?
};
// If no factor is set, default to 11 => 10% bump
let factor_num = config.gas_increase_factor.unwrap_or(11);
let factor_den = 10u64;
// Two counters: one for gas bumps, one for non-gas retries
let mut non_gas_retries = 0;
let max_retries = config.max_retries.unwrap_or(DEFAULT_MAX_RETRIES);
loop {
// Set gas price on the call
call = call.gas_price(gas_price);
match submit_once(call.clone()).await {
SubmissionAction::Success(receipt) => {
return Ok(receipt);
}
SubmissionAction::Retry(error_str) => {
// We need to figure out if it's "mempool/timeout" or some other reason.
// Right now, we don't have the error string from `submit_once` easily,
// so let's assume we store it or we do a separate function that returns it.
// For simplicity, let's do a hack: let's define a placeholder "error_str" and parse it.
// In reality, you'd likely return `SubmissionAction::Retry(reason_str)` from `submit_once`.
if is_mempool_or_timeout_error(error_str.clone()) {
// Mempool/timeout error
if let Some(max_gp) = config.max_gas_price {
if gas_price >= max_gp {
return Err(format!(
"Exceeded max gas price: {}, with error msg: {}",
max_gp, error_str
));
}
// Bump the gas
let new_price = increase_gas_price_u256(gas_price, factor_num, factor_den);
gas_price = std::cmp::min(new_price, max_gp);
debug!("Bumping gas price to {}", gas_price);
} else {
// No maxGasPrice => we do NOT bump => fail
return Err(
"Mempool/timeout error, no maxGasPrice set => aborting".to_string()
);
}
} else {
// Non-gas error => increment nonGasRetries
non_gas_retries += 1;
if non_gas_retries > max_retries {
return Err(format!("Exceeded non-gas retries: {}", max_retries));
}
debug!(
"Non-gas retry #{} (same gas price: {})",
non_gas_retries, gas_price
);
}
}
SubmissionAction::Error(e) => {
return Err(e);
}
}
// Sleep between attempts
sleep(Duration::from_secs(
config.interval_secs.unwrap_or(DEFAULT_INTERVAL_SECS),
))
.await;
}
}
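
For orientation, here is a minimal sketch of how a caller might drive `submit_with_retry` from the new crate. The helper function and its parameters are illustrative only and not part of this change set; in the node itself the `SubmitConfig` is deserialized from the `[submission_config]` section rather than built from `Default`.

```rust
use std::sync::Arc;

use contract_wrapper::{submit_with_retry, SubmitConfig};
use ethers::{abi::Detokenize, contract::ContractCall, providers::Middleware};

/// Hypothetical helper: drive an already-prepared `ContractCall` through the
/// retry loop. Defaults mean: fetch the current network gas price, no gas
/// bumping (no `max_gas_price`, so mempool/timeout errors abort), up to 5
/// non-gas retries with a 2-second interval between attempts.
async fn submit_example<M, T>(call: ContractCall<M, T>, client: Arc<M>) -> Result<(), String>
where
    M: Middleware + 'static,
    T: Detokenize,
{
    let config = SubmitConfig::default();
    let receipt = submit_with_retry(call, &config, client).await?;
    println!("transaction mined in block {:?}", receipt.block_number);
    Ok(())
}
```

With the defaults a mempool/timeout error fails fast because `max_gas_price` is unset; configuring it enables the 10% gas-price bumps, clamped to that cap.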


@ -7,7 +7,7 @@
//! block processing time). //! block processing time).
//! - `IntCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g., //! - `IntCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g.,
//! number of block processing requests). //! number of block processing requests).
//! - `IntGauge`: used to represent an varying integer (e.g., number of attestations per block). //! - `IntGauge`: used to represent a varying integer (e.g., number of attestations per block).
//! //!
//! ## Important //! ## Important
//! //!


@ -6,13 +6,13 @@ ZeroGravity system consists of a data availability layer (0G DA) on top of a dec
Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate the mining process by interacting with the consensus network to accrue rewards from the system. Figure 1 illustrates the architecture of the 0G system. When a data block enters the 0G DA, it is first erasure coded and organized into multiple consecutive chunks through erasure coding. The merkle root as a commitment of the encoded data block is then submitted to the consensus layer to keep the order of the data entering the system. The chunks are then dispersed to different storage nodes in 0G Storage where the data may be further replicated to other nodes depending on the storage fee that the user pays. The storage nodes periodically participate the mining process by interacting with the consensus network to accrue rewards from the system.
<figure><img src="../../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure> <figure><img src="../.gitbook/assets/zg-storage-architecture.png" alt=""><figcaption><p>Figure 1. The Architecture of 0G System</p></figcaption></figure>
## 0G Storage ## 0G Storage
0G Storage employs layered design targeting to support different types of decentralized applications. Figure 2 shows the overview of the full stack layers of 0G Storage. 0G Storage employs layered design targeting to support different types of decentralized applications. Figure 2 shows the overview of the full stack layers of 0G Storage.
<figure><img src="../../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure> <figure><img src="../.gitbook/assets/zg-storage-layer.png" alt=""><figcaption><p>Figure 2. Full Stack Solution of 0G Storage</p></figcaption></figure>
The lowest is a log layer which is a decentralized system. It consists of multiple storage nodes to form a storage network. The network has built-in incentive mechanism to reward the data storage. The ordering of the uploaded data is guaranteed by a sequencing mechanism to provide a log-based semantics and abstraction. This layer is used to store unstructured raw data for permanent persistency. The lowest is a log layer which is a decentralized system. It consists of multiple storage nodes to form a storage network. The network has built-in incentive mechanism to reward the data storage. The ordering of the uploaded data is guaranteed by a sequencing mechanism to provide a log-based semantics and abstraction. This layer is used to store unstructured raw data for permanent persistency.


@ -30,4 +30,4 @@ Precisely, the mining process has the following steps:
6. For each piece $$\overrightarrow{v}$$, compute the Blake2b hash of the tuple ($$\mathsf{miner\_id}$$, $$\mathsf{nonce}$$, $$\mathsf{context\_digest}$$, $$\mathsf{start\_position}$$, $$\mathsf{mine\_length}$$, $$\overrightarrow{v}$$). 6. For each piece $$\overrightarrow{v}$$, compute the Blake2b hash of the tuple ($$\mathsf{miner\_id}$$, $$\mathsf{nonce}$$, $$\mathsf{context\_digest}$$, $$\mathsf{start\_position}$$, $$\mathsf{mine\_length}$$, $$\overrightarrow{v}$$).
7. If one of Blake2b hash output is smaller than a target value, the miner finds a legitimate PoRA solution. 7. If one of Blake2b hash output is smaller than a target value, the miner finds a legitimate PoRA solution.
<figure><img src="../../../.gitbook/assets/zg-storage-algorithm.png" alt=""><figcaption><p>Figure 1. Recall Position and Scratchpad Computation</p></figcaption></figure> <figure><img src="../../.gitbook/assets/zg-storage-algorithm.png" alt=""><figcaption><p>Figure 1. Recall Position and Scratchpad Computation</p></figcaption></figure>


@ -10,7 +10,7 @@
## Integration ## Integration
We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to easily integrate 0G Storage in their applications with the following features: We provide a [SDK](https://github.com/0glabs/0g-ts-sdk) for users to easily integrate 0G Storage in their applications with the following features:
* File Merkle Tree Class * File Merkle Tree Class
* Flow Contract Types * Flow Contract Types
@ -22,7 +22,7 @@ We provide a [SDK](https://github.com/0glabs/0g-js-storage-sdk) for users to eas
## Deployment ## Deployment
Please refer to [Deployment](../0G%20Storage/doc/install.md) page for detailed steps to compile and start a 0G Storage node. Please refer to [Deployment](run.md) page for detailed steps to compile and start a 0G Storage node.
## Test ## Test


@ -6,4 +6,4 @@ A user-defined function will be used to deserialize the raw content in the log e
When a new key-value node just joins the network, it connects to the log layer and plays the log entries from head to tail to construct the latest state of the key-value store. During the log entry playing, an application-specific key-value node can skip irrelevant log entries which do not contain stream IDs that it cares. When a new key-value node just joins the network, it connects to the log layer and plays the log entries from head to tail to construct the latest state of the key-value store. During the log entry playing, an application-specific key-value node can skip irrelevant log entries which do not contain stream IDs that it cares.
<figure><img src="../../.gitbook/assets/zg-storage-log.png" alt=""><figcaption><p>Figure 1. Decentralized K-V Store</p></figcaption></figure> <figure><img src="../.gitbook/assets/zg-storage-log.png" alt=""><figcaption><p>Figure 1. Decentralized K-V Store</p></figcaption></figure>


@ -5,7 +5,7 @@
## Prerequisites ## Prerequisites
- Requires python version: 3.8, 3.9 or 3.10, higher version is not guaranteed (e.g. failed to install `pysha3`). - Requires python version: 3.8, 3.9 or 3.10, higher version is not guaranteed (e.g. failed to install `pysha3`).
- Installs dependencies under root folder: `pip3 install -r requirements.txt` - Install dependencies under root folder: `pip3 install -r requirements.txt`
## Install Blockchain Nodes ## Install Blockchain Nodes
@ -19,7 +19,7 @@ The blockchain node binaries will be compiled or downloaded from github to `test
## Run Tests ## Run Tests
Changes to the `tests` folder and run the following command to run all tests: Change to the `tests` folder and run the following command to run all tests:
``` ```
python test_all.py python test_all.py


@ -4,7 +4,7 @@
### Setup Environment ### Setup Environment
Install dependencies Node.js, yarn, hardhat. Install the dependencies Node.js, yarn, hardhat.
- Linux - Linux


@ -8,8 +8,8 @@ When an application server linking with the 0G Storage key-value runtime starts
When an application server with the key-value runtime encounters the commit record during playing the log, it identifies a conflict window consisting of all the log entries between the start log position of the transaction and the position of the commit record. The log entries in the conflict window therefore contain the key-value operations concurrent with the transaction submitting the commit record. The runtime further detects whether these concurrent operations contain the updates on the keys belonging to the read set of the transaction. If yes, the transaction is aborted, otherwise committed successfully. When an application server with the key-value runtime encounters the commit record during playing the log, it identifies a conflict window consisting of all the log entries between the start log position of the transaction and the position of the commit record. The log entries in the conflict window therefore contain the key-value operations concurrent with the transaction submitting the commit record. The runtime further detects whether these concurrent operations contain the updates on the keys belonging to the read set of the transaction. If yes, the transaction is aborted, otherwise committed successfully.
<figure><img src="../../.gitbook/assets/zg-storage-transaction.png" alt=""><figcaption><p>Figure 1. Transaction Processing on 0G K-V Store</p></figcaption></figure> <figure><img src="../.gitbook/assets/zg-storage-transaction.png" alt=""><figcaption><p>Figure 1. Transaction Processing on 0G K-V Store</p></figcaption></figure>
## Concurrent Assumption ## Concurrent Assumption
This transaction model assumes that the transaction participants are collaborative and will honestly compose the commit record with the correct content. Although this assumption in a decentralized environment is too strong, it is still achievable for specific applications. For example, for an application like Google Docs, a user normally shares the access to others who can be trusted. In case this assumption cannot hold, the code of the transaction can be stored in the ZeroGravity log and some mechanism of verifiable computation like zero-knowledge proof or hardware with trust execution environment (TEE) can be employed by the transaction executors to detect the validity of the commit record. This transaction model assumes that the transaction participants are collaborative and will honestly compose the commit record with the correct content. Although this assumption in a decentralized environment is too strong, it is still achievable for specific applications. For example, for an application like Google Docs, a user normally shares the access to others who can be trusted. In case this assumption cannot hold, the code of the transaction can be stored in the ZeroGravity log and some mechanism of verifiable computation like zero-knowledge proof or hardware with trusted execution environment (TEE) can be employed by the transaction executors to detect the validity of the commit record.
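
The conflict-window check described above reduces to a set-intersection test, roughly as sketched below; the key and set types are placeholders rather than the node's actual data structures.

```rust
use std::collections::HashSet;

/// Illustrative sketch of the optimistic-concurrency check: `read_set` holds
/// the keys the transaction read, `conflict_window_writes` the keys updated by
/// log entries between the transaction's start position and its commit record.
/// Any overlap forces an abort; otherwise the transaction commits.
fn commit_allowed(
    read_set: &HashSet<Vec<u8>>,
    conflict_window_writes: &HashSet<Vec<u8>>,
) -> bool {
    read_set.is_disjoint(conflict_window_writes)
}
```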


@ -42,8 +42,13 @@ metrics = { workspace = true }
rust-log = { package = "log", version = "0.4.22" } rust-log = { package = "log", version = "0.4.22" }
tracing-core = "0.1.32" tracing-core = "0.1.32"
tracing-log = "0.2.0" tracing-log = "0.2.0"
console-subscriber = { version = "0.4.1", optional = true }
contract-wrapper = { path = "../common/contract-wrapper" }
[dependencies.libp2p] [dependencies.libp2p]
version = "0.45.1" version = "0.45.1"
default-features = true default-features = true
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]
[features]
tokio-console = ["console-subscriber"]


@ -20,7 +20,7 @@ pub struct LogSyncConfig {
// blockchain provider retry params // blockchain provider retry params
// the number of retries after a connection times out // the number of retries after a connection times out
pub rate_limit_retries: u32, pub rate_limit_retries: u32,
// the nubmer of retries for rate limited responses // the number of retries for rate limited responses
pub timeout_retries: u32, pub timeout_retries: u32,
// the duration to wait before retry, in ms // the duration to wait before retry, in ms
pub initial_backoff: u64, pub initial_backoff: u64,


@ -10,6 +10,7 @@ zgs_spec = { path = "../../common/spec" }
zgs_seal = { path = "../../common/zgs_seal" } zgs_seal = { path = "../../common/zgs_seal" }
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }
contract-interface = { path = "../../common/contract-interface" } contract-interface = { path = "../../common/contract-interface" }
contract-wrapper = { path = "../../common/contract-wrapper" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
ethereum-types = "0.14" ethereum-types = "0.14"
tokio = { version = "1.19.2", features = ["full"] } tokio = { version = "1.19.2", features = ["full"] }


@ -2,7 +2,8 @@ use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use ethereum_types::{Address, H256, U256}; use contract_wrapper::SubmitConfig;
use ethereum_types::{Address, H256};
use ethers::core::k256::SecretKey; use ethers::core::k256::SecretKey;
use ethers::middleware::SignerMiddleware; use ethers::middleware::SignerMiddleware;
use ethers::providers::Http; use ethers::providers::Http;
@ -21,7 +22,6 @@ pub struct MinerConfig {
pub(crate) rpc_endpoint_url: String, pub(crate) rpc_endpoint_url: String,
pub(crate) mine_address: Address, pub(crate) mine_address: Address,
pub(crate) flow_address: Address, pub(crate) flow_address: Address,
pub(crate) submission_gas: Option<U256>,
pub(crate) cpu_percentage: u64, pub(crate) cpu_percentage: u64,
pub(crate) iter_batch: usize, pub(crate) iter_batch: usize,
pub(crate) shard_config: ShardConfig, pub(crate) shard_config: ShardConfig,
@ -29,6 +29,7 @@ pub struct MinerConfig {
pub(crate) rate_limit_retries: u32, pub(crate) rate_limit_retries: u32,
pub(crate) timeout_retries: u32, pub(crate) timeout_retries: u32,
pub(crate) initial_backoff: u64, pub(crate) initial_backoff: u64,
pub(crate) submission_config: SubmitConfig,
} }
pub type MineServiceMiddleware = SignerMiddleware<Arc<Provider<RetryClient<Http>>>, LocalWallet>; pub type MineServiceMiddleware = SignerMiddleware<Arc<Provider<RetryClient<Http>>>, LocalWallet>;
@ -41,7 +42,6 @@ impl MinerConfig {
rpc_endpoint_url: String, rpc_endpoint_url: String,
mine_address: Address, mine_address: Address,
flow_address: Address, flow_address: Address,
submission_gas: Option<U256>,
cpu_percentage: u64, cpu_percentage: u64,
iter_batch: usize, iter_batch: usize,
context_query_seconds: u64, context_query_seconds: u64,
@ -49,6 +49,7 @@ impl MinerConfig {
rate_limit_retries: u32, rate_limit_retries: u32,
timeout_retries: u32, timeout_retries: u32,
initial_backoff: u64, initial_backoff: u64,
submission_config: SubmitConfig,
) -> Option<MinerConfig> { ) -> Option<MinerConfig> {
miner_key.map(|miner_key| MinerConfig { miner_key.map(|miner_key| MinerConfig {
miner_id, miner_id,
@ -56,7 +57,6 @@ impl MinerConfig {
rpc_endpoint_url, rpc_endpoint_url,
mine_address, mine_address,
flow_address, flow_address,
submission_gas,
cpu_percentage, cpu_percentage,
iter_batch, iter_batch,
shard_config, shard_config,
@ -64,6 +64,7 @@ impl MinerConfig {
rate_limit_retries, rate_limit_retries,
timeout_retries, timeout_retries,
initial_backoff, initial_backoff,
submission_config,
}) })
} }


@ -57,7 +57,7 @@ pub(crate) async fn check_and_request_miner_id(
} }
(None, None) => { (None, None) => {
let beneficiary = provider.address(); let beneficiary = provider.address();
let id = request_miner_id(&mine_contract, beneficiary).await?; let id = request_miner_id(config, &mine_contract, beneficiary).await?;
set_miner_id(store, &id) set_miner_id(store, &id)
.await .await
.map_err(|e| format!("set miner id on db corrupt: {:?}", e))?; .map_err(|e| format!("set miner id on db corrupt: {:?}", e))?;
@ -86,6 +86,7 @@ async fn check_miner_id(
} }
async fn request_miner_id( async fn request_miner_id(
config: &MinerConfig,
mine_contract: &PoraMine<MineServiceMiddleware>, mine_contract: &PoraMine<MineServiceMiddleware>,
beneficiary: Address, beneficiary: Address,
) -> Result<H256, String> { ) -> Result<H256, String> {
@ -94,16 +95,13 @@ async fn request_miner_id(
let submission_call: ContractCall<_, _> = let submission_call: ContractCall<_, _> =
mine_contract.request_miner_id(beneficiary, 0).legacy(); mine_contract.request_miner_id(beneficiary, 0).legacy();
let pending_tx = submission_call let receipt = contract_wrapper::submit_with_retry(
.send() submission_call,
&config.submission_config,
mine_contract.client().clone(),
)
.await .await
.map_err(|e| format!("Fail to request miner id: {:?}", e))?; .map_err(|e| format!("Fail to submit miner id request: {:?}", e))?;
let receipt = pending_tx
.retries(3)
.await
.map_err(|e| format!("Fail to execute mine answer transaction: {:?}", e))?
.ok_or("Request miner id transaction dropped after 3 retries")?;
let first_log = receipt let first_log = receipt
.logs .logs


@ -1,13 +1,11 @@
use contract_interface::PoraAnswer; use contract_interface::PoraAnswer;
use contract_interface::{PoraMine, ZgsFlow}; use contract_interface::{PoraMine, ZgsFlow};
use ethereum_types::U256; use contract_wrapper::SubmitConfig;
use ethers::contract::ContractCall; use ethers::contract::ContractCall;
use ethers::prelude::{Http, Provider, RetryClient}; use ethers::prelude::{Http, Provider, RetryClient};
use ethers::providers::PendingTransaction;
use hex::ToHex; use hex::ToHex;
use shared_types::FlowRangeProof; use shared_types::FlowRangeProof;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use storage::H256; use storage::H256;
use storage_async::Store; use storage_async::Store;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
@ -19,15 +17,13 @@ use crate::watcher::MineContextMessage;
use zgs_spec::{BYTES_PER_SEAL, SECTORS_PER_SEAL}; use zgs_spec::{BYTES_PER_SEAL, SECTORS_PER_SEAL};
const SUBMISSION_RETRIES: usize = 15;
pub struct Submitter { pub struct Submitter {
mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>, mine_answer_receiver: mpsc::UnboundedReceiver<AnswerWithoutProof>,
mine_context_receiver: broadcast::Receiver<MineContextMessage>, mine_context_receiver: broadcast::Receiver<MineContextMessage>,
mine_contract: PoraMine<MineServiceMiddleware>, mine_contract: PoraMine<MineServiceMiddleware>,
flow_contract: ZgsFlow<Provider<RetryClient<Http>>>, flow_contract: ZgsFlow<Provider<RetryClient<Http>>>,
default_gas_limit: Option<U256>,
store: Arc<Store>, store: Arc<Store>,
config: SubmitConfig,
} }
impl Submitter { impl Submitter {
@ -41,8 +37,7 @@ impl Submitter {
config: &MinerConfig, config: &MinerConfig,
) { ) {
let mine_contract = PoraMine::new(config.mine_address, signing_provider); let mine_contract = PoraMine::new(config.mine_address, signing_provider);
let flow_contract = ZgsFlow::new(config.flow_address, provider); let flow_contract = ZgsFlow::new(config.flow_address, provider.clone());
let default_gas_limit = config.submission_gas;
let submitter = Submitter { let submitter = Submitter {
mine_answer_receiver, mine_answer_receiver,
@ -50,7 +45,7 @@ impl Submitter {
mine_contract, mine_contract,
flow_contract, flow_contract,
store, store,
default_gas_limit, config: config.submission_config,
}; };
executor.spawn( executor.spawn(
async move { Box::pin(submitter.start()).await }, async move { Box::pin(submitter.start()).await },
@ -134,11 +129,7 @@ impl Submitter {
}; };
trace!("submit_answer: answer={:?}", answer); trace!("submit_answer: answer={:?}", answer);
let mut submission_call: ContractCall<_, _> = self.mine_contract.submit(answer).legacy(); let submission_call: ContractCall<_, _> = self.mine_contract.submit(answer).legacy();
if let Some(gas_limit) = self.default_gas_limit {
submission_call = submission_call.gas(gas_limit);
}
if let Some(calldata) = submission_call.calldata() { if let Some(calldata) = submission_call.calldata() {
debug!( debug!(
@ -153,27 +144,13 @@ impl Submitter {
submission_call.estimate_gas().await submission_call.estimate_gas().await
); );
let pending_transaction: PendingTransaction<'_, _> = submission_call contract_wrapper::submit_with_retry(
.send() submission_call,
&self.config,
self.mine_contract.client().clone(),
)
.await .await
.map_err(|e| format!("Fail to send PoRA submission transaction: {:?}", e))?; .map_err(|e| format!("Failed to submit mine answer: {:?}", e))?;
debug!(
"Signed submission transaction hash: {:?}",
pending_transaction.tx_hash()
);
let receipt = pending_transaction
.retries(SUBMISSION_RETRIES)
.interval(Duration::from_secs(2))
.await
.map_err(|e| format!("Fail to execute PoRA submission transaction: {:?}", e))?
.ok_or(format!(
"PoRA submission transaction dropped after {} retries",
SUBMISSION_RETRIES
))?;
info!("Submit PoRA success, receipt: {:?}", receipt);
Ok(()) Ok(())
} }


@ -740,7 +740,7 @@ where
&error, &error,
ConnectionDirection::Outgoing, ConnectionDirection::Outgoing,
); );
// inform failures of requests comming outside the behaviour // inform failures of requests coming outside the behaviour
if let RequestId::Application(id) = id { if let RequestId::Application(id) = id {
self.add_event(BehaviourEvent::RPCFailed { peer_id, id }); self.add_event(BehaviourEvent::RPCFailed { peer_id, id });
} }


@ -103,7 +103,7 @@ pub struct Config {
/// Subscribe to all subnets for the duration of the runtime. /// Subscribe to all subnets for the duration of the runtime.
pub subscribe_all_subnets: bool, pub subscribe_all_subnets: bool,
/// Import/aggregate all attestations recieved on subscribed subnets for the duration of the /// Import/aggregate all attestations received on subscribed subnets for the duration of the
/// runtime. /// runtime.
pub import_all_attestations: bool, pub import_all_attestations: bool,


@ -54,7 +54,7 @@ impl Service {
struct Ev(PeerManagerEvent); struct Ev(PeerManagerEvent);
impl From<void::Void> for Ev { impl From<void::Void> for Ev {
fn from(_: void::Void) -> Self { fn from(_: void::Void) -> Self {
unreachable!("No events are emmited") unreachable!("No events are emitted")
} }
} }
impl From<PeerManagerEvent> for Ev { impl From<PeerManagerEvent> for Ev {


@ -1,4 +1,4 @@
//! This crate aims to provide a common set of tools that can be used to create a "environment" to //! This crate aims to provide a common set of tools that can be used to create an "environment" to
//! run Zgs services. This allows for the unification of creating tokio runtimes, etc. //! run Zgs services. This allows for the unification of creating tokio runtimes, etc.
//! //!
//! The idea is that the main thread creates an `Environment`, which is then used to spawn a //! The idea is that the main thread creates an `Environment`, which is then used to spawn a


@ -1,7 +1,7 @@
#![allow(clippy::field_reassign_with_default)] #![allow(clippy::field_reassign_with_default)]
use crate::ZgsConfig; use crate::ZgsConfig;
use ethereum_types::{H256, U256}; use ethereum_types::H256;
use ethers::prelude::{Http, Middleware, Provider}; use ethers::prelude::{Http, Middleware, Provider};
use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig}; use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig};
use miner::MinerConfig; use miner::MinerConfig;
@ -179,7 +179,6 @@ impl ZgsConfig {
} else { } else {
None None
}; };
let submission_gas = self.miner_submission_gas.map(U256::from);
let cpu_percentage = self.miner_cpu_percentage; let cpu_percentage = self.miner_cpu_percentage;
let iter_batch = self.mine_iter_batch_size; let iter_batch = self.mine_iter_batch_size;
let context_query_seconds = self.mine_context_query_seconds; let context_query_seconds = self.mine_context_query_seconds;
@ -192,7 +191,6 @@ impl ZgsConfig {
self.blockchain_rpc_endpoint.clone(), self.blockchain_rpc_endpoint.clone(),
mine_address, mine_address,
flow_address, flow_address,
submission_gas,
cpu_percentage, cpu_percentage,
iter_batch, iter_batch,
context_query_seconds, context_query_seconds,
@ -200,6 +198,7 @@ impl ZgsConfig {
self.rate_limit_retries, self.rate_limit_retries,
self.timeout_retries, self.timeout_retries,
self.initial_backoff, self.initial_backoff,
self.submission_config,
)) ))
} }


@ -74,7 +74,6 @@ build_config! {
(mine_contract_address, (String), "".to_string()) (mine_contract_address, (String), "".to_string())
(miner_id, (Option<String>), None) (miner_id, (Option<String>), None)
(miner_key, (Option<String>), None) (miner_key, (Option<String>), None)
(miner_submission_gas, (Option<u64>), None)
(miner_cpu_percentage, (u64), 100) (miner_cpu_percentage, (u64), 100)
(mine_iter_batch_size, (usize), 100) (mine_iter_batch_size, (usize), 100)
(reward_contract_address, (String), "".to_string()) (reward_contract_address, (String), "".to_string())
@ -106,6 +105,9 @@ pub struct ZgsConfig {
// rpc config, configured by [rpc] section by `config` crate. // rpc config, configured by [rpc] section by `config` crate.
pub rpc: rpc::RPCConfig, pub rpc: rpc::RPCConfig,
// submission config, configured by [submission_config] section by `config` crate.
pub submission_config: contract_wrapper::SubmitConfig,
// metrics config, configured by [metrics] section by `config` crate. // metrics config, configured by [metrics] section by `config` crate.
pub metrics: metrics::MetricsConfiguration, pub metrics: metrics::MetricsConfiguration,
} }


@@ -1,7 +1,8 @@
 use task_executor::TaskExecutor;
-use tracing::Level;
 use tracing_log::AsLog;
-use tracing_subscriber::EnvFilter;
+use tracing_subscriber::layer::SubscriberExt;
+use tracing_subscriber::util::SubscriberInitExt;
+use tracing_subscriber::{EnvFilter, Layer};
 const LOG_RELOAD_PERIOD_SEC: u64 = 30;
@@ -15,19 +16,26 @@ pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecut
 .unwrap_or_default()
 .trim_end()
 .to_string();
-let builder = tracing_subscriber::fmt()
-.with_max_level(Level::TRACE)
-.with_env_filter(EnvFilter::try_new(config.clone()).expect("invalid log level"))
+let filter = EnvFilter::try_new(config.clone()).expect("invalid log level");
+let (filter, reload_handle) = tracing_subscriber::reload::Layer::new(filter);
+let fmt_layer = tracing_subscriber::fmt::layer()
 .with_writer(non_blocking)
 .with_ansi(false)
+.compact()
+.with_filter(filter);
 // .with_file(true)
 // .with_line_number(true)
 // .with_thread_names(true)
-.with_filter_reloading();
-let handle = builder.reload_handle();
-builder.init();
+let subscriber = tracing_subscriber::registry().with(fmt_layer);
+#[cfg(feature = "tokio-console")]
+{
+subscriber.with(console_subscriber::spawn()).init();
+}
+#[cfg(not(feature = "tokio-console"))]
+{
+subscriber.init();
+}
 // periodically check for config changes
 executor.spawn(
@@ -57,7 +65,7 @@ pub fn configure(log_level_file: &str, log_directory: &str, executor: TaskExecut
 println!("Updating log config to {:?}", new_config);
-match handle.reload(&new_config) {
+match reload_handle.reload(&new_config) {
 Ok(()) => {
 rust_log::set_max_level(tracing_core::LevelFilter::current().as_log());
 config = new_config
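
The hunk above moves logging from the `fmt()` builder's `with_filter_reloading()` to a `registry()` with a reloadable `EnvFilter` attached to the fmt layer, so an optional `console_subscriber` layer can be stacked on when the `tokio-console` feature is enabled. A minimal standalone sketch of that reload pattern, assuming `tracing-subscriber` 0.3 with the `env-filter` feature (the console layer and the file watcher from the real code are omitted):

```rust
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{reload, EnvFilter, Layer};

fn main() {
    // Start with an initial directive; the node reads this from its log config file.
    let filter = EnvFilter::try_new("info").expect("invalid log level");
    // Wrap the filter in a reload layer so it can be swapped at runtime.
    let (filter, reload_handle) = reload::Layer::new(filter);

    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_ansi(false)
        .compact()
        .with_filter(filter);

    // Additional layers (e.g. console_subscriber::spawn()) can be stacked here.
    tracing_subscriber::registry().with(fmt_layer).init();

    tracing::info!("visible at info");
    tracing::debug!("hidden for now");

    // Later, e.g. when the watched config file changes on disk:
    reload_handle
        .reload(EnvFilter::try_new("debug").expect("invalid log level"))
        .expect("failed to reload filter");
    tracing::debug!("now visible at debug");
}
```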

View File

@@ -238,7 +238,7 @@ auto_sync_enabled = true
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
-# Maximum number of continous failures to terminate a file sync.
+# Maximum number of continuous failures to terminate a file sync.
 # max_request_failures = 3
 # Timeout to dial peers.

View File

@@ -250,7 +250,7 @@ auto_sync_enabled = true
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
-# Maximum number of continous failures to terminate a file sync.
+# Maximum number of continuous failures to terminate a file sync.
 # max_request_failures = 3
 # Timeout to dial peers.

View File

@@ -252,7 +252,7 @@
 # Enable to start a file sync via RPC (e.g. `admin_startSyncFile`).
 # sync_file_by_rpc_enabled = true
-# Maximum number of continous failures to terminate a file sync.
+# Maximum number of continuous failures to terminate a file sync.
 # max_request_failures = 3
 # Timeout to dial peers.

View File

@@ -1,12 +1,11 @@
 set -e
 artifacts_path="$1"
 check_abis() {
 for contract_name in "$@"; do
-diff $(./scripts/search_abi.sh "$artifacts_path" "$contract_name.json") "storage-contracts-abis/$contract_name.json"
+diff "$(./scripts/search_abi.sh "$artifacts_path" "$contract_name.json")" "storage-contracts-abis/$contract_name.json"
 done
 }
-check_abis DummyMarket DummyReward Flow PoraMine PoraMineTest FixedPrice ChunkLinearReward FixedPriceFlow
+check_abis DummyMarket DummyReward Flow PoraMine PoraMineTest FixedPrice ChunkLinearReward FixedPriceFlow

View File

@@ -10,22 +10,22 @@ PUBLIC_IP=$(curl -s https://ipinfo.io/ip)
 FILE=run/config.toml
 # enable sync
-sed -in-place='' 's/# \[sync\]/\[sync\]/g' $FILE
+sed -i 's/# \[sync\]/\[sync\]/g' $FILE
 # enable auto_sync
-sed -in-place='' 's/# auto_sync_enabled = false/auto_sync_enabled = true/g' $FILE
+sed -i 's/# auto_sync_enabled = false/auto_sync_enabled = true/g' $FILE
 # reduce timeout for finding peers
-sed -in-place='' 's/# find_peer_timeout = .*/find_peer_timeout = "10s"/g' $FILE
+sed -i 's/# find_peer_timeout = .*/find_peer_timeout = "10s"/g' $FILE
 # set public ip
-sed -in-place='' "s/# network_enr_address = .*/network_enr_address = \"$PUBLIC_IP\"/g" $FILE
+sed -i "s/# network_enr_address = .*/network_enr_address = \"$PUBLIC_IP\"/g" $FILE
 # set miner key
-sed -in-place='' "s/miner_key = \"\"/miner_key = \"$MINER_KEY\"/g" $FILE
+sed -i "s/miner_key = \"\"/miner_key = \"$MINER_KEY\"/g" $FILE
 # set miner contract address
-sed -in-place='' "s/mine_contract_address = .*/mine_contract_address = \"$MINE_CONTRACT\"/g" $FILE
+sed -i "s/mine_contract_address = .*/mine_contract_address = \"$MINE_CONTRACT\"/g" $FILE
 # set blockchain rpc endpoint
-sed -in-place='' "s|blockchain_rpc_endpoint = .*|blockchain_rpc_endpoint = \"$BLOCKCHAIN_RPC\"|g" $FILE
+sed -i "s|blockchain_rpc_endpoint = .*|blockchain_rpc_endpoint = \"$BLOCKCHAIN_RPC\"|g" $FILE
 # set flow contract address
-sed -in-place='' "s/log_contract_address = .*/log_contract_address = \"$FLOW_CONTRACT\"/g" $FILE
+sed -i "s/log_contract_address = .*/log_contract_address = \"$FLOW_CONTRACT\"/g" $FILE
 # set contract deployed block number
-sed -in-place='' "s/log_sync_start_block_number = .*/log_sync_start_block_number = $BLOCK_NUMBER/g" $FILE
+sed -i "s/log_sync_start_block_number = .*/log_sync_start_block_number = $BLOCK_NUMBER/g" $FILE
 # update the boot node ids
-sed -in-place='' 's|network_boot_nodes = .*|network_boot_nodes = ["/ip4/54.219.26.22/udp/1234/p2p/16Uiu2HAmTVDGNhkHD98zDnJxQWu3i1FL1aFYeh9wiQTNu4pDCgps","/ip4/52.52.127.117/udp/1234/p2p/16Uiu2HAkzRjxK2gorngB1Xq84qDrT4hSVznYDHj6BkbaE4SGx9oS","/ip4/18.167.69.68/udp/1234/p2p/16Uiu2HAm2k6ua2mGgvZ8rTMV8GhpW71aVzkQWy7D37TTDuLCpgmX"]|g' $FILE
+sed -i 's|network_boot_nodes = .*|network_boot_nodes = ["/ip4/54.219.26.22/udp/1234/p2p/16Uiu2HAmTVDGNhkHD98zDnJxQWu3i1FL1aFYeh9wiQTNu4pDCgps","/ip4/52.52.127.117/udp/1234/p2p/16Uiu2HAkzRjxK2gorngB1Xq84qDrT4hSVznYDHj6BkbaE4SGx9oS","/ip4/18.167.69.68/udp/1234/p2p/16Uiu2HAm2k6ua2mGgvZ8rTMV8GhpW71aVzkQWy7D37TTDuLCpgmX"]|g' $FILE

View File

@@ -20,12 +20,12 @@ This is a rust implementation of the [Discovery v5](https://github.com/ethereum/
 peer discovery protocol.
 Discovery v5 is a protocol designed for encrypted peer discovery and topic advertisement. Each peer/node
-on the network is identified via it's `ENR` ([Ethereum Node
+on the network is identified via its `ENR` ([Ethereum Node
 Record](https://eips.ethereum.org/EIPS/eip-778)), which is essentially a signed key-value store
 containing the node's public key and optionally IP address and port.
 Discv5 employs a kademlia-like routing table to store and manage discovered peers and topics. The
-protocol allows for external IP discovery in NAT environments through regular PING/PONG's with
+protocol allows for external IP discovery in NAT environments through regular PING/PONGs with
 discovered nodes. Nodes return the external IP address that they have received and a simple
 majority is chosen as our external IP address. If an external IP address is updated, this is
 produced as an event to notify the swarm (if one is used for this behaviour).
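
The paragraph above describes how discv5 settles on an external IP: each discovered node reports the address it observed for us, and a simple majority wins. A minimal, self-contained sketch of that vote (illustrative only; the real implementation keeps a rolling set of votes rather than a one-shot tally):

```rust
use std::collections::HashMap;
use std::net::IpAddr;

/// Pick the most frequently reported external address, if any.
fn majority_external_ip(reports: &[IpAddr]) -> Option<IpAddr> {
    let mut votes: HashMap<IpAddr, usize> = HashMap::new();
    for ip in reports {
        *votes.entry(*ip).or_insert(0) += 1;
    }
    votes
        .into_iter()
        .max_by_key(|(_, count)| *count)
        .map(|(ip, _)| ip)
}

fn main() {
    let reports: Vec<IpAddr> = vec![
        "203.0.113.7".parse().unwrap(),
        "203.0.113.7".parse().unwrap(),
        "198.51.100.2".parse().unwrap(),
    ];
    // Prints Some(203.0.113.7): the address reported by most peers.
    println!("{:?}", majority_external_ip(&reports));
}
```

Requiring a majority, rather than trusting any single peer, keeps one misbehaving node from flipping the advertised address.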

View File

@@ -736,7 +736,7 @@ enum ClosestBucketsIterState {
 /// The starting state of the iterator yields the first bucket index and
 /// then transitions to `ZoomIn`.
 Start(BucketIndex),
-/// The iterator "zooms in" to to yield the next bucket containing nodes that
+/// The iterator "zooms in" to yield the next bucket containing nodes that
 /// are incrementally closer to the local node but further from the `target`.
 /// These buckets are identified by a `1` in the corresponding bit position
 /// of the distance bit string. When bucket `0` is reached, the iterator
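
To make the doc comment above concrete: during the "zoom in" phase, the buckets visited are exactly those whose bit is set in the XOR distance between the local key and the target, walked from the starting (highest set) bit down towards bucket 0. A toy sketch using a 64-bit distance in place of the real 256-bit keyspace:

```rust
/// Bucket indices visited while "zooming in", highest set bit first.
/// Toy version: real Kademlia keys are 256-bit, and the real iterator
/// alternates between zoom-in and zoom-out states.
fn zoom_in_buckets(distance: u64) -> Vec<u32> {
    (0..64u32)
        .rev()
        .filter(|i| distance & (1u64 << *i) != 0)
        .collect()
}

fn main() {
    // Distance 0b1010 has bits 3 and 1 set, so buckets 3 then 1 are visited.
    assert_eq!(zoom_in_buckets(0b1010), vec![3, 1]);
}
```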

View File

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2020 Age Manning
+Copyright (c) 2025 Age Manning
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@@ -42,9 +42,9 @@ pub trait EnrKey: Send + Sync + Unpin + 'static {
 /// Returns the public key associated with current key pair.
 fn public(&self) -> Self::PublicKey;
-/// Provides a method to decode a raw public key from an ENR `BTreeMap` to a useable public key.
+/// Provides a method to decode a raw public key from an ENR `BTreeMap` to a usable public key.
 ///
-/// This method allows a key type to decode the raw bytes in an ENR to a useable
+/// This method allows a key type to decode the raw bytes in an ENR to a usable
 /// `EnrPublicKey`. It takes the ENR's `BTreeMap` and returns a public key.
 ///
 /// Note: This specifies the supported key schemes for an ENR.

View File

@@ -1195,7 +1195,7 @@ mod tests {
 assert_eq!(enr.tcp4(), Some(tcp));
 assert!(enr.verify());
-// Compare the encoding as the key itself can be differnet
+// Compare the encoding as the key itself can be different
 assert_eq!(enr.public_key().encode(), key.public().encode(),);
 }

View File

@@ -406,7 +406,7 @@ impl_for_vec!(SmallVec<[T; 8]>, Some(8));
 /// Decodes `bytes` as if it were a list of variable-length items.
 ///
-/// The `ssz::SszDecoder` can also perform this functionality, however it it significantly faster
+/// The `ssz::SszDecoder` can also perform this functionality, however it is significantly faster
 /// as it is optimized to read same-typed items whilst `ssz::SszDecoder` supports reading items of
 /// differing types.
 pub fn decode_list_of_variable_length_items<T: Decode>(
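
For context on what this function decodes: an SSZ-encoded list of variable-length items is laid out as N little-endian u32 offsets followed by the item payloads, and the first offset fixes N. A minimal sketch of that splitting step, under that layout assumption (the real function then feeds each slice to the item's `Decode` impl; error handling here is simplified to strings):

```rust
/// Split SSZ bytes for a list of variable-length items into per-item slices.
/// Layout assumed: N little-endian u32 offsets, then the item payloads.
fn split_variable_length_items(bytes: &[u8]) -> Result<Vec<&[u8]>, String> {
    if bytes.is_empty() {
        return Ok(Vec::new());
    }
    let read_offset = |i: usize| -> Result<usize, String> {
        let raw = bytes.get(i * 4..i * 4 + 4).ok_or("offset out of range")?;
        Ok(u32::from_le_bytes(raw.try_into().unwrap()) as usize)
    };
    let first = read_offset(0)?;
    if first % 4 != 0 || first > bytes.len() {
        return Err("invalid first offset".into());
    }
    let count = first / 4;
    let mut items = Vec::with_capacity(count);
    for i in 0..count {
        let start = read_offset(i)?;
        let end = if i + 1 < count { read_offset(i + 1)? } else { bytes.len() };
        items.push(bytes.get(start..end).ok_or("item out of bounds")?);
    }
    Ok(items)
}

fn main() {
    // Two items: offsets 8 and 10, payloads [1, 2] and [3].
    let bytes = [8u8, 0, 0, 0, 10, 0, 0, 0, 1, 2, 3];
    assert_eq!(
        split_variable_length_items(&bytes).unwrap(),
        vec![&[1u8, 2][..], &[3u8][..]]
    );
}
```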