mirror of
https://github.com/0glabs/0g-chain.git
synced 2025-04-04 15:55:23 +00:00
Compare commits
339 Commits
14401bd2fb
...
a1dc4a4a77
Author | SHA1 | Date | |
---|---|---|---|
![]() |
a1dc4a4a77 | ||
![]() |
840deea660 | ||
![]() |
6c3360f102 | ||
![]() |
80b2dacbc2 | ||
![]() |
1152537679 | ||
![]() |
5bd6ac39ee | ||
![]() |
e0fcd07a08 | ||
![]() |
70ac592012 | ||
![]() |
de22587a5b | ||
![]() |
0c02c27a9d | ||
![]() |
4409bfc996 | ||
![]() |
723241f484 | ||
![]() |
84d1a89bec | ||
![]() |
94ddf20305 | ||
![]() |
4ebbb886bf | ||
![]() |
57943ec0e0 | ||
![]() |
04ce67f6a9 | ||
![]() |
500e66733d | ||
![]() |
8b691e61f8 | ||
![]() |
a0bdd2a142 | ||
![]() |
53dcea2867 | ||
![]() |
d31a599c60 | ||
![]() |
07cf4ad258 | ||
![]() |
cb4e6e006e | ||
![]() |
0e37d518ec | ||
![]() |
822e374be6 | ||
![]() |
9ca8359202 | ||
![]() |
32bcc7f4e3 | ||
![]() |
f50a429527 | ||
![]() |
8ff2277450 | ||
![]() |
cdf029c87a | ||
![]() |
5f9325c2a0 | ||
![]() |
5f4f1851cb | ||
![]() |
4c28427089 | ||
![]() |
0f40b721ee | ||
![]() |
ec3733a2c6 | ||
![]() |
8df7625ac1 | ||
![]() |
31c96eeb93 | ||
![]() |
ac1af4ae92 | ||
![]() |
0d54bb9202 | ||
![]() |
73158cd738 | ||
![]() |
73b7d800a3 | ||
![]() |
27ddc91956 | ||
![]() |
9962b7b0db | ||
![]() |
f415fb1332 | ||
![]() |
28b9c07e02 | ||
![]() |
45b7920181 | ||
![]() |
56d337df16 | ||
![]() |
85059d734e | ||
![]() |
6b4e8415da | ||
![]() |
77b817f9b8 | ||
![]() |
46378d6157 | ||
![]() |
d0721fd172 | ||
![]() |
5e34f5b289 | ||
![]() |
d6bca1b221 | ||
![]() |
8dc89ad08d | ||
![]() |
e4989f10cd | ||
![]() |
9839a244bf | ||
![]() |
c9043ca158 | ||
![]() |
8d48dadb02 | ||
![]() |
c80be7bbf7 | ||
![]() |
17fa02b554 | ||
![]() |
e348bd3748 | ||
![]() |
f44d7cc94d | ||
![]() |
0bfbd114c9 | ||
![]() |
483a939724 | ||
![]() |
547b0057c7 | ||
![]() |
1da9745903 | ||
![]() |
e952a4a705 | ||
![]() |
69a4a6298e | ||
![]() |
d05c2f9563 | ||
![]() |
82f54a1974 | ||
![]() |
3f1140dcd4 | ||
![]() |
849c95d93e | ||
![]() |
eee50a3f75 | ||
![]() |
1d2820a3b6 | ||
![]() |
950e4766d2 | ||
![]() |
91698d388f | ||
![]() |
4cf57457a7 | ||
![]() |
337f1c5cc8 | ||
![]() |
a437523ea2 | ||
![]() |
77ec52e16b | ||
![]() |
b1365fb792 | ||
![]() |
d61f4e94fd | ||
![]() |
8bc3b15c46 | ||
![]() |
e8008c9a3a | ||
![]() |
28fa4b7993 | ||
![]() |
bd0acdbd4b | ||
![]() |
7f62518464 | ||
![]() |
0b4c5da294 | ||
![]() |
ad93042155 | ||
![]() |
a7dd451e44 | ||
![]() |
c99879e9f7 | ||
![]() |
820a676709 | ||
![]() |
27feb30bb9 | ||
![]() |
17bd9a6c71 | ||
![]() |
c172fb3c55 | ||
![]() |
0eb947b594 | ||
![]() |
a2746657a1 | ||
![]() |
493ce0516f | ||
![]() |
65d091d458 | ||
![]() |
8023be0067 | ||
![]() |
eaacd83de5 | ||
![]() |
6862cde560 | ||
![]() |
b8e6e584b8 | ||
![]() |
27d63f157c | ||
![]() |
7aede3390d | ||
![]() |
49f7be8486 | ||
![]() |
fbce24abef | ||
![]() |
7e50ce8142 | ||
![]() |
43dd1a7c41 | ||
![]() |
72d30dde8a | ||
![]() |
c18ca45188 | ||
![]() |
f50d847c4f | ||
![]() |
568ff70ad7 | ||
![]() |
1355bd6ab1 | ||
![]() |
ceb4d774ff | ||
![]() |
ab3cf7c994 | ||
![]() |
8bd14a6c00 | ||
![]() |
78caabebe1 | ||
![]() |
f4408080e6 | ||
![]() |
1d2504b085 | ||
![]() |
33932e8ad6 | ||
![]() |
ab10ce628c | ||
![]() |
edf2935f31 | ||
![]() |
a4583be44b | ||
![]() |
3c4d91a443 | ||
![]() |
774e2efce8 | ||
![]() |
30728b75e9 | ||
![]() |
e822edfe8d | ||
![]() |
d2406d4efd | ||
![]() |
e8e2e3abd3 | ||
![]() |
2da75122b3 | ||
![]() |
ed5646b71a | ||
![]() |
4aa67c9efb | ||
![]() |
bd3d947e9b | ||
![]() |
a8c86f7f8b | ||
![]() |
058d9f2285 | ||
![]() |
3709a23632 | ||
![]() |
04dfd2a2e9 | ||
![]() |
932664efac | ||
![]() |
c949c06fce | ||
![]() |
14ca626365 | ||
![]() |
008b421fd2 | ||
![]() |
ac1fd4360d | ||
![]() |
d35b277cab | ||
![]() |
2c248aff18 | ||
![]() |
4ab0d3ee27 | ||
![]() |
8d761147a2 | ||
![]() |
4917eb5976 | ||
![]() |
f8d5f29078 | ||
![]() |
efee71e2e6 | ||
![]() |
6190839ddc | ||
![]() |
c1efdaa507 | ||
![]() |
62c5eaf515 | ||
![]() |
48c349c127 | ||
![]() |
4b09c6cd37 | ||
![]() |
02e96e6424 | ||
![]() |
b3a8343a19 | ||
![]() |
c6e4563cac | ||
![]() |
7d4828f415 | ||
![]() |
1fbf607360 | ||
![]() |
154dd509ee | ||
![]() |
986172d3a7 | ||
![]() |
4fabd4d011 | ||
![]() |
e25cc5f531 | ||
![]() |
6202424c27 | ||
![]() |
c7ed82b4f4 | ||
![]() |
a3f3aaaecc | ||
![]() |
701a0ba97e | ||
![]() |
1e0194262d | ||
![]() |
e3e47e5e2f | ||
![]() |
93cceff23c | ||
![]() |
1680cd6b32 | ||
![]() |
284181edc9 | ||
![]() |
422e940c28 | ||
![]() |
bb5d5130cf | ||
![]() |
b53783447b | ||
![]() |
6f2b402294 | ||
![]() |
82139161be | ||
![]() |
4798eea3ff | ||
![]() |
ee01ac7a7b | ||
![]() |
47cee39c64 | ||
![]() |
521f558f5d | ||
![]() |
adb09a7c82 | ||
![]() |
3da66a87e6 | ||
![]() |
19a202669a | ||
![]() |
eaf81e9465 | ||
![]() |
817a8a151a | ||
![]() |
78114aed73 | ||
![]() |
34a76200f0 | ||
![]() |
fe8c36f891 | ||
![]() |
8357cc2191 | ||
![]() |
f8e102fbd5 | ||
![]() |
d1c3f36bbe | ||
![]() |
89d3829646 | ||
![]() |
ffad9dbdd5 | ||
![]() |
0bbaeb0393 | ||
![]() |
cc4f72b165 | ||
![]() |
6a197a5db5 | ||
![]() |
e787cd052e | ||
![]() |
2454c94596 | ||
![]() |
454733f55b | ||
![]() |
14e1e3a7d4 | ||
![]() |
ca3ab93657 | ||
![]() |
c8bf4644c1 | ||
![]() |
272f82ec99 | ||
![]() |
e198eeb3b4 | ||
![]() |
bbfaa54ddf | ||
![]() |
4e66a56208 | ||
![]() |
9c629ad113 | ||
![]() |
b0d737d354 | ||
![]() |
a8df31b31a | ||
![]() |
6243944db6 | ||
![]() |
7f339d20ca | ||
![]() |
916ec6d30c | ||
![]() |
b4c04656ab | ||
![]() |
837e57ec2e | ||
![]() |
5f802fcfbd | ||
![]() |
f229afce1a | ||
![]() |
608f70b20a | ||
![]() |
74f76d125c | ||
![]() |
3853e276a6 | ||
![]() |
7aef2f09e9 | ||
![]() |
58d7c89f8e | ||
![]() |
d2d661276e | ||
![]() |
9de9de671e | ||
![]() |
ce6aac3a72 | ||
![]() |
23ce7d8169 | ||
![]() |
60a8073574 | ||
![]() |
2d07988994 | ||
![]() |
6a9eda8634 | ||
![]() |
4788c064bf | ||
![]() |
1743cf5275 | ||
![]() |
9aef8e4971 | ||
![]() |
38230d35e3 | ||
![]() |
af5eea690b | ||
![]() |
1c1db357f5 | ||
![]() |
409841c79c | ||
![]() |
4c3f6533a0 | ||
![]() |
e1bd6ffa2f | ||
![]() |
5b0e7c8c58 | ||
![]() |
8d85c1ae1e | ||
![]() |
80f2370d68 | ||
![]() |
16233d6031 | ||
![]() |
828f17897e | ||
![]() |
a79d852d1c | ||
![]() |
0306bec0ae | ||
![]() |
5c51530b8e | ||
![]() |
21dc0e21b3 | ||
![]() |
8d07d9cb3b | ||
![]() |
e7cc89a642 | ||
![]() |
2e8c7ce337 | ||
![]() |
110adcab2c | ||
![]() |
3d5f5902b8 | ||
![]() |
4cf41d18c2 | ||
![]() |
dbc3ad7fd2 | ||
![]() |
7990021431 | ||
![]() |
fa33947496 | ||
![]() |
4ff43eb270 | ||
![]() |
d66b7d2705 | ||
![]() |
025b7b2cdb | ||
![]() |
94914d4ca1 | ||
![]() |
3c53e72220 | ||
![]() |
871e26670c | ||
![]() |
da2f835bf7 | ||
![]() |
6a7fd4c8bd | ||
![]() |
f72b628b71 | ||
![]() |
3e877aca88 | ||
![]() |
360f21f9f8 | ||
![]() |
d981070ede | ||
![]() |
346f4be683 | ||
![]() |
1b6f1468ec | ||
![]() |
72e8641c8d | ||
![]() |
ac2e46f91e | ||
![]() |
4686a2a3e9 | ||
![]() |
667e532aaa | ||
![]() |
32a0193c45 | ||
![]() |
543417c01f | ||
![]() |
be5bf62ab8 | ||
![]() |
41b79e44af | ||
![]() |
0ea92335de | ||
![]() |
2a93c41fcc | ||
![]() |
3033529d9f | ||
![]() |
7ca43024e4 | ||
![]() |
c9d900be2c | ||
![]() |
8f93ca2048 | ||
![]() |
198b620cb4 | ||
![]() |
e34c94aa62 | ||
![]() |
d3233d65d5 | ||
![]() |
6ea518960a | ||
![]() |
673790465d | ||
![]() |
a548de05c2 | ||
![]() |
3afb656d1f | ||
![]() |
7f73061c13 | ||
![]() |
da8a4f8787 | ||
![]() |
e96fd1735d | ||
![]() |
eb9d3a3ed8 | ||
![]() |
969614d555 | ||
![]() |
7866ee2f74 | ||
![]() |
66e41733e7 | ||
![]() |
a681a5e631 | ||
![]() |
6b41ed84b8 | ||
![]() |
17691e95a8 | ||
![]() |
cf2d83b020 | ||
![]() |
5c0f27b952 | ||
![]() |
322f2ac056 | ||
![]() |
9bfd1ffd7d | ||
![]() |
f5384a1f11 | ||
![]() |
2a1e9a6631 | ||
![]() |
069be95dde | ||
![]() |
1d944d5219 | ||
![]() |
11d3ba3466 | ||
![]() |
2bc0c62570 | ||
![]() |
58d04e2996 | ||
![]() |
ad387e6a42 | ||
![]() |
550ecc8027 | ||
![]() |
5914f1db85 | ||
![]() |
ad03a75679 | ||
![]() |
c0df29333b | ||
![]() |
45aa631f18 | ||
![]() |
614d4e40fe | ||
![]() |
7efee6d536 | ||
![]() |
c59a491788 | ||
![]() |
2bccb6deaf | ||
![]() |
7b5de0a12a | ||
![]() |
4d62f47773 | ||
![]() |
3767030005 | ||
![]() |
58621577ae | ||
![]() |
f00727fe85 | ||
![]() |
b170f3bdd5 | ||
![]() |
1aa1348944 | ||
![]() |
ffd306ef52 | ||
![]() |
c63fab1317 | ||
![]() |
f70349ab02 | ||
![]() |
1d4ccf6657 | ||
![]() |
aca738fbc6 | ||
![]() |
39146747ac | ||
![]() |
967c6857e8 |
@ -11,5 +11,10 @@ docs/
|
||||
networks/
|
||||
scratch/
|
||||
|
||||
# Ignore build cache directories to avoid
|
||||
# errors when addings these to docker images
|
||||
build/.cache
|
||||
build/.golangci-lint
|
||||
|
||||
go.work
|
||||
go.work.sum
|
||||
|
3
.github/CODEOWNERS
vendored
Normal file
3
.github/CODEOWNERS
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Global rule:
|
||||
* @rhuairahrighairidh @karzak @pirtleshell @drklee3 @nddeluca @DracoLi @evgeniy-scherbina @sesheffield @boodyvo @lbayas
|
20
.github/mergify.yml
vendored
20
.github/mergify.yml
vendored
@ -25,6 +25,8 @@ pull_request_rules:
|
||||
- release/v0.21.x
|
||||
- release/v0.23.x
|
||||
- release/v0.24.x
|
||||
- release/v0.25.x
|
||||
- release/v0.26.x
|
||||
|
||||
- name: Backport patches to the release/v0.17.x branch
|
||||
conditions:
|
||||
@ -79,3 +81,21 @@ pull_request_rules:
|
||||
backport:
|
||||
branches:
|
||||
- release/v0.24.x
|
||||
|
||||
- name: Backport patches to the release/v0.25.x branch
|
||||
conditions:
|
||||
- base=master
|
||||
- label=A:backport/v0.25.x
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- release/v0.25.x
|
||||
|
||||
- name: Backport patches to the release/v0.26.x branch
|
||||
conditions:
|
||||
- base=master
|
||||
- label=A:backport/v0.26.x
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- release/v0.26.x
|
||||
|
@ -33,7 +33,7 @@ kava config chain-id "${CHAIN_ID}"
|
||||
kava config keyring-backend test
|
||||
|
||||
# wait for transactions to be committed per CLI command
|
||||
kava config broadcast-mode block
|
||||
kava config broadcast-mode sync
|
||||
|
||||
# setup god's wallet
|
||||
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
|
||||
|
80
.github/scripts/seed-internal-testnet.sh
vendored
80
.github/scripts/seed-internal-testnet.sh
vendored
@ -1,6 +1,14 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
# by sleeping 1 block in between tx's
|
||||
# we can emulate the behavior of the
|
||||
# the deprecated and now removed (as of Kava 16)
|
||||
# broadcast mode of `block` in order to
|
||||
# minimize the chance tx's fail due to an
|
||||
# account sequence number mismatch
|
||||
AVG_SECONDS_BETWEEN_BLOCKS=6.5
|
||||
|
||||
# configure kava binary to talk to the desired chain endpoint
|
||||
kava config node "${CHAIN_API_URL}"
|
||||
kava config chain-id "${CHAIN_ID}"
|
||||
@ -9,7 +17,7 @@ kava config chain-id "${CHAIN_ID}"
|
||||
kava config keyring-backend test
|
||||
|
||||
# wait for transactions to be committed per CLI command
|
||||
kava config broadcast-mode block
|
||||
kava config broadcast-mode sync
|
||||
|
||||
# setup dev wallet
|
||||
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
|
||||
@ -23,6 +31,8 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
|
||||
# fund evm-contract-deployer account (using issuance)
|
||||
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# deploy and fund USDC ERC20 contract
|
||||
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
|
||||
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
|
||||
@ -73,6 +83,31 @@ TETHER_USDT_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NA
|
||||
TETHER_USDT_CONTRACT_ADDRESS=${TETHER_USDT_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000
|
||||
|
||||
# deploy and fund axlBNB ERC20 contract
|
||||
AXL_BNB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBNB" axlBNB 18)
|
||||
AXL_BNB_CONTRACT_ADDRESS=${AXL_BNB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlBUSD ERC20 contract
|
||||
AXL_BUSD_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBUSD" axlBUSD 18)
|
||||
AXL_BUSD_CONTRACT_ADDRESS=${AXL_BUSD_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlXRPB ERC20 contract
|
||||
AXL_XRPB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlXRPB" axlXRPB 18)
|
||||
AXL_XRPB_CONTRACT_ADDRESS=${AXL_XRPB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlBTC ERC20 contract
|
||||
AXL_BTCB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBTCB" axlBTCB 18)
|
||||
AXL_BTCB_CONTRACT_ADDRESS=${AXL_BTCB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund native wBTC ERC20 contract
|
||||
WBTC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "wBTC" wBTC 8)
|
||||
WBTC_CONTRACT_ADDRESS=${WBTC_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 100000000000000000
|
||||
|
||||
# seed some evm wallets
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
@ -81,6 +116,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
# seed webapp E2E whale account
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 100000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
|
||||
@ -89,6 +129,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$WBTC_CONTRACT_ADDRESS" 10000000000000
|
||||
|
||||
# give dev-wallet enough delegation power to pass proposals by itself
|
||||
|
||||
@ -96,6 +141,8 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_
|
||||
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
|
||||
--from dev-wallet --gas-prices 0.5ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# parse space seperated list of validators
|
||||
# into bash array
|
||||
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
|
||||
@ -103,11 +150,14 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
|
||||
# delegate 300KAVA to each validator
|
||||
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"; do
|
||||
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
done
|
||||
|
||||
# create a text proposal
|
||||
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# setup god's wallet
|
||||
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
|
||||
|
||||
@ -123,7 +173,7 @@ PARAM_CHANGE_PROP_TEMPLATE=$(
|
||||
{
|
||||
"subspace": "evmutil",
|
||||
"key": "EnabledConversionPairs",
|
||||
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"}]"
|
||||
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"},{\"kava_erc20_address\":\"AXL_BNB_CONTRACT_ADDRESS\",\"denom\":\"bnb\"},{\"kava_erc20_address\":\"AXL_BUSD_CONTRACT_ADDRESS\",\"denom\":\"busd\"},{\"kava_erc20_address\":\"AXL_BTCB_CONTRACT_ADDRESS\",\"denom\":\"btcb\"},{\"kava_erc20_address\":\"AXL_XRPB_CONTRACT_ADDRESS\",\"denom\":\"xrpb\"},{\"kava_erc20_address\":\"WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/bitgo/wbtc\"}]"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -140,6 +190,11 @@ finalProposal="${finalProposal/AXL_USDC_CONTRACT_ADDRESS/$AXL_USDC_CONTRACT_ADDR
|
||||
finalProposal="${finalProposal/AXL_WBTC_CONTRACT_ADDRESS/$AXL_WBTC_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/wETH_CONTRACT_ADDRESS/$wETH_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/TETHER_USDT_CONTRACT_ADDRESS/$TETHER_USDT_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BNB_CONTRACT_ADDRESS/$AXL_BNB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BUSD_CONTRACT_ADDRESS/$AXL_BUSD_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BTCB_CONTRACT_ADDRESS/$AXL_BTCB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_XRPB_CONTRACT_ADDRESS/$AXL_XRPB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/WBTC_CONTRACT_ADDRESS/$WBTC_CONTRACT_ADDRESS}"
|
||||
|
||||
# create unique proposal filename
|
||||
proposalFileName="$(date +%s)-proposal.json"
|
||||
@ -159,16 +214,37 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
|
||||
# committee 1 is the stability committee. on internal testnet, this has only one member.
|
||||
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
|
||||
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# fetch current module params
|
||||
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
|
||||
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
|
||||
|
||||
# submit a kava token committee proposal
|
||||
COMMITTEE_PROP_TEMPLATE=$(
|
||||
cat <<'END_HEREDOC'
|
||||
{
|
||||
"@type": "/cosmos.gov.v1beta1.TextProposal",
|
||||
"title": "The next big thing signaling proposal.",
|
||||
"description": "The purpose of this proposal is to signal support/opposition to the next big thing"
|
||||
}
|
||||
END_HEREDOC
|
||||
)
|
||||
committeeProposalFileName="$(date +%s)-committee-proposal.json"
|
||||
echo "$COMMITTEE_PROP_TEMPLATE" >$committeeProposalFileName
|
||||
tokenCommitteeId=4
|
||||
kava tx committee submit-proposal "$tokenCommitteeId" "$committeeProposalFileName" --gas auto --gas-adjustment 1.5 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
# if adding more cosmos coins -> er20s, ensure that the deployment order below remains the same.
|
||||
# convert 1 HARD to an erc20. doing this ensures the contract is deployed.
|
||||
kava tx evmutil convert-cosmos-coin-to-erc20 \
|
||||
"$DEV_TEST_WALLET_ADDRESS" \
|
||||
1000000hard \
|
||||
--from dev-wallet --gas 2000000 --gas-prices 0.001ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
23
.github/scripts/seed-protonet.sh
vendored
23
.github/scripts/seed-protonet.sh
vendored
@ -1,6 +1,14 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
# by sleeping 1 block in between tx's
|
||||
# we can emulate the behavior of the
|
||||
# the deprecated and now removed (as of Kava 16)
|
||||
# broadcast mode of `block` in order to
|
||||
# minimize the chance tx's fail due to an
|
||||
# account sequence number mismatch
|
||||
AVG_SECONDS_BETWEEN_BLOCKS=6.5
|
||||
|
||||
# configure kava binary to talk to the desired chain endpoint
|
||||
kava config node "${CHAIN_API_URL}"
|
||||
kava config chain-id "${CHAIN_ID}"
|
||||
@ -9,7 +17,7 @@ kava config chain-id "${CHAIN_ID}"
|
||||
kava config keyring-backend test
|
||||
|
||||
# wait for transactions to be committed per CLI command
|
||||
kava config broadcast-mode block
|
||||
kava config broadcast-mode sync
|
||||
|
||||
# setup dev wallet
|
||||
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
|
||||
@ -23,9 +31,13 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
|
||||
# fund evm-contract-deployer account (using issuance)
|
||||
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# fund 5k kava to x/community account
|
||||
kava tx community fund-community-pool 5000000000ukava --from dev-wallet --gas-prices 0.5ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# deploy and fund USDC ERC20 contract
|
||||
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
|
||||
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
|
||||
@ -89,6 +101,8 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CON
|
||||
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
|
||||
--from dev-wallet --gas-prices 0.5ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# parse space seperated list of validators
|
||||
# into bash array
|
||||
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
|
||||
@ -97,11 +111,14 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
|
||||
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"
|
||||
do
|
||||
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
done
|
||||
|
||||
# create a text proposal
|
||||
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# setup god's wallet
|
||||
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
|
||||
|
||||
@ -150,9 +167,13 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
|
||||
# https://github.com/0glabs/0g-chain/pull/1556/files#diff-0bd6043650c708661f37bbe6fa5b29b52149e0ec0069103c3954168fc9f12612R900-R903
|
||||
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
|
||||
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
|
||||
# fetch current module params
|
||||
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
|
||||
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
|
||||
|
21
.github/workflows/cd-internal-testnet.yml
vendored
21
.github/workflows/cd-internal-testnet.yml
vendored
@ -1,5 +1,6 @@
|
||||
name: Continuous Deployment (Internal Testnet)
|
||||
# run after every successful CI job of new commits to the master branch
|
||||
# if deploy version or config has changed
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [Continuous Integration (Kava Master)]
|
||||
@ -7,6 +8,23 @@ on:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
changed_files:
|
||||
runs-on: ubuntu-latest
|
||||
# define output for first job forwarding output of changedInternalTestnetConfig job
|
||||
outputs:
|
||||
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
|
||||
- name: Get all changed internal testnet files
|
||||
id: changed-internal-testnet-config
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
# Avoid using single or double quotes for multiline patterns
|
||||
files: |
|
||||
ci/env/kava-internal-testnet/**
|
||||
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
@ -14,8 +32,9 @@ jobs:
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
needs: [changed_files]
|
||||
# only start cd pipeline if last ci run was successful
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
|
54
.github/workflows/cd-protonet-manual.yml
vendored
Normal file
54
.github/workflows/cd-protonet-manual.yml
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
name: Manual Deployment (Protonet)
|
||||
# allow to be triggered manually
|
||||
on: workflow_dispatch
|
||||
|
||||
jobs:
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# take ebs + zfs snapshots
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: reset-protonet-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
|
||||
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
|
||||
start-chain-api:
|
||||
uses: ./.github/workflows/cd-start-chain.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: start-chain-api-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
needs: [reset-chain-to-zero-state]
|
||||
|
||||
# setup test and development accounts and balances, deploy contracts by calling the chain's api
|
||||
seed-chain-state:
|
||||
uses: ./.github/workflows/cd-seed-chain.yml
|
||||
with:
|
||||
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
|
||||
chain-id: proto_2221-17000
|
||||
seed-script-filename: seed-protonet.sh
|
||||
erc20-deployer-network-name: protonet
|
||||
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
|
||||
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
|
||||
secrets: inherit
|
||||
needs: [start-chain-api]
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.deploys.testnet.proto
|
||||
namespace: Kava/ContinuousDeployment
|
||||
secrets: inherit
|
||||
needs: [seed-chain-state]
|
@ -67,7 +67,6 @@ jobs:
|
||||
--update-playbook-filename=$PLAYBOOK_NAME \
|
||||
--chain-id=$CHAIN_ID \
|
||||
--max-upgrade-batch-size=0 \
|
||||
--node-states=Standby \
|
||||
--wait-for-node-sync-after-upgrade=false
|
||||
env:
|
||||
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
|
||||
|
16
.github/workflows/cd-seed-chain.yml
vendored
16
.github/workflows/cd-seed-chain.yml
vendored
@ -35,12 +35,16 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: checkout version of kava used by network
|
||||
- name: get desired version of network
|
||||
id: kava-version
|
||||
run: |
|
||||
git pull -p
|
||||
git checkout $(cat ${KAVA_VERSION_FILEPATH})
|
||||
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
|
||||
- name: checkout version of kava used by network
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
@ -68,7 +72,7 @@ jobs:
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
cache: npm
|
||||
node-version: 18
|
||||
node-version-file: .tool-versions
|
||||
cache-dependency-path: kava-bridge/contract/package.json
|
||||
- name: "install ERC20 contract deployment dependencies"
|
||||
run: "npm install"
|
||||
@ -76,8 +80,8 @@ jobs:
|
||||
- name: compile default erc20 contracts
|
||||
run: make compile-contracts
|
||||
working-directory: kava-bridge
|
||||
- name: download seed script from master
|
||||
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/master/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
|
||||
- name: download seed script from current commit
|
||||
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
|
||||
working-directory: kava-bridge/contract
|
||||
env:
|
||||
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
|
||||
|
1
.github/workflows/cd-start-chain.yml
vendored
1
.github/workflows/cd-start-chain.yml
vendored
@ -63,7 +63,6 @@ jobs:
|
||||
--update-playbook-filename=$PLAYBOOK_NAME \
|
||||
--chain-id=$CHAIN_ID \
|
||||
--max-upgrade-batch-size=0 \
|
||||
--node-states=Standby \
|
||||
--wait-for-node-sync-after-upgrade=true
|
||||
env:
|
||||
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
|
||||
|
39
.github/workflows/ci-default.yml
vendored
39
.github/workflows/ci-default.yml
vendored
@ -35,6 +35,35 @@ jobs:
|
||||
run: make test
|
||||
- name: run e2e tests
|
||||
run: make docker-build test-e2e
|
||||
fuzz:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
- name: run fuzz tests
|
||||
run: make test-fuzz
|
||||
ibc-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: tests/e2e-ibc/go.mod
|
||||
cache-dependency-path: |
|
||||
tests/e2e-ibc/go.sum
|
||||
go.sum
|
||||
- name: run ibc e2e tests
|
||||
run: make test-ibc
|
||||
validate-internal-testnet-genesis:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@ -44,16 +73,10 @@ jobs:
|
||||
id: kava-version
|
||||
run: |
|
||||
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
|
||||
- name: checkout repo from master
|
||||
- name: checkout version of kava that will be deployed if this pr is merged
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: checkout version of kava that will be deployed if this pr is merged
|
||||
run: |
|
||||
git pull -p
|
||||
git checkout $KAVA_VERSION
|
||||
env:
|
||||
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
|
||||
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
|
22
.github/workflows/ci-docker.yml
vendored
22
.github/workflows/ci-docker.yml
vendored
@ -50,6 +50,17 @@ jobs:
|
||||
username: ${{ inputs.dockerhub-username }}
|
||||
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Go Build Cache for Docker
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: go-build-cache
|
||||
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: inject go-build-cache into docker
|
||||
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
|
||||
with:
|
||||
cache-source: go-build-cache
|
||||
|
||||
# publish to docker hub, tag with short git hash
|
||||
- name: Build and push (goleveldb)
|
||||
uses: docker/build-push-action@v5
|
||||
@ -89,6 +100,17 @@ jobs:
|
||||
username: ${{ inputs.dockerhub-username }}
|
||||
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Go Build Cache for Docker
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: go-build-cache
|
||||
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: inject go-build-cache into docker
|
||||
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
|
||||
with:
|
||||
cache-source: go-build-cache
|
||||
|
||||
# publish to docker hub, tag with short git hash
|
||||
- name: Build and push (rocksdb)
|
||||
uses: docker/build-push-action@v5
|
||||
|
26
.github/workflows/ci-lint.yml
vendored
26
.github/workflows/ci-lint.yml
vendored
@ -7,11 +7,25 @@ jobs:
|
||||
uses: ./.github/workflows/proto.yml
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
checks: write # allow write access to checks to allow the action to annotate code in the PR.
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: golangci-lint
|
||||
uses: reviewdog/action-golangci-lint@v2
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with: { fetch-depth: 0 }
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
github_token: ${{ secrets.github_token }}
|
||||
reporter: github-pr-review
|
||||
golangci_lint_flags: --timeout 10m
|
||||
go-version-file: go.mod
|
||||
- name: Load Version
|
||||
id: load-version
|
||||
run: |
|
||||
GOLANGCI_VERSION=$(cat .golangci-version)
|
||||
REV=$(git merge-base origin/master HEAD)
|
||||
echo "GOLANGCI_VERSION=$GOLANGCI_VERSION" >> $GITHUB_ENV
|
||||
echo "REV=$REV" >> $GITHUB_ENV
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
version: ${{ env.GOLANGCI_VERSION }}
|
||||
args: -v -c .golangci.yml --new-from-rev ${{ env.REV }}
|
||||
|
5
.github/workflows/ci-master.yml
vendored
5
.github/workflows/ci-master.yml
vendored
@ -29,7 +29,7 @@ jobs:
|
||||
- name: build rocksdb dependency
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
|
||||
env:
|
||||
ROCKSDB_VERSION: v8.1.1
|
||||
ROCKSDB_VERSION: v8.10.0
|
||||
- name: Build and upload release artifacts
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
|
||||
env:
|
||||
@ -45,6 +45,9 @@ jobs:
|
||||
dockerhub-username: kavaops
|
||||
extra-image-tag: master
|
||||
secrets: inherit
|
||||
rosetta:
|
||||
uses: ./.github/workflows/ci-rosetta.yml
|
||||
secrets: inherit
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
|
27
.github/workflows/ci-pr-lint.yml
vendored
Normal file
27
.github/workflows/ci-pr-lint.yml
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
# this workflow is responsible for ensuring quality titles are given to all PRs
|
||||
# for PR checks to pass, the title must follow the Conventional Commits standard
|
||||
# https://www.conventionalcommits.org/en/v1.0.0/
|
||||
# this workflow was adapted from a similar workflow in https://github.com/cosmos/cosmos-sdk
|
||||
name: "Lint PR Title"
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
main:
|
||||
permissions:
|
||||
pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
|
||||
statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# https://github.com/marketplace/actions/semantic-pull-request
|
||||
- uses: amannn/action-semantic-pull-request@v5.5.3
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
5
.github/workflows/ci-release.yml
vendored
5
.github/workflows/ci-release.yml
vendored
@ -4,9 +4,6 @@ on:
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+*"
|
||||
jobs:
|
||||
# run per commit ci checks against released version
|
||||
lint-checks:
|
||||
uses: ./.github/workflows/ci-lint.yml
|
||||
# run default ci checks against released version
|
||||
default-checks:
|
||||
uses: ./.github/workflows/ci-default.yml
|
||||
@ -14,7 +11,7 @@ jobs:
|
||||
# get the version tag that triggered this workflow
|
||||
get-version-tag:
|
||||
# prep version release only if all checks pass
|
||||
needs: [lint-checks, default-checks]
|
||||
needs: default-checks
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
git-tag: ${{ steps.git-tag.outputs.tag }}
|
||||
|
24
.github/workflows/ci-rocksdb-build.yml
vendored
24
.github/workflows/ci-rocksdb-build.yml
vendored
@ -1,7 +1,7 @@
|
||||
name: Continuous Integration (Rocksdb Build)
|
||||
|
||||
env:
|
||||
ROCKSDB_VERSION: v8.1.1
|
||||
ROCKSDB_VERSION: v8.10.0
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
@ -19,25 +19,3 @@ jobs:
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
|
||||
- name: build application
|
||||
run: make build COSMOS_BUILD_OPTIONS=rocksdb
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: install RocksDB dependencies
|
||||
run: sudo apt-get update
|
||||
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
|
||||
- name: install RocksDB as shared library
|
||||
run: git clone https://github.com/facebook/rocksdb.git
|
||||
&& cd rocksdb
|
||||
&& git checkout $ROCKSDB_VERSION
|
||||
&& sudo make -j$(nproc) install-shared
|
||||
&& sudo ldconfig
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: run unit tests
|
||||
run: make test-rocksdb
|
||||
|
16
.github/workflows/ci-rosetta.yml
vendored
Normal file
16
.github/workflows/ci-rosetta.yml
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
name: Dispatch run-rosetta-tests event to rosetta-kava
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Dispatch run-rosetta-tests event to rosetta-kava
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.KAVA_PUBLIC_GITHUB_ACCESS_TOKEN }}
|
||||
repository: Kava-Labs/rosetta-kava
|
||||
event-type: run-rosetta-tests
|
||||
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
|
6
.github/workflows/upload-release-assets.yml
vendored
6
.github/workflows/upload-release-assets.yml
vendored
@ -12,11 +12,11 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.21'
|
||||
- name: Build
|
||||
run: make build
|
||||
run: sudo LINK_STATICALLY=true make build-release
|
||||
- name: Rename file
|
||||
run: mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
|
||||
run: sudo mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
|
||||
- name: Upload Release Asset
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -31,6 +31,9 @@ out
|
||||
# Ignore build cache dir
|
||||
build/.cache
|
||||
|
||||
# Ignore make lint cache
|
||||
build/.golangci-lint
|
||||
|
||||
# Ignore installed binaires
|
||||
build/bin
|
||||
|
||||
|
1
.golangci-version
Normal file
1
.golangci-version
Normal file
@ -0,0 +1 @@
|
||||
v1.59
|
130
.golangci.yml
Normal file
130
.golangci.yml
Normal file
@ -0,0 +1,130 @@
|
||||
run:
|
||||
timeout: 20m # set maximum time allowed for the linter to run. If the linting process exceeds this duration, it will be terminated
|
||||
modules-download-mode: readonly # Ensures that modules are not modified during the linting process
|
||||
allow-parallel-runners: true # enables parallel execution of linters to speed up linting process
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- containedctx
|
||||
- contextcheck
|
||||
- decorder
|
||||
- dogsled
|
||||
# - dupl
|
||||
# - dupword
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- errchkjson
|
||||
- errname
|
||||
- errorlint
|
||||
# - exhaustive
|
||||
- exportloopref
|
||||
- funlen
|
||||
- gci
|
||||
- ginkgolinter
|
||||
- gocheckcompilerdirectives
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
- goconst
|
||||
- gocritic
|
||||
- godox
|
||||
- gofmt
|
||||
# - gofumpt
|
||||
- goheader
|
||||
- goimports
|
||||
- mnd
|
||||
# - gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- grouper
|
||||
- importas
|
||||
- ineffassign
|
||||
# - interfacebloat
|
||||
- lll
|
||||
- loggercheck
|
||||
- makezero
|
||||
- mirror
|
||||
- misspell
|
||||
- musttag
|
||||
# - nakedret
|
||||
# - nestif
|
||||
- nilerr
|
||||
# - nilnil
|
||||
# - noctx
|
||||
- nolintlint
|
||||
# - nonamedreturns
|
||||
- nosprintfhostport
|
||||
- prealloc
|
||||
- predeclared
|
||||
- promlinter
|
||||
# - reassign
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- staticcheck
|
||||
# - stylecheck
|
||||
- tagalign
|
||||
# - testpackage
|
||||
# - thelper
|
||||
# - tparallel
|
||||
- typecheck
|
||||
# - unconvert
|
||||
- unparam
|
||||
- unused
|
||||
# - usestdlibvars
|
||||
- wastedassign
|
||||
# - whitespace
|
||||
- wrapcheck
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
# Disable funlen for "func Test..." or func (suite *Suite) Test..." type functions
|
||||
# These functions tend to be descriptive and exceed length limits.
|
||||
- source: "^func (\\(.*\\) )?Test"
|
||||
linters:
|
||||
- funlen
|
||||
|
||||
linters-settings:
|
||||
errcheck:
|
||||
check-blank: true # check for assignments to the blank identifier '_' when errors are returned
|
||||
check-type-assertions: false # check type assertion
|
||||
errorlint:
|
||||
check-generated: false # disabled linting of generated files
|
||||
default-signifies-exhaustive: false # exhaustive handling of error types
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: false # exhaustive handling of error types
|
||||
gci:
|
||||
sections: # defines the order of import sections
|
||||
- standard
|
||||
- default
|
||||
- localmodule
|
||||
goconst:
|
||||
min-len: 3 # min length for string constants to be checked
|
||||
min-occurrences: 3 # min occurrences of the same constant before it's flagged
|
||||
godox:
|
||||
keywords: # specific keywords to flag for further action
|
||||
- BUG
|
||||
- FIXME
|
||||
- HACK
|
||||
gosec:
|
||||
exclude-generated: true
|
||||
lll:
|
||||
line-length: 120
|
||||
misspell:
|
||||
locale: US
|
||||
ignore-words: expect
|
||||
nolintlint:
|
||||
allow-leading-space: false
|
||||
require-explanation: true
|
||||
require-specific: true
|
||||
prealloc:
|
||||
simple: true # enables simple preallocation checks
|
||||
range-loops: true # enabled preallocation checks in range loops
|
||||
for-loops: false # disables preallocation checks in for loops
|
||||
unparam:
|
||||
check-exported: true # checks exported functions and methods for unused params
|
16
.mockery.yaml
Normal file
16
.mockery.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
# Generate EXPECT() methods, type-safe methods to generate call expectations
|
||||
with-expecter: true
|
||||
|
||||
# Generate mocks in adjacent mocks directory to the interfaces
|
||||
dir: "{{.InterfaceDir}}/mocks"
|
||||
mockname: "Mock{{.InterfaceName}}"
|
||||
outpkg: "mocks"
|
||||
filename: "Mock{{.InterfaceName}}.go"
|
||||
|
||||
packages:
|
||||
github.com/0glabs/0g-chain/x/precisebank/types:
|
||||
# package-specific config
|
||||
config:
|
||||
interfaces:
|
||||
AccountKeeper:
|
||||
BankKeeper:
|
@ -1,2 +1,2 @@
|
||||
golang 1.20
|
||||
nodejs 18.16.0
|
||||
golang 1.21.9
|
||||
nodejs 20.16.0
|
||||
|
62
CHANGELOG.md
62
CHANGELOG.md
@ -36,6 +36,29 @@ Ref: https://keepachangelog.com/en/1.0.0/
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [v0.26.0]
|
||||
|
||||
### Features
|
||||
- (precisebank) [#1906] Add new `x/precisebank` module with bank decimal extension for EVM usage.
|
||||
- (cli) [#1922] Add `iavlviewer` CLI command for low-level iavl db debugging.
|
||||
|
||||
### Improvements
|
||||
- (rocksdb) [#1903] Bump cometbft-db dependency for use with rocksdb v8.10.0
|
||||
- (deps) [#1988] Bump cometbft to v0.37.9-kava.1
|
||||
|
||||
## [v0.26.0]
|
||||
|
||||
### Features
|
||||
|
||||
- (cli) [#1785] Add `shard` CLI command to support creating partitions of data for standalone nodes
|
||||
- (cdp) [#1818] Add module param and logic for running x/cdp begin blocker every `n` blocks
|
||||
- (cli) [#1804] Add `rocksdb compact` command for manual DB compaction of state or blockstore
|
||||
- (cosmos-sdk) [#1811] [#1846] Upgrades app to cosmos-sdk v0.47.10 with iavl v1 support
|
||||
- (validator-vesting) [#1832] Add grpc query service to replace removed legacy querier
|
||||
- (incentive) [#1836] Update x/incentive cli to use grpc query client
|
||||
- (ibc) [#1839] Add ibc packet forward middleware for ibc transfers
|
||||
- (evmutil) [#1848] Update evm native conversion logic to handle bep3 assets
|
||||
|
||||
## [v0.25.0]
|
||||
|
||||
### Features
|
||||
@ -43,11 +66,12 @@ Ref: https://keepachangelog.com/en/1.0.0/
|
||||
- (community) [#1704] Add module params
|
||||
- (community) [#1706] Add disable inflation upgrade
|
||||
- (community) [#1745] Enable params update via governance with `MsgUpdateParams`
|
||||
- (client) [#1784] Add Kava gRPC client
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- (ethermint) [#1788] Fixes issue where tracing a transaction could show it's status as successful when isolated in simulation even if the tx when executed on the chain failed due to an error such as exhausting the block gas meter
|
||||
- (evmutil) [#1655] Initialize x/evmutil module account in InitGenesis
|
||||
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
|
||||
cosmos ledger app 2.34.12
|
||||
|
||||
## State Machine Breaking
|
||||
@ -60,9 +84,19 @@ Ref: https://keepachangelog.com/en/1.0.0/
|
||||
- (community) [#1755] Keep funds in `x/community` in `CommunityPoolLendWithdrawProposal` handler
|
||||
- (staking) [#1761] Set validator minimum commission to 5% for all validators under 5%
|
||||
|
||||
## [v0.24.3]
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
|
||||
- (rocksdb) [#1767] Fix resolution of rocksdb database path introduced in v0.24.2
|
||||
|
||||
**Note**: There was a bug released as v0.24.2. The tag has been removed and the commit should not be used.
|
||||
|
||||
## [v0.24.1]
|
||||
|
||||
### Features
|
||||
|
||||
- (metrics) [#1668] Adds non-state breaking x/metrics module for custom telemetry.
|
||||
- (metrics) [#1669] Add performance timing metrics to all Begin/EndBlockers
|
||||
- (community) [#1751] Add `AnnualizedRewards` query endpoint
|
||||
@ -306,6 +340,19 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
|
||||
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
|
||||
large-scale simulations remotely using aws-batch
|
||||
|
||||
[#1988]: https://github.com/Kava-Labs/kava/pull/1988
|
||||
[#1922]: https://github.com/Kava-Labs/kava/pull/1922
|
||||
[#1906]: https://github.com/Kava-Labs/kava/pull/1906
|
||||
[#1903]: https://github.com/Kava-Labs/kava/pull/1903
|
||||
[#1846]: https://github.com/Kava-Labs/kava/pull/1846
|
||||
[#1848]: https://github.com/Kava-Labs/kava/pull/1848
|
||||
[#1839]: https://github.com/Kava-Labs/kava/pull/1839
|
||||
[#1836]: https://github.com/Kava-Labs/kava/pull/1836
|
||||
[#1832]: https://github.com/Kava-Labs/kava/pull/1832
|
||||
[#1811]: https://github.com/Kava-Labs/kava/pull/1811
|
||||
[#1804]: https://github.com/Kava-Labs/kava/pull/1804
|
||||
[#1785]: https://github.com/Kava-Labs/kava/pull/1785
|
||||
[#1784]: https://github.com/Kava-Labs/kava/pull/1784
|
||||
[#1770]: https://github.com/Kava-Labs/kava/pull/1770
|
||||
[#1755]: https://github.com/Kava-Labs/kava/pull/1755
|
||||
[#1761]: https://github.com/Kava-Labs/kava/pull/1761
|
||||
@ -359,14 +406,13 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
|
||||
[#750]: https://github.com/Kava-Labs/kava/pull/750
|
||||
[#751]: https://github.com/Kava-Labs/kava/pull/751
|
||||
[#780]: https://github.com/Kava-Labs/kava/pull/780
|
||||
|
||||
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.25.0...HEAD
|
||||
|
||||
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.25.0
|
||||
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.24.1
|
||||
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.23.2...v0.24.0
|
||||
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.26.0...HEAD
|
||||
[v0.26.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.26.0
|
||||
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.25.0
|
||||
[v0.24.3]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.24.1
|
||||
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.24.0
|
||||
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.23.2
|
||||
[v0.23.2]: https://github.com/Kava-Labs/kava/compare/v0.23.1...v0.23.2
|
||||
[v0.23.1]: https://github.com/Kava-Labs/kava/compare/v0.23.0...v0.23.1
|
||||
[v0.23.0]: https://github.com/Kava-Labs/kava/compare/v0.21.1...v0.23.0
|
||||
[v0.16.1]: https://github.com/Kava-Labs/kava/compare/v0.16.0...v0.16.1
|
||||
[v0.16.0]: https://github.com/Kava-Labs/kava/compare/v0.15.2...v0.16.0
|
||||
|
12
Dockerfile
12
Dockerfile
@ -1,4 +1,4 @@
|
||||
FROM golang:1.20-alpine AS build-env
|
||||
FROM golang:1.21-alpine AS build-env
|
||||
|
||||
# Set up dependencies
|
||||
# bash, jq, curl for debugging
|
||||
@ -19,6 +19,15 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
go version && go mod download
|
||||
|
||||
# Cosmwasm - Download correct libwasmvm version
|
||||
RUN ARCH=$(uname -m) && WASMVM_VERSION=$(go list -m github.com/CosmWasm/wasmvm | sed 's/.* //') && \
|
||||
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/libwasmvm_muslc.$ARCH.a \
|
||||
-O /lib/libwasmvm.$ARCH.a && \
|
||||
# verify checksum
|
||||
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/checksums.txt -O /tmp/checksums.txt && \
|
||||
sha256sum /lib/libwasmvm.$ARCH.a | grep $(cat /tmp/checksums.txt | grep libwasmvm_muslc.$ARCH | cut -d ' ' -f 1)
|
||||
|
||||
|
||||
# Add source files
|
||||
COPY . .
|
||||
|
||||
@ -27,6 +36,7 @@ COPY . .
|
||||
# Mount go build and mod caches as container caches, persisted between builder invocations
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
LINK_STATICALLY=true \
|
||||
make install
|
||||
|
||||
FROM alpine:3.15
|
||||
|
42
Dockerfile-node
Normal file
42
Dockerfile-node
Normal file
@ -0,0 +1,42 @@
|
||||
FROM --platform=linux/amd64 ubuntu:24.04
|
||||
|
||||
# Install dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
git \
|
||||
sudo \
|
||||
wget \
|
||||
jq \
|
||||
make \
|
||||
gcc \
|
||||
unzip && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Go
|
||||
RUN wget https://golang.org/dl/go1.22.5.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xzf go1.22.5.linux-amd64.tar.gz && \
|
||||
rm go1.22.5.linux-amd64.tar.gz
|
||||
# Set Go environment variables
|
||||
ENV GOPATH=/root/go
|
||||
ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
|
||||
# Create Go workspace directory
|
||||
RUN mkdir -p /root/go
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
# https://docs.0g.ai/0g-doc/run-a-node/validator-node
|
||||
RUN git clone -b v0.2.3 https://github.com/0glabs/0g-chain.git
|
||||
RUN ./0g-chain/networks/testnet/install.sh
|
||||
|
||||
RUN 0gchaind config chain-id zgtendermint_16600-2
|
||||
|
||||
RUN 0gchaind init testnetnode --chain-id zgtendermint_16600-2
|
||||
|
||||
RUN rm ~/.0gchain/config/genesis.json
|
||||
RUN wget -P ~/.0gchain/config https://github.com/0glabs/0g-chain/releases/download/v0.2.3/genesis.json
|
||||
|
||||
RUN 0gchaind validate-genesis
|
||||
|
||||
RUN sed -i 's|seeds = ""|seeds = "81987895a11f6689ada254c6b57932ab7ed909b6@54.241.167.190:26656,010fb4de28667725a4fef26cdc7f9452cc34b16d@54.176.175.48:26656,e9b4bc203197b62cc7e6a80a64742e752f4210d5@54.193.250.204:26656,68b9145889e7576b652ca68d985826abd46ad660@18.166.164.232:26656"|' $HOME/.0gchain/config/config.toml
|
||||
|
||||
ENTRYPOINT ["0gchaind", "start"]
|
@ -1,23 +1,6 @@
|
||||
FROM golang:1.20-bullseye AS chain-builder
|
||||
FROM kava/rocksdb:v8.10.1-go1.21 AS kava-builder
|
||||
|
||||
# Set up dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set working directory for the build
|
||||
WORKDIR /root
|
||||
# default home directory is /root
|
||||
|
||||
# install rocksdb
|
||||
ARG rocksdb_version=v8.1.1
|
||||
ENV ROCKSDB_VERSION=$rocksdb_version
|
||||
|
||||
RUN git clone https://github.com/facebook/rocksdb.git \
|
||||
&& cd rocksdb \
|
||||
&& git checkout $ROCKSDB_VERSION \
|
||||
&& make -j$(nproc) install-shared \
|
||||
&& ldconfig
|
||||
RUN apt-get update
|
||||
|
||||
WORKDIR /root/0gchain
|
||||
# Copy dependency files first to facilitate dependency caching
|
||||
|
22
Dockerfile-rocksdb-base
Normal file
22
Dockerfile-rocksdb-base
Normal file
@ -0,0 +1,22 @@
|
||||
# published to https://hub.docker.com/repository/docker/kava/rocksdb/tags
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 -t kava/rocksdb:v8.10.1-go1.21 -f Dockerfile-rocksdb-base . --push
|
||||
FROM golang:1.21-bullseye
|
||||
|
||||
# Set up dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set working directory for the build
|
||||
WORKDIR /root
|
||||
# default home directory is /root
|
||||
|
||||
# install rocksdb
|
||||
ARG rocksdb_version=v8.10.0
|
||||
ENV ROCKSDB_VERSION=$rocksdb_version
|
||||
|
||||
RUN git clone https://github.com/facebook/rocksdb.git \
|
||||
&& cd rocksdb \
|
||||
&& git checkout $ROCKSDB_VERSION \
|
||||
&& make -j$(nproc) install-shared \
|
||||
&& ldconfig
|
64
Makefile
64
Makefile
@ -6,6 +6,8 @@ BINARY_NAME := 0gchaind
|
||||
MAIN_ENTRY := ./cmd/$(BINARY_NAME)
|
||||
DOCKER_IMAGE_NAME := 0glabs/$(PROJECT_NAME)
|
||||
GO_BIN ?= go
|
||||
ARCH := $(shell uname -m)
|
||||
WASMVM_VERSION := $(shell $(GO_BIN) list -m github.com/CosmWasm/wasmvm | sed 's/.* //')
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
|
||||
GIT_COMMIT := $(shell git rev-parse HEAD)
|
||||
@ -30,7 +32,7 @@ VERSION := $(GIT_COMMIT_SHORT)
|
||||
VERSION_NUMBER := $(VERSION)
|
||||
endif
|
||||
|
||||
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/tendermint/tendermint | sed 's:.* ::')
|
||||
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/cometbft/cometbft | sed 's:.* ::')
|
||||
COSMOS_SDK_VERSION := $(shell $(GO_BIN) list -m github.com/cosmos/cosmos-sdk | sed 's:.* ::')
|
||||
|
||||
.PHONY: print-git-info
|
||||
@ -103,6 +105,8 @@ include $(BUILD_DIR)/deps.mk
|
||||
include $(BUILD_DIR)/proto.mk
|
||||
include $(BUILD_DIR)/proto-deps.mk
|
||||
|
||||
include $(BUILD_DIR)/lint.mk
|
||||
|
||||
#export GO111MODULE = on
|
||||
# process build tags
|
||||
build_tags = netgo
|
||||
@ -149,7 +153,7 @@ ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=$(PROJECT_NAME) \
|
||||
-X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION_NUMBER) \
|
||||
-X github.com/cosmos/cosmos-sdk/version.Commit=$(GIT_COMMIT) \
|
||||
-X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \
|
||||
-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(TENDERMINT_VERSION)
|
||||
-X github.com/cometbft/cometbft/version.TMCoreSemVer=$(TENDERMINT_VERSION)
|
||||
|
||||
# DB backend selection
|
||||
ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
|
||||
@ -174,6 +178,10 @@ endif
|
||||
ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
|
||||
ldflags += -w -s
|
||||
endif
|
||||
|
||||
ifeq ($(LINK_STATICALLY),true)
|
||||
ldflags += -linkmode=external -extldflags "-Wl,-z,muldefs -static -lm"
|
||||
endif
|
||||
ldflags += $(LDFLAGS)
|
||||
ldflags := $(strip $(ldflags))
|
||||
|
||||
@ -195,9 +203,21 @@ else
|
||||
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
|
||||
endif
|
||||
|
||||
build-release: go.sum
|
||||
wget -q https://github.com/CosmWasm/wasmvm/releases/download/$(WASMVM_VERSION)/libwasmvm_muslc.$(ARCH).a -O /lib/libwasmvm.$(ARCH).a
|
||||
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
|
||||
|
||||
build-linux: go.sum
|
||||
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
|
||||
|
||||
# build on rocksdb-backed kava on macOS with shared libs from brew
|
||||
# this assumes you are on macOS & these deps have been installed with brew:
|
||||
# rocksdb, snappy, lz4, and zstd
|
||||
# use like `make build-rocksdb-brew COSMOS_BUILD_OPTIONS=rocksdb`
|
||||
build-rocksdb-brew:
|
||||
export CGO_CFLAGS := -I$(shell brew --prefix rocksdb)/include
|
||||
export CGO_LDFLAGS := -L$(shell brew --prefix rocksdb)/lib -lrocksdb -lstdc++ -lm -lz -L$(shell brew --prefix snappy)/lib -L$(shell brew --prefix lz4)/lib -L$(shell brew --prefix zstd)/lib
|
||||
|
||||
install: go.sum
|
||||
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) $(MAIN_ENTRY)
|
||||
|
||||
@ -224,13 +244,6 @@ link-check:
|
||||
# TODO: replace kava in following line with project name
|
||||
liche -r . --exclude "^http://127.*|^https://riot.im/app*|^http://kava-testnet*|^https://testnet-dex*|^https://kava3.data.kava.io*|^https://ipfs.io*|^https://apps.apple.com*|^https://kava.quicksync.io*"
|
||||
|
||||
|
||||
lint:
|
||||
golangci-lint run
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" | xargs gofmt -d -s
|
||||
$(GO_BIN) mod verify
|
||||
.PHONY: lint
|
||||
|
||||
format:
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs gofmt -w -s
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs misspell -w
|
||||
@ -255,11 +268,11 @@ build-docker-local-0gchain:
|
||||
|
||||
# Run a 4-node testnet locally
|
||||
localnet-start: build-linux localnet-stop
|
||||
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z $(DOCKER_IMAGE_NAME)-node testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
|
||||
docker-compose up -d
|
||||
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z kava/kavanode testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
|
||||
$(DOCKER) compose up -d
|
||||
|
||||
localnet-stop:
|
||||
docker-compose down
|
||||
$(DOCKER) compose down
|
||||
|
||||
# Launch a new single validator chain
|
||||
start:
|
||||
@ -301,12 +314,14 @@ test-basic: test
|
||||
test-e2e: docker-build
|
||||
$(GO_BIN) test -failfast -count=1 -v ./tests/e2e/...
|
||||
|
||||
# run interchaintest tests (./tests/e2e-ibc)
|
||||
test-ibc: docker-build
|
||||
cd tests/e2e-ibc && KAVA_TAG=local $(GO_BIN) test -timeout 10m .
|
||||
.PHONY: test-ibc
|
||||
|
||||
test:
|
||||
@$(GO_BIN) test $$($(GO_BIN) list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
|
||||
|
||||
test-rocksdb:
|
||||
@go test -tags=rocksdb $(MAIN_ENTRY)/opendb
|
||||
|
||||
# Run cli integration tests
|
||||
# `-p 4` to use 4 cores, `-tags cli_test` to tell $(GO_BIN) not to ignore the cli package
|
||||
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.
|
||||
@ -317,6 +332,18 @@ test-cli: build
|
||||
test-migrate:
|
||||
@$(GO_BIN) test -v -count=1 ./migrate/...
|
||||
|
||||
# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169
|
||||
ifeq ($(OS_FAMILY),Darwin)
|
||||
FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic
|
||||
endif
|
||||
|
||||
test-fuzz:
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzMintCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzBurnCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzSendCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_NonZeroRemainder ./x/precisebank/types
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_ZeroRemainder ./x/precisebank/types
|
||||
|
||||
# Kick start lots of sims on an AWS cluster.
|
||||
# This submits an AWS Batch job to run a lot of sims, each within a docker image. Results are uploaded to S3
|
||||
start-remote-sims:
|
||||
@ -327,13 +354,14 @@ start-remote-sims:
|
||||
# submit an array job on AWS Batch, using 1000 seeds, spot instances
|
||||
aws batch submit-job \
|
||||
-—job-name "master-$(VERSION)" \
|
||||
-—job-queue “simulation-1-queue-spot" \
|
||||
-—job-queue "simulation-1-queue-spot" \
|
||||
-—array-properties size=1000 \
|
||||
-—job-definition $(BINARY_NAME)-sim-master \
|
||||
-—container-override environment=[{SIM_NAME=master-$(VERSION)}]
|
||||
|
||||
update-kvtool:
|
||||
git submodule update
|
||||
git submodule init || true
|
||||
git submodule update --remote
|
||||
cd tests/e2e/kvtool && make install
|
||||
|
||||
.PHONY: all build-linux install clean build test test-cli test-all test-rest test-basic start-remote-sims
|
||||
.PHONY: all build-linux install build test test-cli test-all test-rest test-basic test-fuzz start-remote-sims
|
||||
|
@ -17,13 +17,13 @@ Reference implementation of 0G Chain, the first modular AI chain. Built using th
|
||||
<!---
|
||||
## Mainnet
|
||||
|
||||
The current recommended version of the software for mainnet is [v0.25.0](https://github.com/Kava-Labs/kava/releases/tag/v0.25.0) The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
|
||||
The current recommended version of the software for mainnet is [v0.26.2](https://github.com/Kava-Labs/kava/releases/tag/v0.26.2) The `master` branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
|
||||
|
||||
### Installation and Setup
|
||||
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/participate/validator-node).
|
||||
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/nodes-and-validators/validator-node).
|
||||
|
||||
```bash
|
||||
git checkout v0.25.0
|
||||
git checkout v0.26.2
|
||||
make install
|
||||
```
|
||||
|
||||
@ -49,7 +49,7 @@ If you have technical questions or concerns, ask a developer or community member
|
||||
|
||||
## Security
|
||||
|
||||
If you find a security issue, please report it to security [at] kava.io. Depending on the verification and severity, a bug bounty may be available.
|
||||
If you find a security issue, please report it to security [at] kavalabs.io. Depending on the verification and severity, a bug bounty may be available.
|
||||
|
||||
## License
|
||||
|
||||
|
@ -11,15 +11,15 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
|
||||
"cosmossdk.io/simapp"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/simapp"
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/store"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/module"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth"
|
||||
@ -275,7 +275,7 @@ func TestAppStateDeterminism(t *testing.T) {
|
||||
config.ExportParamsPath = ""
|
||||
config.OnOperation = false
|
||||
config.AllInvariants = false
|
||||
config.ChainID = helpers.SimAppChainID
|
||||
config.ChainID = sims.SimAppChainID
|
||||
|
||||
numTimesToRunPerSeed := 2
|
||||
appHashList := make([]json.RawMessage, numTimesToRunPerSeed)
|
||||
|
@ -11,6 +11,10 @@ import (
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
tmbytes "github.com/cometbft/cometbft/libs/bytes"
|
||||
ctypes "github.com/cometbft/cometbft/rpc/core/types"
|
||||
jsonrpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
|
||||
"github.com/cosmos/cosmos-sdk/client/context"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/rest"
|
||||
@ -19,10 +23,6 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
tmbytes "github.com/tendermint/tendermint/libs/bytes"
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
jsonrpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
|
||||
)
|
||||
|
||||
type SimulateRequestTestSuite struct {
|
||||
|
@ -5,16 +5,16 @@ import (
|
||||
"runtime/debug"
|
||||
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
tmlog "github.com/cometbft/cometbft/libs/log"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
authante "github.com/cosmos/cosmos-sdk/x/auth/ante"
|
||||
authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
|
||||
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
|
||||
ibcante "github.com/cosmos/ibc-go/v6/modules/core/ante"
|
||||
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
|
||||
ibcante "github.com/cosmos/ibc-go/v7/modules/core/ante"
|
||||
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
|
||||
evmante "github.com/evmos/ethermint/app/ante"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
// HandlerOptions extend the SDK's AnteHandler options by requiring the IBC
|
||||
|
@ -7,9 +7,13 @@ import (
|
||||
"time"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
tmdb "github.com/cometbft/cometbft-db"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
|
||||
@ -17,9 +21,6 @@ import (
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmdb "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
@ -57,10 +58,11 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
|
||||
nil,
|
||||
encodingConfig,
|
||||
opts,
|
||||
baseapp.SetChainID(app.TestChainId),
|
||||
),
|
||||
}
|
||||
|
||||
chainID := "kavatest_1-1"
|
||||
chainID := app.TestChainId
|
||||
tApp = tApp.InitializeFromGenesisStatesWithTimeAndChainID(
|
||||
time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
chainID,
|
||||
@ -107,7 +109,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
stdTx, err := helpers.GenSignedMockTx(
|
||||
stdTx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
encodingConfig.TxConfig,
|
||||
[]sdk.Msg{
|
||||
@ -118,7 +120,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
|
||||
),
|
||||
},
|
||||
sdk.NewCoins(), // no fee
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
chainID,
|
||||
[]uint64{0},
|
||||
[]uint64{0}, // fixed sequence numbers will cause tests to fail sig verification if the same address is used twice
|
||||
@ -210,7 +212,7 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
|
||||
return msg
|
||||
}
|
||||
|
||||
chainID := "kavatest_1-1"
|
||||
chainID := app.TestChainId
|
||||
encodingConfig := app.MakeEncodingConfig()
|
||||
|
||||
testcases := []struct {
|
||||
@ -239,12 +241,12 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
|
||||
chainID,
|
||||
)
|
||||
|
||||
stdTx, err := helpers.GenSignedMockTx(
|
||||
stdTx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
encodingConfig.TxConfig,
|
||||
[]sdk.Msg{tc.msg},
|
||||
sdk.NewCoins(), // no fee
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
chainID,
|
||||
[]uint64{0},
|
||||
[]uint64{0},
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -39,7 +39,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
|
||||
fetcher := mockAddressFetcher(testAddresses[1:]...)
|
||||
|
||||
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
|
||||
tx, err := helpers.GenSignedMockTx(
|
||||
tx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
txConfig,
|
||||
[]sdk.Msg{
|
||||
@ -50,7 +50,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
|
||||
),
|
||||
},
|
||||
sdk.NewCoins(), // no fee
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
"testing-chain-id",
|
||||
[]uint64{0},
|
||||
[]uint64{0},
|
||||
@ -74,7 +74,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
|
||||
|
||||
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
|
||||
|
||||
tx, err := helpers.GenSignedMockTx(
|
||||
tx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
txConfig,
|
||||
[]sdk.Msg{
|
||||
@ -90,7 +90,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
|
||||
),
|
||||
},
|
||||
sdk.NewCoins(), // no fee
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
"testing-chain-id",
|
||||
[]uint64{0, 123},
|
||||
[]uint64{0, 123},
|
||||
@ -115,7 +115,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
|
||||
|
||||
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
|
||||
|
||||
tx, err := helpers.GenSignedMockTx(
|
||||
tx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
txConfig,
|
||||
[]sdk.Msg{
|
||||
@ -126,7 +126,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
|
||||
),
|
||||
},
|
||||
sdk.NewCoins(), // no fee
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
"testing-chain-id",
|
||||
[]uint64{0},
|
||||
[]uint64{0},
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
"github.com/cosmos/cosmos-sdk/x/authz"
|
||||
@ -213,12 +213,12 @@ func TestAuthzLimiterDecorator(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tx, err := helpers.GenSignedMockTx(
|
||||
tx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
txConfig,
|
||||
tc.msgs,
|
||||
sdk.NewCoins(),
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
"testing-chain-id",
|
||||
[]uint64{0},
|
||||
[]uint64{0},
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/tx/signing"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
|
||||
@ -20,6 +20,11 @@ import (
|
||||
ethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cometbft/cometbft/crypto/tmhash"
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
tmversion "github.com/cometbft/cometbft/proto/tendermint/version"
|
||||
"github.com/cometbft/cometbft/version"
|
||||
"github.com/evmos/ethermint/crypto/ethsecp256k1"
|
||||
"github.com/evmos/ethermint/ethereum/eip712"
|
||||
"github.com/evmos/ethermint/tests"
|
||||
@ -27,11 +32,6 @@ import (
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
|
||||
"github.com/stretchr/testify/suite"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
@ -42,7 +42,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
ChainID = "kavatest_1-1"
|
||||
ChainID = app.TestChainId
|
||||
USDCCoinDenom = "erc20/usdc"
|
||||
USDCCDPType = "erc20-usdc"
|
||||
)
|
||||
@ -137,6 +137,7 @@ func (suite *EIP712TestSuite) createTestEIP712CosmosTxBuilder(
|
||||
func (suite *EIP712TestSuite) SetupTest() {
|
||||
tApp := app.NewTestApp()
|
||||
suite.tApp = tApp
|
||||
|
||||
cdc := tApp.AppCodec()
|
||||
suite.evmutilKeeper = tApp.GetEvmutilKeeper()
|
||||
|
||||
@ -290,6 +291,11 @@ func (suite *EIP712TestSuite) SetupTest() {
|
||||
)
|
||||
suite.usdcEVMAddr = pair.GetAddress()
|
||||
|
||||
// update consensus params
|
||||
cParams := tApp.GetConsensusParams(suite.ctx)
|
||||
cParams.Block.MaxGas = sims.DefaultGenTxGas * 20
|
||||
tApp.StoreConsensusParams(suite.ctx, cParams)
|
||||
|
||||
// Add a contract to evmutil conversion pair
|
||||
evmutilParams := suite.evmutilKeeper.GetParams(suite.ctx)
|
||||
evmutilParams.EnabledConversionPairs =
|
||||
@ -399,7 +405,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
// usdxToMintAmt: 99,
|
||||
// },
|
||||
{
|
||||
name: "fails when convertion more erc20 usdc than balance",
|
||||
name: "fails when conversion more erc20 usdc than balance",
|
||||
usdcDepositAmt: 51_000,
|
||||
usdxToMintAmt: 100,
|
||||
errMsg: "transfer amount exceeds balance",
|
||||
@ -455,7 +461,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
var option *codectypes.Any
|
||||
option, _ = codectypes.NewAnyWithValue(ðerminttypes.ExtensionOptionsWeb3Tx{
|
||||
FeePayer: suite.testAddr.String(),
|
||||
TypedDataChainID: 1,
|
||||
TypedDataChainID: 2221,
|
||||
FeePayerSig: []byte("sig"),
|
||||
})
|
||||
builder, _ := txBuilder.(authtx.ExtensionOptionsTxBuilder)
|
||||
@ -484,7 +490,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
|
||||
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
|
||||
return suite.createTestEIP712CosmosTxBuilder(
|
||||
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
)
|
||||
},
|
||||
},
|
||||
@ -497,7 +503,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
|
||||
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
|
||||
return suite.createTestEIP712CosmosTxBuilder(
|
||||
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
)
|
||||
},
|
||||
},
|
||||
@ -525,7 +531,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
|
||||
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
|
||||
txBuilder := suite.createTestEIP712CosmosTxBuilder(
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
|
||||
)
|
||||
if tc.updateTx != nil {
|
||||
txBuilder = tc.updateTx(txBuilder, msgs)
|
||||
@ -599,7 +605,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
|
||||
// deliver deposit msg
|
||||
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
|
||||
txBuilder := suite.createTestEIP712CosmosTxBuilder(
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, depositMsgs,
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, depositMsgs,
|
||||
)
|
||||
txBytes, err := encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
|
||||
suite.Require().NoError(err)
|
||||
@ -633,7 +639,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
|
||||
|
||||
// deliver withdraw msg
|
||||
txBuilder = suite.createTestEIP712CosmosTxBuilder(
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
|
||||
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
|
||||
)
|
||||
txBytes, err = encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
|
||||
suite.Require().NoError(err)
|
||||
|
@ -4,12 +4,12 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
tmtime "github.com/cometbft/cometbft/types/time"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/ante"
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/simapp/helpers"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
@ -73,14 +73,14 @@ func TestVestingMempoolDecorator_MsgCreateVestingAccount_Unauthorized(t *testing
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tx, err := helpers.GenSignedMockTx(
|
||||
tx, err := sims.GenSignedMockTx(
|
||||
rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
txConfig,
|
||||
[]sdk.Msg{
|
||||
tt.msg,
|
||||
},
|
||||
sdk.NewCoins(),
|
||||
helpers.DefaultGenTxGas,
|
||||
sims.DefaultGenTxGas,
|
||||
"testing-chain-id",
|
||||
[]uint64{0},
|
||||
[]uint64{0},
|
||||
|
318
app/app.go
318
app/app.go
@ -5,6 +5,10 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
tmjson "github.com/cometbft/cometbft/libs/json"
|
||||
tmlog "github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
|
||||
@ -20,6 +24,7 @@ import (
|
||||
"github.com/cosmos/cosmos-sdk/version"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth"
|
||||
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
|
||||
authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
|
||||
authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
|
||||
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/vesting"
|
||||
@ -34,11 +39,13 @@ import (
|
||||
"github.com/cosmos/cosmos-sdk/x/capability"
|
||||
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
|
||||
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
|
||||
consensus "github.com/cosmos/cosmos-sdk/x/consensus"
|
||||
consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
|
||||
consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/crisis"
|
||||
crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
|
||||
crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
|
||||
distr "github.com/cosmos/cosmos-sdk/x/distribution"
|
||||
distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client"
|
||||
distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
|
||||
distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/evidence"
|
||||
@ -70,16 +77,25 @@ import (
|
||||
upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
|
||||
upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
|
||||
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
||||
transfer "github.com/cosmos/ibc-go/v6/modules/apps/transfer"
|
||||
ibctransferkeeper "github.com/cosmos/ibc-go/v6/modules/apps/transfer/keeper"
|
||||
ibctransfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
|
||||
ibc "github.com/cosmos/ibc-go/v6/modules/core"
|
||||
ibcclient "github.com/cosmos/ibc-go/v6/modules/core/02-client"
|
||||
ibcclientclient "github.com/cosmos/ibc-go/v6/modules/core/02-client/client"
|
||||
ibcclienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
|
||||
porttypes "github.com/cosmos/ibc-go/v6/modules/core/05-port/types"
|
||||
ibchost "github.com/cosmos/ibc-go/v6/modules/core/24-host"
|
||||
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
|
||||
"github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward"
|
||||
packetforwardkeeper "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/keeper"
|
||||
packetforwardtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/types"
|
||||
ibcwasm "github.com/cosmos/ibc-go/modules/light-clients/08-wasm"
|
||||
ibcwasmkeeper "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/keeper"
|
||||
ibcwasmtypes "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/types"
|
||||
transfer "github.com/cosmos/ibc-go/v7/modules/apps/transfer"
|
||||
ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper"
|
||||
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
|
||||
ibc "github.com/cosmos/ibc-go/v7/modules/core"
|
||||
ibcclient "github.com/cosmos/ibc-go/v7/modules/core/02-client"
|
||||
ibcclientclient "github.com/cosmos/ibc-go/v7/modules/core/02-client/client"
|
||||
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
|
||||
ibcporttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types"
|
||||
ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported"
|
||||
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
|
||||
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
|
||||
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
evmante "github.com/evmos/ethermint/app/ante"
|
||||
ethermintconfig "github.com/evmos/ethermint/server/config"
|
||||
"github.com/evmos/ethermint/x/evm"
|
||||
@ -90,11 +106,6 @@ import (
|
||||
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/0glabs/0g-chain/app/ante"
|
||||
chainparams "github.com/0glabs/0g-chain/app/params"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
@ -119,6 +130,9 @@ import (
|
||||
issuance "github.com/0glabs/0g-chain/x/issuance"
|
||||
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
|
||||
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
|
||||
"github.com/0glabs/0g-chain/x/precisebank"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
pricefeed "github.com/0glabs/0g-chain/x/pricefeed"
|
||||
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
|
||||
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
|
||||
@ -126,14 +140,13 @@ import (
|
||||
validatorvestingrest "github.com/0glabs/0g-chain/x/validator-vesting/client/rest"
|
||||
validatorvestingtypes "github.com/0glabs/0g-chain/x/validator-vesting/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
)
|
||||
|
||||
var (
|
||||
// ModuleBasics manages simple versions of full app modules.
|
||||
// It's used for things such as codec registration and genesis file verification.
|
||||
ModuleBasics = module.NewBasicManager(
|
||||
genutil.AppModuleBasic{},
|
||||
genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
|
||||
auth.AppModuleBasic{},
|
||||
bank.AppModuleBasic{},
|
||||
capability.AppModuleBasic{},
|
||||
@ -141,7 +154,6 @@ var (
|
||||
distr.AppModuleBasic{},
|
||||
gov.NewAppModuleBasic([]govclient.ProposalHandler{
|
||||
paramsclient.ProposalHandler,
|
||||
distrclient.ProposalHandler,
|
||||
upgradeclient.LegacyProposalHandler,
|
||||
upgradeclient.LegacyCancelProposalHandler,
|
||||
ibcclientclient.UpdateClientProposalHandler,
|
||||
@ -152,6 +164,9 @@ var (
|
||||
crisis.AppModuleBasic{},
|
||||
slashing.AppModuleBasic{},
|
||||
ibc.AppModuleBasic{},
|
||||
ibctm.AppModuleBasic{},
|
||||
solomachine.AppModuleBasic{},
|
||||
packetforward.AppModuleBasic{},
|
||||
upgrade.AppModuleBasic{},
|
||||
evidence.AppModuleBasic{},
|
||||
authzmodule.AppModuleBasic{},
|
||||
@ -166,8 +181,11 @@ var (
|
||||
validatorvesting.AppModuleBasic{},
|
||||
evmutil.AppModuleBasic{},
|
||||
mint.AppModuleBasic{},
|
||||
precisebank.AppModuleBasic{},
|
||||
council.AppModuleBasic{},
|
||||
dasigners.AppModuleBasic{},
|
||||
consensus.AppModuleBasic{},
|
||||
ibcwasm.AppModuleBasic{},
|
||||
)
|
||||
|
||||
// module account permissions
|
||||
@ -185,13 +203,13 @@ var (
|
||||
issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner},
|
||||
bep3types.ModuleName: {authtypes.Burner, authtypes.Minter},
|
||||
minttypes.ModuleName: {authtypes.Minter},
|
||||
precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, // used for reserve account to back fractional amounts
|
||||
}
|
||||
)
|
||||
|
||||
// Verify app interface at compile time
|
||||
var (
|
||||
_ servertypes.Application = (*App)(nil)
|
||||
_ servertypes.ApplicationQueryService = (*App)(nil)
|
||||
_ servertypes.Application = (*App)(nil)
|
||||
)
|
||||
|
||||
// Options bundles several configuration params for an App.
|
||||
@ -227,31 +245,35 @@ type App struct {
|
||||
memKeys map[string]*storetypes.MemoryStoreKey
|
||||
|
||||
// keepers from all the modules
|
||||
accountKeeper authkeeper.AccountKeeper
|
||||
bankKeeper bankkeeper.Keeper
|
||||
capabilityKeeper *capabilitykeeper.Keeper
|
||||
stakingKeeper stakingkeeper.Keeper
|
||||
distrKeeper distrkeeper.Keeper
|
||||
govKeeper govkeeper.Keeper
|
||||
paramsKeeper paramskeeper.Keeper
|
||||
authzKeeper authzkeeper.Keeper
|
||||
crisisKeeper crisiskeeper.Keeper
|
||||
slashingKeeper slashingkeeper.Keeper
|
||||
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
|
||||
evmKeeper *evmkeeper.Keeper
|
||||
evmutilKeeper evmutilkeeper.Keeper
|
||||
feeMarketKeeper feemarketkeeper.Keeper
|
||||
upgradeKeeper upgradekeeper.Keeper
|
||||
evidenceKeeper evidencekeeper.Keeper
|
||||
transferKeeper ibctransferkeeper.Keeper
|
||||
CouncilKeeper councilkeeper.Keeper
|
||||
issuanceKeeper issuancekeeper.Keeper
|
||||
bep3Keeper bep3keeper.Keeper
|
||||
pricefeedKeeper pricefeedkeeper.Keeper
|
||||
committeeKeeper committeekeeper.Keeper
|
||||
vestingKeeper vestingkeeper.VestingKeeper
|
||||
mintKeeper mintkeeper.Keeper
|
||||
dasignersKeeper dasignerskeeper.Keeper
|
||||
accountKeeper authkeeper.AccountKeeper
|
||||
bankKeeper bankkeeper.Keeper
|
||||
capabilityKeeper *capabilitykeeper.Keeper
|
||||
stakingKeeper *stakingkeeper.Keeper
|
||||
distrKeeper distrkeeper.Keeper
|
||||
govKeeper govkeeper.Keeper
|
||||
paramsKeeper paramskeeper.Keeper
|
||||
authzKeeper authzkeeper.Keeper
|
||||
crisisKeeper crisiskeeper.Keeper
|
||||
slashingKeeper slashingkeeper.Keeper
|
||||
ibcWasmClientKeeper ibcwasmkeeper.Keeper
|
||||
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
|
||||
packetForwardKeeper *packetforwardkeeper.Keeper
|
||||
evmKeeper *evmkeeper.Keeper
|
||||
evmutilKeeper evmutilkeeper.Keeper
|
||||
feeMarketKeeper feemarketkeeper.Keeper
|
||||
upgradeKeeper upgradekeeper.Keeper
|
||||
evidenceKeeper evidencekeeper.Keeper
|
||||
transferKeeper ibctransferkeeper.Keeper
|
||||
CouncilKeeper councilkeeper.Keeper
|
||||
issuanceKeeper issuancekeeper.Keeper
|
||||
bep3Keeper bep3keeper.Keeper
|
||||
pricefeedKeeper pricefeedkeeper.Keeper
|
||||
committeeKeeper committeekeeper.Keeper
|
||||
vestingKeeper vestingkeeper.VestingKeeper
|
||||
mintKeeper mintkeeper.Keeper
|
||||
dasignersKeeper dasignerskeeper.Keeper
|
||||
consensusParamsKeeper consensusparamkeeper.Keeper
|
||||
precisebankKeeper precisebankkeeper.Keeper
|
||||
|
||||
// make scoped keepers public for test purposes
|
||||
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
|
||||
@ -291,8 +313,8 @@ func NewApp(
|
||||
|
||||
keys := sdk.NewKVStoreKeys(
|
||||
authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
|
||||
distrtypes.StoreKey, slashingtypes.StoreKey,
|
||||
govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey,
|
||||
distrtypes.StoreKey, slashingtypes.StoreKey, packetforwardtypes.StoreKey,
|
||||
govtypes.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey,
|
||||
upgradetypes.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey,
|
||||
evmtypes.StoreKey, feemarkettypes.StoreKey, authzkeeper.StoreKey,
|
||||
capabilitytypes.StoreKey,
|
||||
@ -302,12 +324,15 @@ func NewApp(
|
||||
counciltypes.StoreKey,
|
||||
dasignerstypes.StoreKey,
|
||||
vestingtypes.StoreKey,
|
||||
consensusparamtypes.StoreKey, crisistypes.StoreKey, precisebanktypes.StoreKey,
|
||||
ibcwasmtypes.StoreKey,
|
||||
)
|
||||
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
|
||||
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
|
||||
|
||||
// Authority for gov proposals, using the x/gov module account address
|
||||
govAuthorityAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
|
||||
govAuthAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
|
||||
govAuthAddrStr := govAuthAddr.String()
|
||||
|
||||
app := &App{
|
||||
BaseApp: bApp,
|
||||
@ -336,18 +361,20 @@ func NewApp(
|
||||
issuanceSubspace := app.paramsKeeper.Subspace(issuancetypes.ModuleName)
|
||||
bep3Subspace := app.paramsKeeper.Subspace(bep3types.ModuleName)
|
||||
pricefeedSubspace := app.paramsKeeper.Subspace(pricefeedtypes.ModuleName)
|
||||
ibcSubspace := app.paramsKeeper.Subspace(ibchost.ModuleName)
|
||||
ibcSubspace := app.paramsKeeper.Subspace(ibcexported.ModuleName)
|
||||
ibctransferSubspace := app.paramsKeeper.Subspace(ibctransfertypes.ModuleName)
|
||||
packetforwardSubspace := app.paramsKeeper.Subspace(packetforwardtypes.ModuleName).WithKeyTable(packetforwardtypes.ParamKeyTable())
|
||||
feemarketSubspace := app.paramsKeeper.Subspace(feemarkettypes.ModuleName)
|
||||
evmSubspace := app.paramsKeeper.Subspace(evmtypes.ModuleName)
|
||||
evmutilSubspace := app.paramsKeeper.Subspace(evmutiltypes.ModuleName)
|
||||
mintSubspace := app.paramsKeeper.Subspace(minttypes.ModuleName)
|
||||
|
||||
bApp.SetParamStore(
|
||||
app.paramsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()),
|
||||
)
|
||||
// set the BaseApp's parameter store
|
||||
app.consensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, keys[consensusparamtypes.StoreKey], govAuthAddrStr)
|
||||
bApp.SetParamStore(&app.consensusParamsKeeper)
|
||||
|
||||
app.capabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey])
|
||||
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibchost.ModuleName)
|
||||
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibcexported.ModuleName)
|
||||
scopedTransferKeeper := app.capabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName)
|
||||
app.capabilityKeeper.Seal()
|
||||
|
||||
@ -355,17 +382,17 @@ func NewApp(
|
||||
app.accountKeeper = authkeeper.NewAccountKeeper(
|
||||
appCodec,
|
||||
keys[authtypes.StoreKey],
|
||||
authSubspace,
|
||||
authtypes.ProtoBaseAccount,
|
||||
mAccPerms,
|
||||
sdk.GetConfig().GetBech32AccountAddrPrefix(),
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.bankKeeper = bankkeeper.NewBaseKeeper(
|
||||
appCodec,
|
||||
keys[banktypes.StoreKey],
|
||||
app.accountKeeper,
|
||||
bankSubspace,
|
||||
app.loadBlockedMaccAddrs(),
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.vestingKeeper = vestingkeeper.NewVestingKeeper(app.accountKeeper, app.bankKeeper, keys[vestingtypes.StoreKey])
|
||||
|
||||
@ -375,7 +402,7 @@ func NewApp(
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
app.vestingKeeper,
|
||||
stakingSubspace,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.authzKeeper = authzkeeper.NewKeeper(
|
||||
keys[authzkeeper.StoreKey],
|
||||
@ -386,52 +413,68 @@ func NewApp(
|
||||
app.distrKeeper = distrkeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[distrtypes.StoreKey],
|
||||
distrSubspace,
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
&app.stakingKeeper,
|
||||
app.stakingKeeper,
|
||||
authtypes.FeeCollectorName,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.slashingKeeper = slashingkeeper.NewKeeper(
|
||||
appCodec,
|
||||
app.legacyAmino,
|
||||
keys[slashingtypes.StoreKey],
|
||||
&app.stakingKeeper,
|
||||
slashingSubspace,
|
||||
app.stakingKeeper,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.crisisKeeper = crisiskeeper.NewKeeper(
|
||||
crisisSubspace,
|
||||
app.crisisKeeper = *crisiskeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[crisistypes.StoreKey],
|
||||
options.InvariantCheckPeriod,
|
||||
app.bankKeeper,
|
||||
authtypes.FeeCollectorName,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.upgradeKeeper = upgradekeeper.NewKeeper(
|
||||
app.upgradeKeeper = *upgradekeeper.NewKeeper(
|
||||
options.SkipUpgradeHeights,
|
||||
keys[upgradetypes.StoreKey],
|
||||
appCodec,
|
||||
homePath,
|
||||
app.BaseApp,
|
||||
govAuthorityAddr.String(),
|
||||
govAuthAddrStr,
|
||||
)
|
||||
app.evidenceKeeper = *evidencekeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[evidencetypes.StoreKey],
|
||||
&app.stakingKeeper,
|
||||
app.stakingKeeper,
|
||||
app.slashingKeeper,
|
||||
)
|
||||
|
||||
app.ibcKeeper = ibckeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[ibchost.StoreKey],
|
||||
keys[ibcexported.StoreKey],
|
||||
ibcSubspace,
|
||||
app.stakingKeeper,
|
||||
app.upgradeKeeper,
|
||||
scopedIBCKeeper,
|
||||
)
|
||||
|
||||
app.ibcWasmClientKeeper = ibcwasmkeeper.NewKeeperWithConfig(
|
||||
appCodec,
|
||||
keys[ibcwasmtypes.StoreKey],
|
||||
app.ibcKeeper.ClientKeeper,
|
||||
authtypes.NewModuleAddress(govtypes.ModuleName).String(),
|
||||
ibcwasmtypes.WasmConfig{
|
||||
DataDir: "ibc_08-wasm",
|
||||
SupportedCapabilities: "iterator,stargate",
|
||||
ContractDebugMode: false,
|
||||
},
|
||||
app.GRPCQueryRouter(),
|
||||
)
|
||||
|
||||
// Create Ethermint keepers
|
||||
app.feeMarketKeeper = feemarketkeeper.NewKeeper(
|
||||
appCodec,
|
||||
govAuthorityAddr,
|
||||
govAuthAddr,
|
||||
keys[feemarkettypes.StoreKey],
|
||||
tkeys[feemarkettypes.TransientKey],
|
||||
feemarketSubspace,
|
||||
@ -445,9 +488,15 @@ func NewApp(
|
||||
app.accountKeeper,
|
||||
)
|
||||
|
||||
evmBankKeeper := evmutilkeeper.NewEvmBankKeeper(app.evmutilKeeper, app.bankKeeper, app.accountKeeper)
|
||||
app.precisebankKeeper = precisebankkeeper.NewKeeper(
|
||||
app.appCodec,
|
||||
keys[precisebanktypes.StoreKey],
|
||||
app.bankKeeper,
|
||||
app.accountKeeper,
|
||||
)
|
||||
|
||||
// dasigners keeper
|
||||
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper)
|
||||
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper, govAuthAddrStr)
|
||||
// precopmiles
|
||||
precompiles := make(map[common.Address]vm.PrecompiledContract)
|
||||
daSignersPrecompile, err := dasignersprecompile.NewDASignersPrecompile(app.dasignersKeeper)
|
||||
@ -455,35 +504,63 @@ func NewApp(
|
||||
panic("initialize precompile failed")
|
||||
}
|
||||
precompiles[daSignersPrecompile.Address()] = daSignersPrecompile
|
||||
// evm keeper
|
||||
|
||||
app.evmKeeper = evmkeeper.NewKeeper(
|
||||
appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey],
|
||||
govAuthorityAddr,
|
||||
app.accountKeeper, evmBankKeeper, app.stakingKeeper, app.feeMarketKeeper,
|
||||
govAuthAddr,
|
||||
app.accountKeeper,
|
||||
app.precisebankKeeper, // x/precisebank in place of x/bank
|
||||
app.stakingKeeper,
|
||||
app.feeMarketKeeper,
|
||||
options.EVMTrace,
|
||||
evmSubspace,
|
||||
precompiles,
|
||||
)
|
||||
|
||||
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
|
||||
|
||||
// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper
|
||||
app.packetForwardKeeper = packetforwardkeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[packetforwardtypes.StoreKey],
|
||||
nil, // will be zero-value here, reference is set later on with SetTransferKeeper.
|
||||
app.ibcKeeper.ChannelKeeper,
|
||||
app.distrKeeper,
|
||||
app.bankKeeper,
|
||||
app.ibcKeeper.ChannelKeeper,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
|
||||
app.transferKeeper = ibctransferkeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[ibctransfertypes.StoreKey],
|
||||
ibctransferSubspace,
|
||||
app.ibcKeeper.ChannelKeeper,
|
||||
app.packetForwardKeeper,
|
||||
app.ibcKeeper.ChannelKeeper,
|
||||
&app.ibcKeeper.PortKeeper,
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
scopedTransferKeeper,
|
||||
)
|
||||
app.packetForwardKeeper.SetTransferKeeper(app.transferKeeper)
|
||||
transferModule := transfer.NewAppModule(app.transferKeeper)
|
||||
transferIBCModule := transfer.NewIBCModule(app.transferKeeper)
|
||||
|
||||
// allow ibc packet forwarding for ibc transfers.
|
||||
// transfer stack contains (from top to bottom):
|
||||
// - Packet Forward Middleware
|
||||
// - Transfer
|
||||
var transferStack ibcporttypes.IBCModule
|
||||
transferStack = transfer.NewIBCModule(app.transferKeeper)
|
||||
transferStack = packetforward.NewIBCMiddleware(
|
||||
transferStack,
|
||||
app.packetForwardKeeper,
|
||||
0, // retries on timeout
|
||||
packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp,
|
||||
packetforwardkeeper.DefaultRefundTransferPacketTimeoutTimestamp,
|
||||
)
|
||||
|
||||
// Create static IBC router, add transfer route, then set and seal it
|
||||
ibcRouter := porttypes.NewRouter()
|
||||
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferIBCModule)
|
||||
ibcRouter := ibcporttypes.NewRouter()
|
||||
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
|
||||
app.ibcKeeper.SetRouter(ibcRouter)
|
||||
|
||||
app.issuanceKeeper = issuancekeeper.NewKeeper(
|
||||
@ -510,11 +587,11 @@ func NewApp(
|
||||
app.mintKeeper = mintkeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[minttypes.StoreKey],
|
||||
mintSubspace,
|
||||
app.stakingKeeper,
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
authtypes.FeeCollectorName,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
|
||||
// create committee keeper with router
|
||||
@ -522,8 +599,7 @@ func NewApp(
|
||||
committeeGovRouter.
|
||||
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
|
||||
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
|
||||
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
|
||||
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper))
|
||||
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper))
|
||||
// Note: the committee proposal handler is not registered on the committee router. This means committees cannot create or update other committees.
|
||||
// Adding the committee proposal handler to the router is possible but awkward as the handler depends on the keeper which depends on the handler.
|
||||
app.committeeKeeper = committeekeeper.NewKeeper(
|
||||
@ -536,12 +612,11 @@ func NewApp(
|
||||
)
|
||||
|
||||
// register the staking hooks
|
||||
// NOTE: These keepers are passed by reference above, so they will contain these hooks.
|
||||
app.stakingKeeper = *(app.stakingKeeper.SetHooks(
|
||||
app.stakingKeeper.SetHooks(
|
||||
stakingtypes.NewMultiStakingHooks(
|
||||
app.distrKeeper.Hooks(),
|
||||
app.slashingKeeper.Hooks(),
|
||||
)))
|
||||
))
|
||||
|
||||
// create gov keeper with router
|
||||
// NOTE this must be done after any keepers referenced in the gov router (ie committee) are defined
|
||||
@ -549,27 +624,27 @@ func NewApp(
|
||||
govRouter.
|
||||
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
|
||||
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
|
||||
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)).
|
||||
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper)).
|
||||
AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.ibcKeeper.ClientKeeper)).
|
||||
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
|
||||
AddRoute(committeetypes.RouterKey, committee.NewProposalHandler(app.committeeKeeper))
|
||||
|
||||
govConfig := govtypes.DefaultConfig()
|
||||
app.govKeeper = govkeeper.NewKeeper(
|
||||
govKeeper := govkeeper.NewKeeper(
|
||||
appCodec,
|
||||
keys[govtypes.StoreKey],
|
||||
govSubspace,
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
&app.stakingKeeper,
|
||||
govRouter,
|
||||
app.stakingKeeper,
|
||||
app.MsgServiceRouter(),
|
||||
govConfig,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
govKeeper.SetLegacyRouter(govRouter)
|
||||
app.govKeeper = *govKeeper
|
||||
|
||||
// override x/gov tally handler with custom implementation
|
||||
tallyHandler := NewTallyHandler(
|
||||
app.govKeeper, app.stakingKeeper, app.bankKeeper,
|
||||
app.govKeeper, *app.stakingKeeper, app.bankKeeper,
|
||||
)
|
||||
app.govKeeper.SetTallyHandler(tallyHandler)
|
||||
|
||||
@ -581,22 +656,24 @@ func NewApp(
|
||||
// must be passed by reference here.)
|
||||
app.mm = module.NewManager(
|
||||
genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx, encodingConfig.TxConfig),
|
||||
auth.NewAppModule(appCodec, app.accountKeeper, nil),
|
||||
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper),
|
||||
capability.NewAppModule(appCodec, *app.capabilityKeeper),
|
||||
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper),
|
||||
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
|
||||
gov.NewAppModule(appCodec, app.govKeeper, app.accountKeeper, app.bankKeeper),
|
||||
auth.NewAppModule(appCodec, app.accountKeeper, authsims.RandomGenesisAccounts, authSubspace),
|
||||
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper, bankSubspace),
|
||||
capability.NewAppModule(appCodec, *app.capabilityKeeper, false), // todo: confirm if this is okay to not be sealed
|
||||
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper, stakingSubspace),
|
||||
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, distrSubspace),
|
||||
gov.NewAppModule(appCodec, &app.govKeeper, app.accountKeeper, app.bankKeeper, govSubspace),
|
||||
params.NewAppModule(app.paramsKeeper),
|
||||
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants),
|
||||
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
|
||||
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants, crisisSubspace),
|
||||
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, slashingSubspace),
|
||||
consensus.NewAppModule(appCodec, app.consensusParamsKeeper),
|
||||
ibc.NewAppModule(app.ibcKeeper),
|
||||
packetforward.NewAppModule(app.packetForwardKeeper, packetforwardSubspace),
|
||||
evm.NewAppModule(app.evmKeeper, app.accountKeeper),
|
||||
feemarket.NewAppModule(app.feeMarketKeeper, feemarketSubspace),
|
||||
upgrade.NewAppModule(app.upgradeKeeper),
|
||||
upgrade.NewAppModule(&app.upgradeKeeper),
|
||||
evidence.NewAppModule(app.evidenceKeeper),
|
||||
transferModule,
|
||||
vesting.NewAppModule(app.accountKeeper, app.vestingKeeper),
|
||||
vesting.NewAppModule(app.accountKeeper, app.bankKeeper, app.vestingKeeper),
|
||||
authzmodule.NewAppModule(appCodec, app.authzKeeper, app.accountKeeper, app.bankKeeper, app.interfaceRegistry),
|
||||
issuance.NewAppModule(app.issuanceKeeper, app.accountKeeper, app.bankKeeper),
|
||||
bep3.NewAppModule(app.bep3Keeper, app.accountKeeper, app.bankKeeper),
|
||||
@ -605,9 +682,11 @@ func NewApp(
|
||||
committee.NewAppModule(app.committeeKeeper, app.accountKeeper),
|
||||
evmutil.NewAppModule(app.evmutilKeeper, app.bankKeeper, app.accountKeeper),
|
||||
// nil InflationCalculationFn, use SDK's default inflation function
|
||||
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, chaincfg.NextInflationRate),
|
||||
council.NewAppModule(app.CouncilKeeper, app.stakingKeeper),
|
||||
dasigners.NewAppModule(app.dasignersKeeper, app.stakingKeeper),
|
||||
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, nil, mintSubspace),
|
||||
precisebank.NewAppModule(app.precisebankKeeper, app.bankKeeper, app.accountKeeper),
|
||||
council.NewAppModule(app.CouncilKeeper),
|
||||
ibcwasm.NewAppModule(app.ibcWasmClientKeeper),
|
||||
dasigners.NewAppModule(app.dasignersKeeper, *app.stakingKeeper),
|
||||
)
|
||||
|
||||
// Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list.
|
||||
@ -635,7 +714,7 @@ func NewApp(
|
||||
// It should be run before cdp begin blocker which cancels out debt with stable and starts more auctions.
|
||||
bep3types.ModuleName,
|
||||
issuancetypes.ModuleName,
|
||||
ibchost.ModuleName,
|
||||
ibcexported.ModuleName,
|
||||
// Add all remaining modules with an empty begin blocker below since cosmos 0.45.0 requires it
|
||||
vestingtypes.ModuleName,
|
||||
pricefeedtypes.ModuleName,
|
||||
@ -649,8 +728,11 @@ func NewApp(
|
||||
paramstypes.ModuleName,
|
||||
authz.ModuleName,
|
||||
evmutiltypes.ModuleName,
|
||||
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
precisebanktypes.ModuleName,
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
)
|
||||
|
||||
@ -673,7 +755,7 @@ func NewApp(
|
||||
upgradetypes.ModuleName,
|
||||
evidencetypes.ModuleName,
|
||||
vestingtypes.ModuleName,
|
||||
ibchost.ModuleName,
|
||||
ibcexported.ModuleName,
|
||||
validatorvestingtypes.ModuleName,
|
||||
authtypes.ModuleName,
|
||||
banktypes.ModuleName,
|
||||
@ -684,6 +766,10 @@ func NewApp(
|
||||
evmutiltypes.ModuleName,
|
||||
minttypes.ModuleName,
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
precisebanktypes.ModuleName,
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
)
|
||||
|
||||
@ -697,7 +783,7 @@ func NewApp(
|
||||
slashingtypes.ModuleName, // iterates over validators, run after staking
|
||||
govtypes.ModuleName,
|
||||
minttypes.ModuleName,
|
||||
ibchost.ModuleName,
|
||||
ibcexported.ModuleName,
|
||||
evidencetypes.ModuleName,
|
||||
authz.ModuleName,
|
||||
ibctransfertypes.ModuleName,
|
||||
@ -709,18 +795,21 @@ func NewApp(
|
||||
committeetypes.ModuleName,
|
||||
evmutiltypes.ModuleName,
|
||||
genutiltypes.ModuleName, // runs arbitrary txs included in genisis state, so run after modules have been initialized
|
||||
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
|
||||
// Add all remaining modules with an empty InitGenesis below since cosmos 0.45.0 requires it
|
||||
vestingtypes.ModuleName,
|
||||
paramstypes.ModuleName,
|
||||
upgradetypes.ModuleName,
|
||||
validatorvestingtypes.ModuleName,
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
precisebanktypes.ModuleName, // Must be run after x/bank to verify reserve balance
|
||||
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
)
|
||||
|
||||
app.mm.RegisterInvariants(&app.crisisKeeper)
|
||||
app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino)
|
||||
|
||||
app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
|
||||
app.RegisterServices(app.configurator)
|
||||
@ -792,6 +881,15 @@ func NewApp(
|
||||
}
|
||||
}
|
||||
|
||||
if manager := app.SnapshotManager(); manager != nil {
|
||||
err := manager.RegisterExtensions(
|
||||
ibcwasmkeeper.NewWasmSnapshotter(app.CommitMultiStore(), &app.ibcWasmClientKeeper),
|
||||
)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to register snapshot extension: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
app.ScopedIBCKeeper = scopedIBCKeeper
|
||||
app.ScopedTransferKeeper = scopedTransferKeeper
|
||||
|
||||
|
@ -3,22 +3,26 @@ package app
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
|
||||
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
db "github.com/tendermint/tm-db"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
db "github.com/cometbft/cometbft-db"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/testutil/sims"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
|
||||
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
|
||||
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
|
||||
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewApp(t *testing.T) {
|
||||
@ -36,7 +40,7 @@ func TestNewApp(t *testing.T) {
|
||||
func TestExport(t *testing.T) {
|
||||
chaincfg.SetSDKConfig()
|
||||
db := db.NewMemDB()
|
||||
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions)
|
||||
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions, baseapp.SetChainID(TestChainId))
|
||||
|
||||
genesisState := GenesisStateWithSingleValidator(&TestApp{App: *app}, NewDefaultGenesisState())
|
||||
|
||||
@ -45,21 +49,23 @@ func TestExport(t *testing.T) {
|
||||
|
||||
initRequest := abci.RequestInitChain{
|
||||
Time: time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
ChainId: "kavatest_1-1",
|
||||
ChainId: TestChainId,
|
||||
InitialHeight: 1,
|
||||
ConsensusParams: tmtypes.TM2PB.ConsensusParams(tmtypes.DefaultConsensusParams()),
|
||||
ConsensusParams: sims.DefaultConsensusParams,
|
||||
Validators: nil,
|
||||
AppStateBytes: stateBytes,
|
||||
}
|
||||
app.InitChain(initRequest)
|
||||
app.Commit()
|
||||
|
||||
exportedApp, err := app.ExportAppStateAndValidators(false, []string{})
|
||||
exportedApp, err := app.ExportAppStateAndValidators(false, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assume each module is exported correctly, so only check modules in genesis are present in export
|
||||
initialModules, err := unmarshalJSONKeys(initRequest.AppStateBytes)
|
||||
require.NoError(t, err)
|
||||
// note ibctm is only registered in the BasicManager and not module manager so can be ignored
|
||||
initialModules = removeIbcTmModule(initialModules)
|
||||
exportedModules, err := unmarshalJSONKeys(exportedApp.AppState)
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, initialModules, exportedModules)
|
||||
@ -143,3 +149,13 @@ func unmarshalJSONKeys(jsonBytes []byte) ([]string, error) {
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func removeIbcTmModule(modules []string) []string {
|
||||
var result []string
|
||||
for _, str := range modules {
|
||||
if str != ibctm.ModuleName && str != solomachine.ModuleName {
|
||||
result = append(result, str)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
|
||||
servertypes "github.com/cosmos/cosmos-sdk/server/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
// ExportAppStateAndValidators export the state of the app for a genesis file
|
||||
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string,
|
||||
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string, modulesToExport []string,
|
||||
) (servertypes.ExportedApp, error) {
|
||||
// as if they could withdraw from the start of the next block
|
||||
// block time is not available and defaults to Jan 1st, 0001
|
||||
@ -26,7 +26,7 @@ func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []
|
||||
app.prepForZeroHeightGenesis(ctx, jailWhiteList)
|
||||
}
|
||||
|
||||
genState := app.mm.ExportGenesis(ctx, app.appCodec)
|
||||
genState := app.mm.ExportGenesisForModules(ctx, app.appCodec, modulesToExport)
|
||||
newAppState, err := json.MarshalIndent(genState, "", " ")
|
||||
if err != nil {
|
||||
return servertypes.ExportedApp{}, err
|
||||
|
@ -3,9 +3,9 @@ Package params defines the simulation parameters for the 0gChain app.
|
||||
|
||||
It contains the default weights used for each transaction used on the module's
|
||||
simulation. These weights define the chance for a transaction to be simulated at
|
||||
any gived operation.
|
||||
any given operation.
|
||||
|
||||
You can repace the default values for the weights by providing a params.json
|
||||
You can replace the default values for the weights by providing a params.json
|
||||
file with the weights defined for each of the transaction operations:
|
||||
|
||||
{
|
||||
|
@ -143,7 +143,7 @@ func (th TallyHandler) Tally(
|
||||
totalVotingPower = totalVotingPower.Add(votingPower)
|
||||
}
|
||||
|
||||
tallyParams := th.gk.GetTallyParams(ctx)
|
||||
tallyParams := th.gk.GetParams(ctx)
|
||||
tallyResults = govv1.NewTallyResultFromMap(results)
|
||||
|
||||
// TODO: Upgrade the spec to cover all of these cases & remove pseudocode.
|
||||
@ -155,7 +155,7 @@ func (th TallyHandler) Tally(
|
||||
// If there is not enough quorum of votes, the proposal fails
|
||||
percentVoting := totalVotingPower.Quo(sdk.NewDecFromInt(th.stk.TotalBondedTokens(ctx)))
|
||||
if percentVoting.LT(sdk.MustNewDecFromStr(tallyParams.Quorum)) {
|
||||
return false, true, tallyResults
|
||||
return false, tallyParams.BurnVoteQuorum, tallyResults
|
||||
}
|
||||
|
||||
// If no one votes (everyone abstains), proposal fails
|
||||
@ -165,7 +165,7 @@ func (th TallyHandler) Tally(
|
||||
|
||||
// If more than 1/3 of voters veto, proposal fails
|
||||
if results[govv1.OptionNoWithVeto].Quo(totalVotingPower).GT(sdk.MustNewDecFromStr(tallyParams.VetoThreshold)) {
|
||||
return false, true, tallyResults
|
||||
return false, tallyParams.BurnVoteVeto, tallyResults
|
||||
}
|
||||
|
||||
// If more than 1/2 of non-abstaining voters vote Yes, proposal passes
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
@ -15,7 +16,6 @@ import (
|
||||
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
"github.com/stretchr/testify/suite"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
// d is an alias for sdk.MustNewDecFromStr
|
||||
@ -41,12 +41,13 @@ func (suite *tallyHandlerSuite) SetupTest() {
|
||||
genesisTime := time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
suite.ctx = suite.app.NewContext(false, tmproto.Header{Height: 1, Time: genesisTime})
|
||||
|
||||
suite.staking = stakingHelper{suite.app.GetStakingKeeper()}
|
||||
stakingKeeper := *suite.app.GetStakingKeeper()
|
||||
suite.staking = stakingHelper{stakingKeeper}
|
||||
suite.staking.setBondDenom(suite.ctx, "ukava")
|
||||
|
||||
suite.tallier = NewTallyHandler(
|
||||
suite.app.GetGovKeeper(),
|
||||
suite.app.GetStakingKeeper(),
|
||||
stakingKeeper,
|
||||
suite.app.GetBankKeeper(),
|
||||
)
|
||||
}
|
||||
@ -129,7 +130,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
|
||||
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
|
||||
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
|
||||
})
|
||||
suite.Run("VetoedFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -144,7 +145,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
|
||||
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
|
||||
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndYesAboveThresholdPasses", func() {
|
||||
suite.SetupTest()
|
||||
@ -161,7 +162,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Truef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndYesBelowThresholdFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -178,7 +179,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("NotEnoughStakeFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -190,7 +191,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndAllAbstainedFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -203,17 +204,18 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (suite *tallyHandlerSuite) setTallyParams(quorum, threshold, veto sdk.Dec) {
|
||||
suite.app.GetGovKeeper().SetTallyParams(suite.ctx, govv1.TallyParams{
|
||||
Quorum: quorum.String(),
|
||||
Threshold: threshold.String(),
|
||||
VetoThreshold: veto.String(),
|
||||
})
|
||||
params := suite.app.GetGovKeeper().GetParams(suite.ctx)
|
||||
params.Quorum = quorum.String()
|
||||
params.Threshold = threshold.String()
|
||||
params.VetoThreshold = veto.String()
|
||||
params.BurnVoteQuorum = true
|
||||
suite.app.GetGovKeeper().SetParams(suite.ctx, params)
|
||||
}
|
||||
|
||||
func (suite *tallyHandlerSuite) voteOnProposal(
|
||||
@ -234,7 +236,7 @@ func (suite *tallyHandlerSuite) voteOnProposal(
|
||||
|
||||
func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
|
||||
gk := suite.app.GetGovKeeper()
|
||||
deposit := gk.GetDepositParams(suite.ctx).MinDeposit
|
||||
deposit := gk.GetParams(suite.ctx).MinDeposit
|
||||
proposer := suite.createAccount(deposit...)
|
||||
|
||||
msg, err := govv1beta1.NewMsgSubmitProposal(
|
||||
@ -244,7 +246,7 @@ func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
|
||||
)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
msgServerv1 := govkeeper.NewMsgServerImpl(gk)
|
||||
msgServerv1 := govkeeper.NewMsgServerImpl(&gk)
|
||||
|
||||
govAcct := gk.GetGovernanceAccount(suite.ctx).GetAddress()
|
||||
msgServer := govkeeper.NewLegacyMsgServerImpl(govAcct.String(), msgServerv1)
|
||||
@ -364,7 +366,7 @@ func (h stakingHelper) createUnbondedValidator(ctx sdk.Context, address sdk.ValA
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
|
||||
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
|
||||
_, err = msgServer.CreateValidator(sdk.WrapSDKContext(ctx), msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -384,7 +386,7 @@ func (h stakingHelper) delegate(ctx sdk.Context, delegator sdk.AccAddress, valid
|
||||
h.newBondCoin(ctx, amount),
|
||||
)
|
||||
|
||||
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
|
||||
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
|
||||
_, err := msgServer.Delegate(sdk.WrapSDKContext(ctx), msg)
|
||||
if err != nil {
|
||||
return sdk.Dec{}, err
|
||||
|
@ -9,6 +9,12 @@ import (
|
||||
"time"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
dasignerskeeper "github.com/0glabs/0g-chain/x/dasigners/v1/keeper"
|
||||
tmdb "github.com/cometbft/cometbft-db"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
@ -35,26 +41,23 @@ import (
|
||||
evmkeeper "github.com/evmos/ethermint/x/evm/keeper"
|
||||
feemarketkeeper "github.com/evmos/ethermint/x/feemarket/keeper"
|
||||
"github.com/stretchr/testify/require"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
tmdb "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
bep3keeper "github.com/0glabs/0g-chain/x/bep3/keeper"
|
||||
committeekeeper "github.com/0glabs/0g-chain/x/committee/keeper"
|
||||
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
|
||||
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyTime time.Time
|
||||
testChainID = "kavatest_1-1"
|
||||
defaultInitialHeight int64 = 1
|
||||
)
|
||||
|
||||
const TestChainId = "zgchain_8888-1"
|
||||
|
||||
// TestApp is a simple wrapper around an App. It exposes internal keepers for use in integration tests.
|
||||
// This file also contains test helpers. Ideally they would be in separate package.
|
||||
// Basic Usage:
|
||||
@ -89,32 +92,41 @@ func NewTestAppFromSealed() TestApp {
|
||||
|
||||
encCfg := MakeEncodingConfig()
|
||||
|
||||
app := NewApp(log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil, encCfg, DefaultOptions)
|
||||
app := NewApp(
|
||||
log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil,
|
||||
encCfg, DefaultOptions, baseapp.SetChainID(TestChainId),
|
||||
)
|
||||
return TestApp{App: *app}
|
||||
}
|
||||
|
||||
// nolint
|
||||
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
|
||||
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
|
||||
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
|
||||
func (tApp TestApp) GetStakingKeeper() stakingkeeper.Keeper { return tApp.stakingKeeper }
|
||||
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
|
||||
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
|
||||
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
|
||||
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
|
||||
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
|
||||
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
|
||||
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
|
||||
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
|
||||
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
|
||||
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
|
||||
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
|
||||
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
|
||||
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
|
||||
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
|
||||
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
|
||||
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
|
||||
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
|
||||
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
|
||||
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
|
||||
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
|
||||
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
|
||||
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
|
||||
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
|
||||
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
|
||||
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
|
||||
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
|
||||
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
|
||||
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
|
||||
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
|
||||
func (tApp TestApp) GetPrecisebankKeeper() precisebankkeeper.Keeper { return tApp.precisebankKeeper }
|
||||
|
||||
func (tApp TestApp) GetKVStoreKey(key string) *storetypes.KVStoreKey {
|
||||
return tApp.keys[key]
|
||||
}
|
||||
|
||||
func (tApp TestApp) GetBlockedMaccAddrs() map[string]bool {
|
||||
return tApp.loadBlockedMaccAddrs()
|
||||
}
|
||||
|
||||
// LegacyAmino returns the app's amino codec.
|
||||
func (app *App) LegacyAmino() *codec.LegacyAmino {
|
||||
return app.legacyAmino
|
||||
@ -246,6 +258,7 @@ func genesisStateWithValSet(
|
||||
balances,
|
||||
totalSupply,
|
||||
currentBankGenesis.DenomMetadata,
|
||||
currentBankGenesis.SendEnabled,
|
||||
)
|
||||
|
||||
// set genesis state
|
||||
@ -259,13 +272,13 @@ func genesisStateWithValSet(
|
||||
// InitializeFromGenesisStates calls InitChain on the app using the provided genesis states.
|
||||
// If any module genesis states are missing, defaults are used.
|
||||
func (tApp TestApp) InitializeFromGenesisStates(genesisStates ...GenesisState) TestApp {
|
||||
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, testChainID, defaultInitialHeight, true, genesisStates...)
|
||||
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, TestChainId, defaultInitialHeight, true, genesisStates...)
|
||||
}
|
||||
|
||||
// InitializeFromGenesisStatesWithTime calls InitChain on the app using the provided genesis states and time.
|
||||
// If any module genesis states are missing, defaults are used.
|
||||
func (tApp TestApp) InitializeFromGenesisStatesWithTime(genTime time.Time, genesisStates ...GenesisState) TestApp {
|
||||
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, testChainID, defaultInitialHeight, true, genesisStates...)
|
||||
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, TestChainId, defaultInitialHeight, true, genesisStates...)
|
||||
}
|
||||
|
||||
// InitializeFromGenesisStatesWithTimeAndChainID calls InitChain on the app using the provided genesis states, time, and chain id.
|
||||
@ -322,8 +335,8 @@ func (tApp TestApp) InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(
|
||||
AppStateBytes: stateBytes,
|
||||
ChainId: chainID,
|
||||
// Set consensus params, which is needed by x/feemarket
|
||||
ConsensusParams: &abci.ConsensusParams{
|
||||
Block: &abci.BlockParams{
|
||||
ConsensusParams: &tmproto.ConsensusParams{
|
||||
Block: &tmproto.BlockParams{
|
||||
MaxBytes: 200000,
|
||||
MaxGas: 20000000,
|
||||
},
|
||||
@ -458,7 +471,7 @@ func (tApp TestApp) SetInflation(ctx sdk.Context, value sdk.Dec) {
|
||||
mk.SetParams(ctx, mintParams)
|
||||
}
|
||||
|
||||
// GeneratePrivKeyAddressPairsFromRand generates (deterministically) a total of n private keys and addresses.
|
||||
// GeneratePrivKeyAddressPairs generates (deterministically) a total of n private keys and addresses.
|
||||
func GeneratePrivKeyAddressPairs(n int) (keys []cryptotypes.PrivKey, addrs []sdk.AccAddress) {
|
||||
r := rand.New(rand.NewSource(12345)) // make the generation deterministic
|
||||
keys = make([]cryptotypes.PrivKey, n)
|
||||
|
386
app/upgrades.go
386
app/upgrades.go
@ -4,86 +4,40 @@ import (
|
||||
"fmt"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
|
||||
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
|
||||
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/module"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
|
||||
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
||||
)
|
||||
|
||||
const (
|
||||
UpgradeName_Mainnet = "v0.25.0"
|
||||
UpgradeName_Testnet = "v0.25.0-alpha.0"
|
||||
UpgradeName_E2ETest = "v0.25.0-testing"
|
||||
)
|
||||
|
||||
var (
|
||||
// KAVA to ukava - 6 decimals
|
||||
kavaConversionFactor = sdk.NewInt(1000_000)
|
||||
secondsPerYear = sdk.NewInt(365 * 24 * 60 * 60)
|
||||
|
||||
// 10 Million KAVA per year in staking rewards, inflation disable time 2024-01-01T00:00:00 UTC
|
||||
// CommunityParams_Mainnet = communitytypes.NewParams(
|
||||
// time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
// // before switchover
|
||||
// sdkmath.LegacyZeroDec(),
|
||||
// // after switchover - 10M KAVA to ukava per year / seconds per year
|
||||
// sdkmath.LegacyNewDec(10_000_000).
|
||||
// MulInt(kavaConversionFactor).
|
||||
// QuoInt(secondsPerYear),
|
||||
// )
|
||||
|
||||
// Testnet -- 15 Trillion KAVA per year in staking rewards, inflation disable time 2023-11-16T00:00:00 UTC
|
||||
// CommunityParams_Testnet = communitytypes.NewParams(
|
||||
// time.Date(2023, 11, 16, 0, 0, 0, 0, time.UTC),
|
||||
// // before switchover
|
||||
// sdkmath.LegacyZeroDec(),
|
||||
// // after switchover
|
||||
// sdkmath.LegacyNewDec(15_000_000).
|
||||
// MulInt64(1_000_000). // 15M * 1M = 15T
|
||||
// MulInt(kavaConversionFactor).
|
||||
// QuoInt(secondsPerYear),
|
||||
// )
|
||||
|
||||
// CommunityParams_E2E = communitytypes.NewParams(
|
||||
// time.Now().Add(10*time.Second).UTC(), // relative time for testing
|
||||
// sdkmath.LegacyNewDec(0), // stakingRewardsPerSecond
|
||||
// sdkmath.LegacyNewDec(1000), // upgradeTimeSetstakingRewardsPerSecond
|
||||
// )
|
||||
|
||||
// ValidatorMinimumCommission is the new 5% minimum commission rate for validators
|
||||
ValidatorMinimumCommission = sdk.NewDecWithPrec(5, 2)
|
||||
UpgradeName_Testnet = "v0.4.0"
|
||||
)
|
||||
|
||||
// RegisterUpgradeHandlers registers the upgrade handlers for the app.
|
||||
func (app App) RegisterUpgradeHandlers() {
|
||||
// app.upgradeKeeper.SetUpgradeHandler(
|
||||
// UpgradeName_Mainnet,
|
||||
// upgradeHandler(app, UpgradeName_Mainnet, CommunityParams_Mainnet),
|
||||
// )
|
||||
// app.upgradeKeeper.SetUpgradeHandler(
|
||||
// UpgradeName_Testnet,
|
||||
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_Testnet),
|
||||
// )
|
||||
// app.upgradeKeeper.SetUpgradeHandler(
|
||||
// UpgradeName_E2ETest,
|
||||
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_E2E),
|
||||
// )
|
||||
app.upgradeKeeper.SetUpgradeHandler(
|
||||
UpgradeName_Testnet,
|
||||
upgradeHandler(app, UpgradeName_Testnet),
|
||||
)
|
||||
|
||||
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
doUpgrade := upgradeInfo.Name == UpgradeName_Mainnet ||
|
||||
upgradeInfo.Name == UpgradeName_Testnet ||
|
||||
upgradeInfo.Name == UpgradeName_E2ETest
|
||||
doUpgrade := upgradeInfo.Name == UpgradeName_Testnet
|
||||
|
||||
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
|
||||
storeUpgrades := storetypes.StoreUpgrades{
|
||||
Added: []string{
|
||||
// x/community added store
|
||||
// communitytypes.ModuleName,
|
||||
precisebanktypes.ModuleName,
|
||||
},
|
||||
}
|
||||
|
||||
@ -96,163 +50,219 @@ func (app App) RegisterUpgradeHandlers() {
|
||||
func upgradeHandler(
|
||||
app App,
|
||||
name string,
|
||||
// communityParams communitytypes.Params,
|
||||
) upgradetypes.UpgradeHandler {
|
||||
return func(
|
||||
ctx sdk.Context,
|
||||
plan upgradetypes.Plan,
|
||||
fromVM module.VersionMap,
|
||||
) (module.VersionMap, error) {
|
||||
app.Logger().Info(fmt.Sprintf("running %s upgrade handler", name))
|
||||
logger := app.Logger()
|
||||
logger.Info(fmt.Sprintf("running %s upgrade handler", name))
|
||||
|
||||
toVM, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
|
||||
// Run migrations for all modules and return new consensus version map.
|
||||
versionMap, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
|
||||
if err != nil {
|
||||
return toVM, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//
|
||||
// Staking validator minimum commission
|
||||
//
|
||||
UpdateValidatorMinimumCommission(ctx, app)
|
||||
logger.Info("completed store migrations")
|
||||
|
||||
//
|
||||
// Community Params
|
||||
//
|
||||
// app.communityKeeper.SetParams(ctx, communityParams)
|
||||
// app.Logger().Info(
|
||||
// "initialized x/community params",
|
||||
// "UpgradeTimeDisableInflation", communityParams.UpgradeTimeDisableInflation,
|
||||
// "StakingRewardsPerSecond", communityParams.StakingRewardsPerSecond,
|
||||
// "UpgradeTimeSetStakingRewardsPerSecond", communityParams.UpgradeTimeSetStakingRewardsPerSecond,
|
||||
// )
|
||||
// Migration of fractional balances from x/evmutil to x/precisebank
|
||||
if err := MigrateEvmutilToPrecisebank(
|
||||
ctx,
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
app.evmutilKeeper,
|
||||
app.precisebankKeeper,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//
|
||||
// Kavadist gov grant
|
||||
//
|
||||
// msgGrant, err := authz.NewMsgGrant(
|
||||
// app.accountKeeper.GetModuleAddress(kavadisttypes.ModuleName), // granter
|
||||
// app.accountKeeper.GetModuleAddress(govtypes.ModuleName), // grantee
|
||||
// authz.NewGenericAuthorization(sdk.MsgTypeURL(&banktypes.MsgSend{})), // authorization
|
||||
// nil, // expiration
|
||||
// )
|
||||
// if err != nil {
|
||||
// return toVM, err
|
||||
// }
|
||||
// _, err = app.authzKeeper.Grant(ctx, msgGrant)
|
||||
// if err != nil {
|
||||
// return toVM, err
|
||||
// }
|
||||
// app.Logger().Info("created gov grant for kavadist funds")
|
||||
logger.Info("completed x/evmutil to x/precisebank migration")
|
||||
|
||||
//
|
||||
// Gov Quorum
|
||||
//
|
||||
govTallyParams := app.govKeeper.GetTallyParams(ctx)
|
||||
oldQuorum := govTallyParams.Quorum
|
||||
govTallyParams.Quorum = sdkmath.LegacyMustNewDecFromStr("0.2").String()
|
||||
app.govKeeper.SetTallyParams(ctx, govTallyParams)
|
||||
app.Logger().Info(fmt.Sprintf("updated tally quorum from %s to %s", oldQuorum, govTallyParams.Quorum))
|
||||
|
||||
//
|
||||
// Incentive Params
|
||||
//
|
||||
UpdateIncentiveParams(ctx, app)
|
||||
|
||||
return toVM, nil
|
||||
return versionMap, nil
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateValidatorMinimumCommission updates the commission rate for all
|
||||
// validators to be at least the new min commission rate, and sets the minimum
|
||||
// commission rate in the staking params.
|
||||
func UpdateValidatorMinimumCommission(
|
||||
// MigrateEvmutilToPrecisebank migrates all required state from x/evmutil to
|
||||
// x/precisebank and ensures the resulting state is correct.
|
||||
// This migrates the following state:
|
||||
// - Fractional balances
|
||||
// - Fractional balance reserve
|
||||
// Initializes the following state in x/precisebank:
|
||||
// - Remainder amount
|
||||
func MigrateEvmutilToPrecisebank(
|
||||
ctx sdk.Context,
|
||||
app App,
|
||||
) {
|
||||
resultCount := make(map[stakingtypes.BondStatus]int)
|
||||
accountKeeper evmutiltypes.AccountKeeper,
|
||||
bankKeeper bankkeeper.Keeper,
|
||||
evmutilKeeper evmutilkeeper.Keeper,
|
||||
precisebankKeeper precisebankkeeper.Keeper,
|
||||
) error {
|
||||
logger := ctx.Logger()
|
||||
|
||||
// Iterate over *all* validators including inactive
|
||||
app.stakingKeeper.IterateValidators(
|
||||
aggregateSum, err := TransferFractionalBalances(
|
||||
ctx,
|
||||
func(index int64, validator stakingtypes.ValidatorI) (stop bool) {
|
||||
// Skip if validator commission is already >= 5%
|
||||
if validator.GetCommission().GTE(ValidatorMinimumCommission) {
|
||||
return false
|
||||
}
|
||||
|
||||
val, ok := validator.(stakingtypes.Validator)
|
||||
if !ok {
|
||||
panic("expected stakingtypes.Validator")
|
||||
}
|
||||
|
||||
// Set minimum commission rate to 5%, when commission is < 5%
|
||||
val.Commission.Rate = ValidatorMinimumCommission
|
||||
val.Commission.UpdateTime = ctx.BlockTime()
|
||||
|
||||
// Update MaxRate if necessary
|
||||
if val.Commission.MaxRate.LT(ValidatorMinimumCommission) {
|
||||
val.Commission.MaxRate = ValidatorMinimumCommission
|
||||
}
|
||||
|
||||
if err := app.stakingKeeper.BeforeValidatorModified(ctx, val.GetOperator()); err != nil {
|
||||
panic(fmt.Sprintf("failed to call BeforeValidatorModified: %s", err))
|
||||
}
|
||||
app.stakingKeeper.SetValidator(ctx, val)
|
||||
|
||||
// Keep track of counts just for logging purposes
|
||||
switch val.GetStatus() {
|
||||
case stakingtypes.Bonded:
|
||||
resultCount[stakingtypes.Bonded]++
|
||||
case stakingtypes.Unbonded:
|
||||
resultCount[stakingtypes.Unbonded]++
|
||||
case stakingtypes.Unbonding:
|
||||
resultCount[stakingtypes.Unbonding]++
|
||||
}
|
||||
|
||||
return false
|
||||
},
|
||||
evmutilKeeper,
|
||||
precisebankKeeper,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fractional balances transfer: %w", err)
|
||||
}
|
||||
logger.Info(
|
||||
"fractional balances transferred from x/evmutil to x/precisebank",
|
||||
"aggregate sum", aggregateSum,
|
||||
)
|
||||
|
||||
app.Logger().Info(
|
||||
"updated validator minimum commission rate for all existing validators",
|
||||
stakingtypes.BondStatusBonded, resultCount[stakingtypes.Bonded],
|
||||
stakingtypes.BondStatusUnbonded, resultCount[stakingtypes.Unbonded],
|
||||
stakingtypes.BondStatusUnbonding, resultCount[stakingtypes.Unbonding],
|
||||
)
|
||||
remainder := InitializeRemainder(ctx, precisebankKeeper, aggregateSum)
|
||||
logger.Info("remainder amount initialized in x/precisebank", "remainder", remainder)
|
||||
|
||||
stakingParams := app.stakingKeeper.GetParams(ctx)
|
||||
stakingParams.MinCommissionRate = ValidatorMinimumCommission
|
||||
app.stakingKeeper.SetParams(ctx, stakingParams)
|
||||
// Migrate fractional balances, reserve, and ensure reserve fully backs all
|
||||
// fractional balances.
|
||||
if err := TransferFractionalBalanceReserve(
|
||||
ctx,
|
||||
accountKeeper,
|
||||
bankKeeper,
|
||||
precisebankKeeper,
|
||||
); err != nil {
|
||||
return fmt.Errorf("reserve transfer: %w", err)
|
||||
}
|
||||
|
||||
app.Logger().Info(
|
||||
"updated x/staking params minimum commission rate",
|
||||
"MinCommissionRate", stakingParams.MinCommissionRate,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateIncentiveParams modifies the earn rewards period for bkava to be 600K KAVA per year.
|
||||
func UpdateIncentiveParams(
|
||||
// TransferFractionalBalances migrates fractional balances from x/evmutil to
|
||||
// x/precisebank. It sets the fractional balance in x/precisebank and deletes
|
||||
// the account from x/evmutil. Returns the aggregate sum of all fractional
|
||||
// balances.
|
||||
func TransferFractionalBalances(
|
||||
ctx sdk.Context,
|
||||
app App,
|
||||
) {
|
||||
// incentiveParams := app.incentiveKeeper.GetParams(ctx)
|
||||
evmutilKeeper evmutilkeeper.Keeper,
|
||||
precisebankKeeper precisebankkeeper.Keeper,
|
||||
) (sdkmath.Int, error) {
|
||||
aggregateSum := sdkmath.ZeroInt()
|
||||
|
||||
// bkava annualized rewards: 600K KAVA
|
||||
// newAmount := sdkmath.LegacyNewDec(600_000).
|
||||
// MulInt(kavaConversionFactor).
|
||||
// QuoInt(secondsPerYear).
|
||||
// TruncateInt()
|
||||
var iterErr error
|
||||
|
||||
// for i := range incentiveParams.EarnRewardPeriods {
|
||||
// if incentiveParams.EarnRewardPeriods[i].CollateralType != "bkava" {
|
||||
// continue
|
||||
// }
|
||||
evmutilKeeper.IterateAllAccounts(ctx, func(acc evmutiltypes.Account) bool {
|
||||
// Set account balance in x/precisebank
|
||||
precisebankKeeper.SetFractionalBalance(ctx, acc.Address, acc.Balance)
|
||||
|
||||
// // Update rewards per second via index
|
||||
// incentiveParams.EarnRewardPeriods[i].RewardsPerSecond = sdk.NewCoins(
|
||||
// sdk.NewCoin("ukava", newAmount),
|
||||
// )
|
||||
// }
|
||||
// Delete account from x/evmutil
|
||||
iterErr := evmutilKeeper.SetAccount(ctx, evmutiltypes.Account{
|
||||
Address: acc.Address,
|
||||
// Set balance to 0 to delete it
|
||||
Balance: sdkmath.ZeroInt(),
|
||||
})
|
||||
|
||||
// app.incentiveKeeper.SetParams(ctx, incentiveParams)
|
||||
// Halt iteration if there was an error
|
||||
if iterErr != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// Aggregate sum of all fractional balances
|
||||
aggregateSum = aggregateSum.Add(acc.Balance)
|
||||
|
||||
// Continue iterating
|
||||
return false
|
||||
})
|
||||
|
||||
return aggregateSum, iterErr
|
||||
}
|
||||
|
||||
// InitializeRemainder initializes the remainder amount in x/precisebank. It
|
||||
// calculates the remainder amount that is needed to ensure that the sum of all
|
||||
// fractional balances is a multiple of the conversion factor. The remainder
|
||||
// amount is stored in the store and returned.
|
||||
func InitializeRemainder(
|
||||
ctx sdk.Context,
|
||||
precisebankKeeper precisebankkeeper.Keeper,
|
||||
aggregateSum sdkmath.Int,
|
||||
) sdkmath.Int {
|
||||
// Extra fractional coins that exceed the conversion factor.
|
||||
// This extra + remainder should equal the conversion factor to ensure
|
||||
// (sum(fBalances) + remainder) % conversionFactor = 0
|
||||
extraFractionalAmount := aggregateSum.Mod(precisebanktypes.ConversionFactor())
|
||||
remainder := precisebanktypes.ConversionFactor().
|
||||
Sub(extraFractionalAmount).
|
||||
// Mod conversion factor to ensure remainder is valid.
|
||||
// If extraFractionalAmount is a multiple of conversion factor, the
|
||||
// remainder is 0.
|
||||
Mod(precisebanktypes.ConversionFactor())
|
||||
|
||||
// Panics if the remainder is invalid. In a correct chain state and only
|
||||
// mint/burns due to transfers, this would be 0.
|
||||
precisebankKeeper.SetRemainderAmount(ctx, remainder)
|
||||
|
||||
return remainder
|
||||
}
|
||||
|
||||
// TransferFractionalBalanceReserve migrates the fractional balance reserve from
|
||||
// x/evmutil to x/precisebank. It transfers the reserve balance from x/evmutil
|
||||
// to x/precisebank and ensures that the reserve fully backs all fractional
|
||||
// balances. It mints or burns coins to back the fractional balances exactly.
|
||||
func TransferFractionalBalanceReserve(
|
||||
ctx sdk.Context,
|
||||
accountKeeper evmutiltypes.AccountKeeper,
|
||||
bankKeeper bankkeeper.Keeper,
|
||||
precisebankKeeper precisebankkeeper.Keeper,
|
||||
) error {
|
||||
logger := ctx.Logger()
|
||||
|
||||
// Transfer x/evmutil reserve to x/precisebank.
|
||||
evmutilAddr := accountKeeper.GetModuleAddress(evmutiltypes.ModuleName)
|
||||
reserveBalance := bankKeeper.GetBalance(ctx, evmutilAddr, precisebanktypes.IntegerCoinDenom)
|
||||
|
||||
if err := bankKeeper.SendCoinsFromModuleToModule(
|
||||
ctx,
|
||||
evmutiltypes.ModuleName, // from x/evmutil
|
||||
precisebanktypes.ModuleName, // to x/precisebank
|
||||
sdk.NewCoins(reserveBalance),
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to transfer reserve from x/evmutil to x/precisebank: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(fmt.Sprintf("transferred reserve balance: %s", reserveBalance))
|
||||
|
||||
// Ensure x/precisebank reserve fully backs all fractional balances.
|
||||
totalFractionalBalances := precisebankKeeper.GetTotalSumFractionalBalances(ctx)
|
||||
|
||||
// Does NOT ensure state is correct, total fractional balances should be a
|
||||
// multiple of conversion factor but is not guaranteed due to the remainder.
|
||||
// Remainder initialization is handled by InitializeRemainder.
|
||||
|
||||
// Determine how much the reserve is off by, e.g. unbacked amount
|
||||
expectedReserveBalance := totalFractionalBalances.Quo(precisebanktypes.ConversionFactor())
|
||||
|
||||
// If there is a remainder (totalFractionalBalances % conversionFactor != 0),
|
||||
// then expectedReserveBalance is rounded up to the nearest integer.
|
||||
if totalFractionalBalances.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
|
||||
expectedReserveBalance = expectedReserveBalance.Add(sdkmath.OneInt())
|
||||
}
|
||||
|
||||
unbackedAmount := expectedReserveBalance.Sub(reserveBalance.Amount)
|
||||
logger.Info(fmt.Sprintf("total account fractional balances: %s", totalFractionalBalances))
|
||||
|
||||
// Three possible cases:
|
||||
// 1. Reserve is not enough, mint coins to back the fractional balances
|
||||
// 2. Reserve is too much, burn coins to back the fractional balances exactly
|
||||
// 3. Reserve is exactly enough, no action needed
|
||||
if unbackedAmount.IsPositive() {
|
||||
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount))
|
||||
if err := bankKeeper.MintCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
|
||||
return fmt.Errorf("failed to mint extra reserve coins: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(fmt.Sprintf("unbacked amount minted to reserve: %s", unbackedAmount))
|
||||
} else if unbackedAmount.IsNegative() {
|
||||
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount.Neg()))
|
||||
if err := bankKeeper.BurnCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
|
||||
return fmt.Errorf("failed to burn extra reserve coins: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(fmt.Sprintf("extra reserve amount burned: %s", unbackedAmount.Neg()))
|
||||
} else {
|
||||
logger.Info("reserve exactly backs fractional balances, no mint/burn needed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1,239 +1,434 @@
|
||||
package app_test
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
"github.com/evmos/ethermint/crypto/ethsecp256k1"
|
||||
"github.com/stretchr/testify/require"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
// func TestUpgradeCommunityParams_Mainnet(t *testing.T) {
|
||||
// require.Equal(
|
||||
// t,
|
||||
// sdkmath.LegacyZeroDec().String(),
|
||||
// app.CommunityParams_Mainnet.StakingRewardsPerSecond.String(),
|
||||
// )
|
||||
|
||||
// require.Equal(
|
||||
// t,
|
||||
// // Manually confirmed
|
||||
// "317097.919837645865043125",
|
||||
// app.CommunityParams_Mainnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
|
||||
// "mainnet kava per second should be correct",
|
||||
// )
|
||||
// }
|
||||
|
||||
// func TestUpgradeCommunityParams_Testnet(t *testing.T) {
|
||||
// require.Equal(
|
||||
// t,
|
||||
// sdkmath.LegacyZeroDec().String(),
|
||||
// app.CommunityParams_Testnet.StakingRewardsPerSecond.String(),
|
||||
// )
|
||||
|
||||
// require.Equal(
|
||||
// t,
|
||||
// // Manually confirmed
|
||||
// "475646879756.468797564687975646",
|
||||
// app.CommunityParams_Testnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
|
||||
// "testnet kava per second should be correct",
|
||||
// )
|
||||
// }
|
||||
|
||||
func TestUpdateValidatorMinimumCommission(t *testing.T) {
|
||||
tApp := app.NewTestApp()
|
||||
tApp.InitializeFromGenesisStates()
|
||||
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
|
||||
|
||||
sk := tApp.GetStakingKeeper()
|
||||
stakingParams := sk.GetParams(ctx)
|
||||
stakingParams.MinCommissionRate = sdk.ZeroDec()
|
||||
sk.SetParams(ctx, stakingParams)
|
||||
|
||||
// Set some validators with varying commission rates
|
||||
|
||||
vals := []struct {
|
||||
name string
|
||||
operatorAddr sdk.ValAddress
|
||||
consPriv *ethsecp256k1.PrivKey
|
||||
commissionRateMin sdk.Dec
|
||||
commissionRateMax sdk.Dec
|
||||
shouldBeUpdated bool
|
||||
func TestMigrateEvmutilToPrecisebank(t *testing.T) {
|
||||
// Full test case with all components together
|
||||
tests := []struct {
|
||||
name string
|
||||
initialReserve sdkmath.Int
|
||||
fractionalBalances []sdkmath.Int
|
||||
}{
|
||||
{
|
||||
name: "zero commission rate",
|
||||
operatorAddr: sdk.ValAddress("val0"),
|
||||
consPriv: generateConsKey(t),
|
||||
commissionRateMin: sdk.ZeroDec(),
|
||||
commissionRateMax: sdk.ZeroDec(),
|
||||
shouldBeUpdated: true,
|
||||
"no fractional balances",
|
||||
sdkmath.NewInt(0),
|
||||
[]sdkmath.Int{},
|
||||
},
|
||||
{
|
||||
name: "0.01 commission rate",
|
||||
operatorAddr: sdk.ValAddress("val1"),
|
||||
consPriv: generateConsKey(t),
|
||||
commissionRateMin: sdk.MustNewDecFromStr("0.01"),
|
||||
commissionRateMax: sdk.MustNewDecFromStr("0.01"),
|
||||
shouldBeUpdated: true,
|
||||
"sufficient reserve, 0 remainder",
|
||||
// Accounts adding up to 2 int units, same as reserve
|
||||
sdkmath.NewInt(2),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "0.05 commission rate",
|
||||
operatorAddr: sdk.ValAddress("val2"),
|
||||
consPriv: generateConsKey(t),
|
||||
commissionRateMin: sdk.MustNewDecFromStr("0.05"),
|
||||
commissionRateMax: sdk.MustNewDecFromStr("0.05"),
|
||||
shouldBeUpdated: false,
|
||||
"insufficient reserve, 0 remainder",
|
||||
// Accounts adding up to 2 int units, but only 1 int unit in reserve
|
||||
sdkmath.NewInt(1),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "0.06 commission rate",
|
||||
operatorAddr: sdk.ValAddress("val3"),
|
||||
consPriv: generateConsKey(t),
|
||||
commissionRateMin: sdk.MustNewDecFromStr("0.06"),
|
||||
commissionRateMax: sdk.MustNewDecFromStr("0.06"),
|
||||
shouldBeUpdated: false,
|
||||
"excess reserve, 0 remainder",
|
||||
// Accounts adding up to 2 int units, but 3 int unit in reserve
|
||||
sdkmath.NewInt(3),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "0.5 commission rate",
|
||||
operatorAddr: sdk.ValAddress("val4"),
|
||||
consPriv: generateConsKey(t),
|
||||
commissionRateMin: sdk.MustNewDecFromStr("0.5"),
|
||||
commissionRateMax: sdk.MustNewDecFromStr("0.5"),
|
||||
shouldBeUpdated: false,
|
||||
"sufficient reserve, non-zero remainder",
|
||||
// Accounts adding up to 1.5 int units, same as reserve
|
||||
sdkmath.NewInt(2),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"insufficient reserve, non-zero remainder",
|
||||
// Accounts adding up to 1.5 int units, less than reserve,
|
||||
// Reserve should be 2 and remainder 0.5
|
||||
sdkmath.NewInt(1),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"excess reserve, non-zero remainder",
|
||||
// Accounts adding up to 1.5 int units, 3 int units in reserve
|
||||
sdkmath.NewInt(3),
|
||||
[]sdkmath.Int{
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, v := range vals {
|
||||
val, err := stakingtypes.NewValidator(
|
||||
v.operatorAddr,
|
||||
v.consPriv.PubKey(),
|
||||
stakingtypes.Description{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
val.Commission.Rate = v.commissionRateMin
|
||||
val.Commission.MaxRate = v.commissionRateMax
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tApp := app.NewTestApp()
|
||||
tApp.InitializeFromGenesisStates()
|
||||
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
|
||||
|
||||
err = sk.SetValidatorByConsAddr(ctx, val)
|
||||
require.NoError(t, err)
|
||||
sk.SetValidator(ctx, val)
|
||||
}
|
||||
ak := tApp.GetAccountKeeper()
|
||||
bk := tApp.GetBankKeeper()
|
||||
evmuk := tApp.GetEvmutilKeeper()
|
||||
pbk := tApp.GetPrecisebankKeeper()
|
||||
|
||||
require.NotPanics(
|
||||
t, func() {
|
||||
app.UpdateValidatorMinimumCommission(ctx, tApp.App)
|
||||
},
|
||||
)
|
||||
reserveCoin := sdk.NewCoin(precisebanktypes.IntegerCoinDenom, tt.initialReserve)
|
||||
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(reserveCoin))
|
||||
require.NoError(t, err)
|
||||
|
||||
stakingParamsAfter := sk.GetParams(ctx)
|
||||
require.Equal(t, stakingParamsAfter.MinCommissionRate, app.ValidatorMinimumCommission)
|
||||
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
|
||||
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
|
||||
|
||||
// Check that all validators have a commission rate >= 5%
|
||||
for _, val := range vals {
|
||||
t.Run(val.name, func(t *testing.T) {
|
||||
validator, found := sk.GetValidator(ctx, val.operatorAddr)
|
||||
require.True(t, found, "validator should be found")
|
||||
// Double check balances
|
||||
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
|
||||
require.True(
|
||||
t,
|
||||
validator.GetCommission().GTE(app.ValidatorMinimumCommission),
|
||||
"commission rate should be >= 5%",
|
||||
)
|
||||
require.Equal(t, tt.initialReserve, oldReserveBalance.Amount, "initial x/evmutil reserve balance")
|
||||
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
|
||||
|
||||
require.True(
|
||||
t,
|
||||
validator.Commission.MaxRate.GTE(app.ValidatorMinimumCommission),
|
||||
"commission rate max should be >= 5%, got %s",
|
||||
validator.Commission.MaxRate,
|
||||
)
|
||||
// Set accounts
|
||||
for i, balance := range tt.fractionalBalances {
|
||||
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
|
||||
|
||||
if val.shouldBeUpdated {
|
||||
require.Equal(
|
||||
t,
|
||||
ctx.BlockTime(),
|
||||
validator.Commission.UpdateTime,
|
||||
"commission update time should be set to block time",
|
||||
)
|
||||
} else {
|
||||
require.Equal(
|
||||
t,
|
||||
time.Unix(0, 0).UTC(),
|
||||
validator.Commission.UpdateTime,
|
||||
"commission update time should not be changed -- default value is 0",
|
||||
)
|
||||
err := evmuk.SetBalance(ctx, addr, balance)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Run full x/evmutil -> x/precisebank migration
|
||||
err = app.MigrateEvmutilToPrecisebank(
|
||||
ctx,
|
||||
ak,
|
||||
bk,
|
||||
evmuk,
|
||||
pbk,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check old reserve is empty
|
||||
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
|
||||
|
||||
// Check new reserve fully backs fractional balances
|
||||
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
|
||||
remainder := pbk.GetRemainderAmount(ctx)
|
||||
|
||||
expectedReserveBal := fractionalBalanceTotal.Add(remainder)
|
||||
require.Equal(
|
||||
t,
|
||||
expectedReserveBal,
|
||||
newReserveBalanceAfter.Amount.Mul(precisebanktypes.ConversionFactor()),
|
||||
"new reserve should equal total fractional balances",
|
||||
)
|
||||
|
||||
// Check balances are deleted in evmutil and migrated to precisebank
|
||||
for i := range tt.fractionalBalances {
|
||||
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
|
||||
acc := evmuk.GetAccount(ctx, addr)
|
||||
require.Nil(t, acc, "account should be deleted")
|
||||
|
||||
balance := pbk.GetFractionalBalance(ctx, addr)
|
||||
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
|
||||
}
|
||||
|
||||
// Checks balances valid and remainder
|
||||
res, stop := precisebankkeeper.AllInvariants(pbk)(ctx)
|
||||
require.Falsef(t, stop, "invariants should pass: %s", res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// func TestUpdateIncentiveParams(t *testing.T) {
|
||||
// tApp := app.NewTestApp()
|
||||
// tApp.InitializeFromGenesisStates()
|
||||
// ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
|
||||
func TestTransferFractionalBalances(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fractionalBalances []sdkmath.Int
|
||||
}{
|
||||
{
|
||||
"no fractional balances",
|
||||
[]sdkmath.Int{},
|
||||
},
|
||||
{
|
||||
"balanced fractional balances",
|
||||
[]sdkmath.Int{
|
||||
// 4 accounts
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"unbalanced balances",
|
||||
[]sdkmath.Int{
|
||||
// 3 accounts
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// ik := tApp.GetIncentiveKeeper()
|
||||
// params := ik.GetParams(ctx)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tApp := app.NewTestApp()
|
||||
tApp.InitializeFromGenesisStates()
|
||||
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
|
||||
|
||||
// startPeriod := time.Date(2021, 10, 26, 15, 0, 0, 0, time.UTC)
|
||||
// endPeriod := time.Date(2022, 10, 26, 15, 0, 0, 0, time.UTC)
|
||||
evmutilk := tApp.GetEvmutilKeeper()
|
||||
pbk := tApp.GetPrecisebankKeeper()
|
||||
|
||||
// params.EarnRewardPeriods = incentivetypes.MultiRewardPeriods{
|
||||
// incentivetypes.NewMultiRewardPeriod(
|
||||
// true,
|
||||
// "bkava",
|
||||
// startPeriod,
|
||||
// endPeriod,
|
||||
// sdk.NewCoins(
|
||||
// sdk.NewCoin("ukava", sdk.NewInt(159459)),
|
||||
// ),
|
||||
// ),
|
||||
// }
|
||||
// ik.SetParams(ctx, params)
|
||||
for i, balance := range tt.fractionalBalances {
|
||||
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
|
||||
|
||||
// beforeParams := ik.GetParams(ctx)
|
||||
// require.Equal(t, params, beforeParams, "initial incentive params should be set")
|
||||
err := evmutilk.SetBalance(ctx, addr, balance)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// // -- UPGRADE
|
||||
// app.UpdateIncentiveParams(ctx, tApp.App)
|
||||
// Run balance transfer
|
||||
aggregateSum, err := app.TransferFractionalBalances(
|
||||
ctx,
|
||||
evmutilk,
|
||||
pbk,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// // -- After
|
||||
// afterParams := ik.GetParams(ctx)
|
||||
// Check balances are deleted in evmutil and migrated to precisebank
|
||||
sum := sdkmath.ZeroInt()
|
||||
for i := range tt.fractionalBalances {
|
||||
sum = sum.Add(tt.fractionalBalances[i])
|
||||
|
||||
// require.Len(
|
||||
// t,
|
||||
// afterParams.EarnRewardPeriods[0].RewardsPerSecond,
|
||||
// 1,
|
||||
// "bkava earn reward period should only contain 1 coin",
|
||||
// )
|
||||
// require.Equal(
|
||||
// t,
|
||||
// // Manual calculation of
|
||||
// // 600,000 * 1000,000 / (365 * 24 * 60 * 60)
|
||||
// sdk.NewCoin("ukava", sdkmath.NewInt(19025)),
|
||||
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0],
|
||||
// "bkava earn reward period should be updated",
|
||||
// )
|
||||
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
|
||||
acc := evmutilk.GetAccount(ctx, addr)
|
||||
require.Nil(t, acc, "account should be deleted")
|
||||
|
||||
// // Check that other params are not changed
|
||||
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0] = beforeParams.EarnRewardPeriods[0].RewardsPerSecond[0]
|
||||
// require.Equal(
|
||||
// t,
|
||||
// beforeParams,
|
||||
// afterParams,
|
||||
// "other param values should not be changed",
|
||||
// )
|
||||
// }
|
||||
balance := pbk.GetFractionalBalance(ctx, addr)
|
||||
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
|
||||
}
|
||||
|
||||
func generateConsKey(
|
||||
t *testing.T,
|
||||
) *ethsecp256k1.PrivKey {
|
||||
t.Helper()
|
||||
|
||||
key, err := ethsecp256k1.GenerateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
return key
|
||||
require.Equal(t, sum, aggregateSum, "aggregate sum should be correct")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeRemainder(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
giveAggregateSum sdkmath.Int
|
||||
wantRemainder sdkmath.Int
|
||||
}{
|
||||
{
|
||||
"0 remainder, 1ukava",
|
||||
precisebanktypes.ConversionFactor(),
|
||||
sdkmath.NewInt(0),
|
||||
},
|
||||
{
|
||||
"0 remainder, multiple ukava",
|
||||
precisebanktypes.ConversionFactor().MulRaw(5),
|
||||
sdkmath.NewInt(0),
|
||||
},
|
||||
{
|
||||
"non-zero remainder, min",
|
||||
precisebanktypes.ConversionFactor().SubRaw(1),
|
||||
sdkmath.NewInt(1),
|
||||
},
|
||||
{
|
||||
"non-zero remainder, max",
|
||||
sdkmath.NewInt(1),
|
||||
precisebanktypes.ConversionFactor().SubRaw(1),
|
||||
},
|
||||
{
|
||||
"non-zero remainder, half",
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tApp := app.NewTestApp()
|
||||
tApp.InitializeFromGenesisStates()
|
||||
|
||||
pbk := tApp.GetPrecisebankKeeper()
|
||||
|
||||
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
|
||||
|
||||
remainder := app.InitializeRemainder(
|
||||
ctx,
|
||||
tApp.GetPrecisebankKeeper(),
|
||||
tt.giveAggregateSum,
|
||||
)
|
||||
require.Equal(t, tt.wantRemainder, remainder)
|
||||
|
||||
// Check actual state
|
||||
remainderAfter := pbk.GetRemainderAmount(ctx)
|
||||
require.Equal(t, tt.wantRemainder, remainderAfter)
|
||||
|
||||
// Not checking invariants here since it requires actual balance state
|
||||
aggregateSumWithRemainder := tt.giveAggregateSum.Add(remainder)
|
||||
require.True(
|
||||
t,
|
||||
aggregateSumWithRemainder.
|
||||
Mod(precisebanktypes.ConversionFactor()).
|
||||
IsZero(),
|
||||
"remainder + aggregate sum should be a multiple of the conversion factor",
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransferFractionalBalanceReserve(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
initialReserve sdk.Coin
|
||||
fractionalBalances []sdkmath.Int
|
||||
}{
|
||||
{
|
||||
"balanced reserve, no remainder",
|
||||
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
|
||||
[]sdkmath.Int{
|
||||
// 2 accounts
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"insufficient reserve",
|
||||
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
|
||||
[]sdkmath.Int{
|
||||
// 4 accounts, total 2 int units
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"extra reserve funds",
|
||||
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(2)),
|
||||
[]sdkmath.Int{
|
||||
// 2 accounts, total 1 int units
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"insufficient reserve, with remainder",
|
||||
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
|
||||
[]sdkmath.Int{
|
||||
// 5 accounts, total 2.5 int units
|
||||
// Expected 3 int units in reserve, 0.5 remainder
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
{
|
||||
"extra reserve funds, with remainder",
|
||||
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(3)),
|
||||
[]sdkmath.Int{
|
||||
// 3 accounts, total 1.5 int units.
|
||||
// Expected 2 int units in reserve, 0.5 remainder
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
precisebanktypes.ConversionFactor().QuoRaw(2),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tApp := app.NewTestApp()
|
||||
tApp.InitializeFromGenesisStates()
|
||||
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
|
||||
|
||||
bk := tApp.GetBankKeeper()
|
||||
pbk := tApp.GetPrecisebankKeeper()
|
||||
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(tt.initialReserve))
|
||||
require.NoError(t, err)
|
||||
|
||||
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
|
||||
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
|
||||
|
||||
// Double check balances
|
||||
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
|
||||
require.Equal(t, tt.initialReserve, oldReserveBalance)
|
||||
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
|
||||
|
||||
for i, balance := range tt.fractionalBalances {
|
||||
addr := sdk.AccAddress([]byte{byte(i)})
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
pbk.SetFractionalBalance(ctx, addr, balance)
|
||||
}, "given fractional balances should be valid")
|
||||
}
|
||||
|
||||
// Run reserve migration
|
||||
err = app.TransferFractionalBalanceReserve(
|
||||
ctx,
|
||||
tApp.GetAccountKeeper(),
|
||||
bk,
|
||||
tApp.GetPrecisebankKeeper(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check old reserve is empty
|
||||
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
|
||||
|
||||
// Check new reserve fully backs fractional balances
|
||||
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
|
||||
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
|
||||
|
||||
expectedReserveBal := fractionalBalanceTotal.
|
||||
Quo(precisebanktypes.ConversionFactor())
|
||||
|
||||
// Check if theres a remainder
|
||||
if fractionalBalanceTotal.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
|
||||
expectedReserveBal = expectedReserveBal.Add(sdkmath.OneInt())
|
||||
}
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
expectedReserveBal,
|
||||
newReserveBalanceAfter.Amount,
|
||||
"new reserve should equal total fractional balances + remainder",
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -28,9 +28,9 @@ DIRS := $(BUILD_CACHE_DIR) $(BIN_DIR)
|
||||
### Tool Versions ###
|
||||
################################################################################
|
||||
GO_BIN ?= go
|
||||
PROTOC_VERSION ?= v21.9
|
||||
BUF_VERSION ?= v1.9.0
|
||||
PROTOC_GEN_GOCOSMOS_VERSION ?= v0.3.1
|
||||
PROTOC_VERSION ?= v25.1
|
||||
BUF_VERSION ?= v1.28.1
|
||||
PROTOC_GEN_GOCOSMOS_VERSION ?= $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/gogoproto)
|
||||
PROTOC_GEN_GRPC_GATEWAY_VERSION ?= $(shell $(GO_BIN) list -m github.com/grpc-ecosystem/grpc-gateway| sed 's:.* ::')
|
||||
PROTOC_GEN_DOC_VERSION ?= v1.5.1
|
||||
SWAGGER_COMBINE_VERSION ?= v1.4.0
|
||||
@ -68,7 +68,7 @@ $(PROTOC_VERSION_FILE):
|
||||
mkdir -p protoc && cd protoc; \
|
||||
curl -sOL $(PROTOC_DOWNLOAD_URL); \
|
||||
unzip -q $(PROTOC_ARCHIVE_NAME) bin/protoc
|
||||
@cp $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
|
||||
@cp -f $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
|
||||
@rm -rf $(BUILD_CACHE_DIR)/protoc
|
||||
|
||||
PROTOC := $(BIN_DIR)/protoc
|
||||
@ -93,7 +93,7 @@ $(BUF_VERSION_FILE):
|
||||
mkdir -p buf && cd buf; \
|
||||
curl -sOL $(BUF_DOWNLOAD_URL); \
|
||||
tar -xzf $(BUF_ARCHIVE_NAME) buf/bin/buf
|
||||
@cp $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
|
||||
@cp -f $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
|
||||
@rm -rf $(BUILD_CACHE_DIR)/buf
|
||||
|
||||
BUF := $(BIN_DIR)/buf
|
||||
@ -113,8 +113,8 @@ $(PROTOC_GEN_GOCOSMOS_VERSION_FILE):
|
||||
@touch $(PROTOC_GEN_GOCOSMOS_VERSION_FILE)
|
||||
@cd $(BUILD_CACHE_DIR); \
|
||||
mkdir -p protoc-gen-gocosmos && cd protoc-gen-gocosmos; \
|
||||
git clone -q https://github.com/regen-network/cosmos-proto.git; \
|
||||
cd cosmos-proto; \
|
||||
git clone -q https://github.com/cosmos/gogoproto.git; \
|
||||
cd gogoproto; \
|
||||
git checkout -q $(PROTOC_GEN_GOCOSMOS_VERSION); \
|
||||
GOBIN=$(ROOT_DIR)/$(BIN_DIR) $(GO_BIN) install ./protoc-gen-gocosmos
|
||||
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-gocosmos
|
||||
@ -185,7 +185,7 @@ $(PROTOC_GEN_DOC_VERSION_FILE):
|
||||
mkdir -p protoc-gen-doc && cd protoc-gen-doc; \
|
||||
curl -sOL $(PROTOC_GEN_DOC_DOWNLOAD_URL); \
|
||||
tar -xzf $(PROTOC_GEN_DOC_ARCHIVE_NAME) protoc-gen-doc
|
||||
@cp $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
|
||||
@cp -f $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
|
||||
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-doc
|
||||
|
||||
PROTOC_GEN_DOC := $(BIN_DIR)/protoc-gen-doc
|
||||
|
45
build/lint.mk
Normal file
45
build/lint.mk
Normal file
@ -0,0 +1,45 @@
|
||||
################################################################################
|
||||
### Required Variables ###
|
||||
################################################################################
|
||||
ifndef DOCKER
|
||||
$(error DOCKER not set)
|
||||
endif
|
||||
|
||||
ifndef BUILD_DIR
|
||||
$(error BUILD_DIR not set)
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
### Lint Settings ###
|
||||
################################################################################
|
||||
|
||||
LINT_FROM_REV ?= $(shell git merge-base origin/master HEAD)
|
||||
|
||||
GOLANGCI_VERSION ?= $(shell cat .golangci-version)
|
||||
GOLANGCI_IMAGE_TAG ?= golangci/golangci-lint:$(GOLANGCI_VERSION)
|
||||
|
||||
GOLANGCI_DIR ?= $(CURDIR)/$(BUILD_DIR)/.golangci-lint
|
||||
|
||||
GOLANGCI_CACHE_DIR ?= $(GOLANGCI_DIR)/$(GOLANGCI_VERSION)-cache
|
||||
GOLANGCI_MOD_CACHE_DIR ?= $(GOLANGCI_DIR)/go-mod
|
||||
|
||||
################################################################################
|
||||
### Lint Target ###
|
||||
################################################################################
|
||||
|
||||
.PHONY: lint
|
||||
lint: $(GOLANGCI_CACHE_DIR) $(GOLANGCI_MOD_CACHE_DIR)
|
||||
@echo "Running lint from rev $(LINT_FROM_REV), use LINT_FROM_REV var to override."
|
||||
$(DOCKER) run -t --rm \
|
||||
-v $(GOLANGCI_CACHE_DIR):/root/.cache \
|
||||
-v $(GOLANGCI_MOD_CACHE_DIR):/go/pkg/mod \
|
||||
-v $(CURDIR):/app \
|
||||
-w /app \
|
||||
$(GOLANGCI_IMAGE_TAG) \
|
||||
golangci-lint run -v --new-from-rev $(LINT_FROM_REV)
|
||||
|
||||
$(GOLANGCI_CACHE_DIR):
|
||||
@mkdir -p $@
|
||||
|
||||
$(GOLANGCI_MOD_CACHE_DIR):
|
||||
@mkdir -p $@
|
@ -14,13 +14,23 @@ PROTOBUF_ANY_DOWNLOAD_URL = https://raw.githubusercontent.com/protocolbuffers/pr
|
||||
#
|
||||
# Proto dependencies under go.mod
|
||||
#
|
||||
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/gogo/protobuf)
|
||||
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/tendermint/tendermint)
|
||||
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/gogoproto)
|
||||
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cometbft/cometbft)
|
||||
COSMOS_PROTO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-proto)
|
||||
COSMOS_SDK_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-sdk)
|
||||
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v6)
|
||||
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v7)
|
||||
ETHERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/evmos/ethermint)
|
||||
|
||||
#
|
||||
# ICS23 Proof Proto
|
||||
#
|
||||
ICS23_VERSION := $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/ics23/go)
|
||||
|
||||
ICS23_PROOFS_PROTO_PATH := cosmos/ics23/v1/proofs.proto
|
||||
ICS23_PROOFS_PROTO_LOCAL_PATH := third_party/proto/$(ICS23_PROOFS_PROTO_PATH)
|
||||
|
||||
ICS23_PROOFS_PROTO_DOWNLOAD_URL := https://raw.githubusercontent.com/cosmos/ics23/go/$(ICS23_VERSION)/proto/$(ICS23_PROOFS_PROTO_PATH)
|
||||
|
||||
#
|
||||
# Common target directories
|
||||
#
|
||||
@ -44,18 +54,21 @@ proto-update-deps: check-rsync ## Update all third party proto files
|
||||
@curl -sSL $(PROTOBUF_ANY_DOWNLOAD_URL)/any.proto > $(PROTOBUF_GOOGLE_TYPES)/any.proto
|
||||
|
||||
@mkdir -p client/docs
|
||||
@cp $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
|
||||
@cp $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
|
||||
@cp -f $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
|
||||
@cp -f $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
|
||||
@cp -f $(ETHERMINT_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/ethermint-swagger.yml
|
||||
|
||||
@mkdir -p $(COSMOS_PROTO_TYPES)
|
||||
@cp $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
|
||||
@cp -f $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
|
||||
|
||||
@mkdir -p $(dir $(ICS23_PROOFS_PROTO_LOCAL_PATH))
|
||||
@curl -sSL $(ICS23_PROOFS_PROTO_DOWNLOAD_URL) > $(ICS23_PROOFS_PROTO_LOCAL_PATH)
|
||||
|
||||
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(GOGO_PATH)/gogoproto third_party/proto
|
||||
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(TENDERMINT_PATH)/proto third_party
|
||||
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(COSMOS_SDK_PATH)/proto third_party
|
||||
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(IBC_GO_PATH)/proto third_party
|
||||
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(ETHERMINT_PATH)/proto third_party
|
||||
@cp -f $(IBC_GO_PATH)/third_party/proto/proofs.proto third_party/proto/proofs.proto
|
||||
|
||||
.PHONY: check-proto-deps
|
||||
check-proto-deps: proto-update-deps ## Return error code 1 if proto dependencies are not changed
|
||||
|
@ -1,7 +1,7 @@
|
||||
.PHONY: proto-lint check-proto-lint
|
||||
proto-lint check-proto-lint: install-build-deps
|
||||
@echo "Linting proto file"
|
||||
@$(BUF) lint
|
||||
@$(BUF) lint proto
|
||||
|
||||
.PHONY: proto-gen
|
||||
proto-gen: install-build-deps
|
||||
|
@ -1,6 +1,8 @@
|
||||
package chaincfg
|
||||
|
||||
import sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
import (
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
const (
|
||||
AppName = "0gchaind"
|
||||
|
2
ci/env/kava-internal-testnet/KAVA.VERSION
vendored
2
ci/env/kava-internal-testnet/KAVA.VERSION
vendored
@ -1 +1 @@
|
||||
a967d2fdda299ec8e1e3b99fb55bd06ecfdb0469
|
||||
6862cde560c70cb82f7908e6cef22ca223465bd2
|
||||
|
128
ci/env/kava-internal-testnet/genesis.json
vendored
128
ci/env/kava-internal-testnet/genesis.json
vendored
@ -22,6 +22,8 @@
|
||||
},
|
||||
"app_hash": "",
|
||||
"app_state": {
|
||||
"06-solomachine": null,
|
||||
"07-tendermint": null,
|
||||
"auction": {
|
||||
"next_auction_id": "1",
|
||||
"params": {
|
||||
@ -505,6 +507,10 @@
|
||||
{
|
||||
"address": "kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq",
|
||||
"coins": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
"amount": "500000000"
|
||||
},
|
||||
{
|
||||
"denom": "btcb",
|
||||
"amount": "200000000"
|
||||
@ -525,6 +531,10 @@
|
||||
"denom": "erc20/axelar/wbtc",
|
||||
"amount": "1000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"amount": "200000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/multichain/usdc",
|
||||
"amount": "1000000000000000000"
|
||||
@ -556,12 +566,20 @@
|
||||
{
|
||||
"denom": "usdx",
|
||||
"amount": "103000000000"
|
||||
},
|
||||
{
|
||||
"denom": "xrpb",
|
||||
"amount": "1000000000000000"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "kava1krh7k30pc9rteejpl2zycj0vau58y8c69xkzws",
|
||||
"coins": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
"amount": "100000000000000000"
|
||||
},
|
||||
{
|
||||
"denom": "btcb",
|
||||
"amount": "200000000"
|
||||
@ -582,6 +600,14 @@
|
||||
"denom": "erc20/axelar/wbtc",
|
||||
"amount": "1000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"amount": "200000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/tether/usdt",
|
||||
"amount": "100000000000"
|
||||
},
|
||||
{
|
||||
"denom": "hard",
|
||||
"amount": "1000000000"
|
||||
@ -597,6 +623,10 @@
|
||||
{
|
||||
"denom": "usdx",
|
||||
"amount": "103000000000"
|
||||
},
|
||||
{
|
||||
"denom": "xrpb",
|
||||
"amount": "103000000000"
|
||||
}
|
||||
]
|
||||
},
|
||||
@ -818,6 +848,7 @@
|
||||
"gov_denom": "ukava",
|
||||
"params": {
|
||||
"circuit_breaker": false,
|
||||
"liquidation_block_interval": 500,
|
||||
"collateral_params": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
@ -989,8 +1020,7 @@
|
||||
"check_collateralization_index_count": "10",
|
||||
"conversion_factor": "6"
|
||||
}
|
||||
]
|
||||
,
|
||||
],
|
||||
"debt_auction_lot": "10000000000",
|
||||
"debt_auction_threshold": "100000000000",
|
||||
"debt_param": {
|
||||
@ -1237,7 +1267,15 @@
|
||||
"votes": []
|
||||
},
|
||||
"community": {
|
||||
"params": {}
|
||||
"params": {
|
||||
"upgrade_time_disable_inflation": "2023-11-01T00:00:00Z",
|
||||
"upgrade_time_set_staking_rewards_per_second": "744191",
|
||||
"staking_rewards_per_second": "0"
|
||||
},
|
||||
"staking_rewards_state": {
|
||||
"last_accumulation_time": "0001-01-01T00:00:00Z",
|
||||
"last_truncation_error": "0"
|
||||
}
|
||||
},
|
||||
"crisis": {
|
||||
"constant_fee": {
|
||||
@ -2063,6 +2101,25 @@
|
||||
}
|
||||
],
|
||||
"nested_types": []
|
||||
},
|
||||
{
|
||||
"msg_type_url": "/kava.committee.v1beta1.MsgVote",
|
||||
"msg_value_type_name": "MsgValueCommitteeVote",
|
||||
"value_types": [
|
||||
{
|
||||
"name": "proposal_id",
|
||||
"type": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "voter",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "vote_type",
|
||||
"type": "int32"
|
||||
}
|
||||
],
|
||||
"nested_types": []
|
||||
}
|
||||
],
|
||||
"allow_unprotected_txs": false
|
||||
@ -2225,22 +2282,27 @@
|
||||
"deposits": [],
|
||||
"votes": [],
|
||||
"proposals": [],
|
||||
"deposit_params": {
|
||||
"deposit_params": null,
|
||||
"voting_params": {
|
||||
"voting_period": "604800s"
|
||||
},
|
||||
"tally_params": null,
|
||||
"params": {
|
||||
"min_deposit": [
|
||||
{
|
||||
"denom": "ukava",
|
||||
"amount": "10000000"
|
||||
}
|
||||
],
|
||||
"max_deposit_period": "172800s"
|
||||
},
|
||||
"voting_params": {
|
||||
"voting_period": "600s"
|
||||
},
|
||||
"tally_params": {
|
||||
"max_deposit_period": "172800s",
|
||||
"voting_period": "604800s",
|
||||
"quorum": "0.334000000000000000",
|
||||
"threshold": "0.500000000000000000",
|
||||
"veto_threshold": "0.334000000000000000"
|
||||
"veto_threshold": "0.334000000000000000",
|
||||
"min_initial_deposit_ratio": "0.000000000000000000",
|
||||
"burn_vote_quorum": false,
|
||||
"burn_proposal_deposit_prevote": false,
|
||||
"burn_vote_veto": true
|
||||
}
|
||||
},
|
||||
"hard": {
|
||||
@ -2515,6 +2577,24 @@
|
||||
},
|
||||
"reserve_factor": "0.025000000000000000",
|
||||
"keeper_reward_percentage": "0.020000000000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"borrow_limit": {
|
||||
"has_max_limit": true,
|
||||
"maximum_limit": "0.000000000000000000",
|
||||
"loan_to_value": "0.000000000000000000"
|
||||
},
|
||||
"spot_market_id": "btc:usd:30",
|
||||
"conversion_factor": "100000000",
|
||||
"interest_rate_model": {
|
||||
"base_rate_apy": "0.000000000000000000",
|
||||
"base_multiplier": "0.050000000000000000",
|
||||
"kink": "0.800000000000000000",
|
||||
"jump_multiplier": "5.000000000000000000"
|
||||
},
|
||||
"reserve_factor": "0.025000000000000000",
|
||||
"keeper_reward_percentage": "0.020000000000000000"
|
||||
}
|
||||
],
|
||||
"minimum_borrow_usd_value": "10.000000000000000000"
|
||||
@ -2730,6 +2810,18 @@
|
||||
"amount": "787"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"collateral_type": "erc20/bitgo/wbtc",
|
||||
"start": "2022-11-11T15:00:00Z",
|
||||
"end": "2025-11-11T15:00:00Z",
|
||||
"rewards_per_second": [
|
||||
{
|
||||
"denom": "ukava",
|
||||
"amount": "787"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"hard_borrow_reward_periods": [],
|
||||
@ -3166,6 +3258,16 @@
|
||||
}
|
||||
},
|
||||
"params": null,
|
||||
"packetfowardmiddleware": {
|
||||
"params": {
|
||||
"fee_percentage": "0.000000000000000000"
|
||||
},
|
||||
"in_flight_packets": {}
|
||||
},
|
||||
"precisebank": {
|
||||
"balances": [],
|
||||
"remainder": "0"
|
||||
},
|
||||
"pricefeed": {
|
||||
"params": {
|
||||
"markets": [
|
||||
@ -3639,6 +3741,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"router": {},
|
||||
"savings": {
|
||||
"params": {
|
||||
"supported_denoms": [
|
||||
@ -3810,7 +3913,8 @@
|
||||
"params": {
|
||||
"send_enabled": true,
|
||||
"receive_enabled": true
|
||||
}
|
||||
},
|
||||
"total_escrowed": []
|
||||
},
|
||||
"upgrade": {},
|
||||
"validatorvesting": null,
|
||||
|
27
ci/env/kava-protonet/genesis.json
vendored
27
ci/env/kava-protonet/genesis.json
vendored
@ -837,6 +837,7 @@
|
||||
"gov_denom": "ukava",
|
||||
"params": {
|
||||
"circuit_breaker": false,
|
||||
"liquidation_block_interval": 500,
|
||||
"collateral_params": [
|
||||
{
|
||||
"auction_size": "50000000000",
|
||||
@ -2177,6 +2178,23 @@
|
||||
"quorum": "0.334000000000000000",
|
||||
"threshold": "0.500000000000000000",
|
||||
"veto_threshold": "0.334000000000000000"
|
||||
},
|
||||
"params": {
|
||||
"min_deposit": [
|
||||
{
|
||||
"denom": "ukava",
|
||||
"amount": "10000000"
|
||||
}
|
||||
],
|
||||
"max_deposit_period": "172800s",
|
||||
"voting_period": "600s",
|
||||
"quorum": "0.334000000000000000",
|
||||
"threshold": "0.500000000000000000",
|
||||
"veto_threshold": "0.334000000000000000",
|
||||
"min_initial_deposit_ratio": "0.000000000000000000",
|
||||
"burn_vote_quorum": false,
|
||||
"burn_proposal_deposit_prevote": false,
|
||||
"burn_vote_veto": true
|
||||
}
|
||||
},
|
||||
"hard": {
|
||||
@ -2982,6 +3000,15 @@
|
||||
}
|
||||
},
|
||||
"params": null,
|
||||
"packetfowardmiddleware": {
|
||||
"params": {
|
||||
"fee_percentage": "0.000000000000000000"
|
||||
},
|
||||
"in_flight_packets": {}
|
||||
},
|
||||
"precisebank": {
|
||||
"remainder": "0"
|
||||
},
|
||||
"pricefeed": {
|
||||
"params": {
|
||||
"markets": [
|
||||
|
@ -14,9 +14,9 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/cometbft/cometbft/crypto/ed25519"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/tests"
|
||||
@ -813,7 +813,7 @@ func TestKvCLISubmitCommunityPoolSpendProposal(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKvCLIQueryTxPagination(t *testing.T) {
|
||||
// Skip until https://github.com/tendermint/tendermint/issues/4432 has been
|
||||
// Skip until https://github.com/cometbft/cometbft/issues/4432 has been
|
||||
// resolved and included in a release.
|
||||
t.SkipNow()
|
||||
|
||||
|
@ -13,13 +13,13 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
|
||||
"cosmossdk.io/simapp"
|
||||
clientkeys "github.com/cosmos/cosmos-sdk/client/keys"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
"github.com/cosmos/cosmos-sdk/simapp"
|
||||
"github.com/cosmos/cosmos-sdk/tests"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth"
|
||||
|
@ -182,6 +182,23 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./out/swagger/kava/precisebank/v1/query.swagger.json",
|
||||
"tags": {
|
||||
"rename": {
|
||||
"Query": "Precisebank"
|
||||
}
|
||||
},
|
||||
"operationIds": {
|
||||
"rename": [
|
||||
{
|
||||
"type": "regex",
|
||||
"from": "(.*)",
|
||||
"to": "Precisebank$1"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./out/swagger/kava/pricefeed/v1beta1/query.swagger.json",
|
||||
"tags": {
|
||||
@ -295,6 +312,30 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./client/docs/ethermint-swagger.yml",
|
||||
"dereference": {
|
||||
"circular": "ignore"
|
||||
},
|
||||
"tags": {
|
||||
"rename": {
|
||||
"Query": "Ethermint"
|
||||
}
|
||||
},
|
||||
"operationIds": {
|
||||
"rename": [
|
||||
{
|
||||
"type": "regex",
|
||||
"from": "(.*)",
|
||||
"to": "Ethermint$1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"paths": {
|
||||
"exclude": [
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./client/docs/legacy-swagger.yml",
|
||||
"dereference": {
|
||||
|
File diff suppressed because it is too large
Load Diff
4458
client/docs/ethermint-swagger.yml
Normal file
4458
client/docs/ethermint-swagger.yml
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
381
client/erc20/MintableBurnableERC20.abi
Normal file
381
client/erc20/MintableBurnableERC20.abi
Normal file
@ -0,0 +1,381 @@
|
||||
[
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "name",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "symbol",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "decimals_",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Approval",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "previousOwner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "newOwner",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "OwnershipTransferred",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Transfer",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "allowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "approve",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "account",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "balanceOf",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "burn",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "decimals",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "subtractedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "decreaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "addedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "increaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "mint",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "name",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "owner",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "renounceOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "symbol",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "totalSupply",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transfer",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transferFrom",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "newOwner",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "transferOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
1
client/erc20/MintableBurnableERC20.bin
Normal file
1
client/erc20/MintableBurnableERC20.bin
Normal file
File diff suppressed because one or more lines are too long
1069
client/erc20/main.go
Normal file
1069
client/erc20/main.go
Normal file
File diff suppressed because one or more lines are too long
74
client/grpc/README.md
Normal file
74
client/grpc/README.md
Normal file
@ -0,0 +1,74 @@
|
||||
# Kava gRPC Client
|
||||
|
||||
The Kava gRPC client is a tool for making gRPC queries on a Kava chain.
|
||||
|
||||
## Features
|
||||
|
||||
- Easy-to-use gRPC client for the Kava chain.
|
||||
- Access all query clients for Cosmos and Kava modules using `client.Query` (e.g., `client.Query.Bank.Balance`).
|
||||
- Utilize utility functions for common queries (e.g., `client.BaseAccount(str)`).
|
||||
|
||||
## Usage
|
||||
|
||||
### Creating a new client
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
kavaGrpc "github.com/0glabs/0g-chain/client/grpc"
|
||||
)
|
||||
grpcUrl := "https://grpc.kava.io:443"
|
||||
client, err := kavaGrpc.NewClient(grpcUrl)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Making grpc queries
|
||||
|
||||
Query clients for both Cosmos and Kava modules are available via `client.Query`.
|
||||
|
||||
Example: Query Cosmos module `x/bank` for address balance
|
||||
|
||||
```go
|
||||
import (
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
)
|
||||
|
||||
rsp, err := client.Query.Bank.Balance(context.Background(), &banktypes.QueryBalanceRequest{
|
||||
Address: "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92",
|
||||
Denom: "ukava",
|
||||
})
|
||||
```
|
||||
|
||||
Example: Query Kava module `x/evmutil` for params
|
||||
|
||||
```go
|
||||
import (
|
||||
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
|
||||
)
|
||||
|
||||
rsp, err := client.Query.Evmutil.Params(
|
||||
context.Background(), &evmutiltypes.QueryParamsRequest{},
|
||||
)
|
||||
```
|
||||
|
||||
#### Query Utilities
|
||||
|
||||
Utility functions for common queries are available directly on the client.
|
||||
|
||||
Example: Util query to get a base account
|
||||
|
||||
```go
|
||||
kavaAcc := "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92"
|
||||
rsp, err := client.BaseAccount(kavaAcc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("account sequence for %s: %d\n", kavaAcc, rsp.Sequence)
|
||||
```
|
||||
|
||||
## Query Tests
|
||||
|
||||
To test queries, a Kava node is required. Therefore, the e2e tests for the gRPC client queries can be found in the `tests/e2e` directory. Tests for new utility queries should be added as e2e tests under the `test/e2e` directory.
|
50
client/grpc/client.go
Normal file
50
client/grpc/client.go
Normal file
@ -0,0 +1,50 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/0glabs/0g-chain/client/grpc/query"
|
||||
"github.com/0glabs/0g-chain/client/grpc/util"
|
||||
)
|
||||
|
||||
// ZgChainGrpcClient enables the usage of kava grpc query clients and query utils
|
||||
type ZgChainGrpcClient struct {
|
||||
config ZgChainGrpcClientConfig
|
||||
|
||||
// Query clients for cosmos and kava modules
|
||||
Query *query.QueryClient
|
||||
|
||||
// Utils for common queries (ie fetch an unpacked BaseAccount)
|
||||
*util.Util
|
||||
}
|
||||
|
||||
// ZgChainGrpcClientConfig is a configuration struct for a ZgChainGrpcClient
|
||||
type ZgChainGrpcClientConfig struct {
|
||||
// note: add future config options here
|
||||
}
|
||||
|
||||
// NewClient creates a new ZgChainGrpcClient via a grpc url
|
||||
func NewClient(grpcUrl string) (*ZgChainGrpcClient, error) {
|
||||
return NewClientWithConfig(grpcUrl, NewDefaultConfig())
|
||||
}
|
||||
|
||||
// NewClientWithConfig creates a new ZgChainGrpcClient via a grpc url and config
|
||||
func NewClientWithConfig(grpcUrl string, config ZgChainGrpcClientConfig) (*ZgChainGrpcClient, error) {
|
||||
if grpcUrl == "" {
|
||||
return nil, errors.New("grpc url cannot be empty")
|
||||
}
|
||||
query, error := query.NewQueryClient(grpcUrl)
|
||||
if error != nil {
|
||||
return nil, error
|
||||
}
|
||||
client := &ZgChainGrpcClient{
|
||||
Query: query,
|
||||
Util: util.NewUtil(query),
|
||||
config: config,
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func NewDefaultConfig() ZgChainGrpcClientConfig {
|
||||
return ZgChainGrpcClientConfig{}
|
||||
}
|
15
client/grpc/client_test.go
Normal file
15
client/grpc/client_test.go
Normal file
@ -0,0 +1,15 @@
|
||||
package grpc_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/0glabs/0g-chain/client/grpc"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewClient_InvalidEndpoint(t *testing.T) {
|
||||
_, err := grpc.NewClient("invalid-url")
|
||||
require.ErrorContains(t, err, "unknown grpc url scheme")
|
||||
_, err = grpc.NewClient("")
|
||||
require.ErrorContains(t, err, "grpc url cannot be empty")
|
||||
}
|
52
client/grpc/query/connection.go
Normal file
52
client/grpc/query/connection.go
Normal file
@ -0,0 +1,52 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// newGrpcConnection parses a GRPC endpoint and creates a connection to it
|
||||
func newGrpcConnection(ctx context.Context, endpoint string) (*grpc.ClientConn, error) {
|
||||
grpcUrl, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse grpc connection \"%s\": %v", endpoint, err)
|
||||
}
|
||||
|
||||
var creds credentials.TransportCredentials
|
||||
switch grpcUrl.Scheme {
|
||||
case "http":
|
||||
creds = insecure.NewCredentials()
|
||||
case "https":
|
||||
creds = credentials.NewTLS(&tls.Config{})
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown grpc url scheme: %s", grpcUrl.Scheme)
|
||||
}
|
||||
|
||||
// Ensure the encoding config is set up correctly with the query client
|
||||
// otherwise it will produce panics like:
|
||||
// invalid Go type math.Int for field ...
|
||||
encodingConfig := app.MakeEncodingConfig()
|
||||
protoCodec := codec.NewProtoCodec(encodingConfig.InterfaceRegistry)
|
||||
grpcCodec := protoCodec.GRPCCodec()
|
||||
|
||||
secureOpt := grpc.WithTransportCredentials(creds)
|
||||
grpcConn, err := grpc.DialContext(
|
||||
ctx,
|
||||
grpcUrl.Host,
|
||||
secureOpt,
|
||||
grpc.WithDefaultCallOptions(grpc.ForceCodec(grpcCodec)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return grpcConn, nil
|
||||
}
|
7
client/grpc/query/doc.go
Normal file
7
client/grpc/query/doc.go
Normal file
@ -0,0 +1,7 @@
|
||||
/*
|
||||
The query package includes Cosmos and Kava gRPC query clients.
|
||||
|
||||
To ensure that the `QueryClient` stays updated, add new module query clients
|
||||
to the `QueryClient` whenever new modules with grpc queries are added to the Kava app.
|
||||
*/
|
||||
package query
|
108
client/grpc/query/query.go
Normal file
108
client/grpc/query/query.go
Normal file
@ -0,0 +1,108 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client/grpc/tmservice"
|
||||
txtypes "github.com/cosmos/cosmos-sdk/types/tx"
|
||||
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
authz "github.com/cosmos/cosmos-sdk/x/authz"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
|
||||
disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
|
||||
evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
|
||||
govv1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
|
||||
govv1beta1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
|
||||
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
|
||||
paramstypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
|
||||
slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
||||
|
||||
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
|
||||
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
|
||||
|
||||
bep3types "github.com/0glabs/0g-chain/x/bep3/types"
|
||||
committeetypes "github.com/0glabs/0g-chain/x/committee/types"
|
||||
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
|
||||
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
|
||||
)
|
||||
|
||||
// QueryClient is a wrapper with all Cosmos and Kava grpc query clients
|
||||
type QueryClient struct {
|
||||
// cosmos-sdk query clients
|
||||
|
||||
Tm tmservice.ServiceClient
|
||||
Tx txtypes.ServiceClient
|
||||
Auth authtypes.QueryClient
|
||||
Authz authz.QueryClient
|
||||
Bank banktypes.QueryClient
|
||||
Distribution disttypes.QueryClient
|
||||
Evidence evidencetypes.QueryClient
|
||||
Gov govv1types.QueryClient
|
||||
GovBeta govv1beta1types.QueryClient
|
||||
Mint minttypes.QueryClient
|
||||
Params paramstypes.QueryClient
|
||||
Slashing slashingtypes.QueryClient
|
||||
Staking stakingtypes.QueryClient
|
||||
Upgrade upgradetypes.QueryClient
|
||||
Consensus consensustypes.QueryClient
|
||||
|
||||
// 3rd party query clients
|
||||
|
||||
Evm evmtypes.QueryClient
|
||||
Feemarket feemarkettypes.QueryClient
|
||||
IbcClient ibcclienttypes.QueryClient
|
||||
IbcTransfer ibctransfertypes.QueryClient
|
||||
|
||||
// kava module query clients
|
||||
|
||||
Bep3 bep3types.QueryClient
|
||||
Committee committeetypes.QueryClient
|
||||
Evmutil evmutiltypes.QueryClient
|
||||
Issuance issuancetypes.QueryClient
|
||||
Pricefeed pricefeedtypes.QueryClient
|
||||
Precisebank precisebanktypes.QueryClient
|
||||
}
|
||||
|
||||
// NewQueryClient creates a new QueryClient and initializes all the module query clients
|
||||
func NewQueryClient(grpcEndpoint string) (*QueryClient, error) {
|
||||
conn, err := newGrpcConnection(context.Background(), grpcEndpoint)
|
||||
if err != nil {
|
||||
return &QueryClient{}, err
|
||||
}
|
||||
client := &QueryClient{
|
||||
Tm: tmservice.NewServiceClient(conn),
|
||||
Tx: txtypes.NewServiceClient(conn),
|
||||
Auth: authtypes.NewQueryClient(conn),
|
||||
Authz: authz.NewQueryClient(conn),
|
||||
Bank: banktypes.NewQueryClient(conn),
|
||||
Distribution: disttypes.NewQueryClient(conn),
|
||||
Evidence: evidencetypes.NewQueryClient(conn),
|
||||
Gov: govv1types.NewQueryClient(conn),
|
||||
GovBeta: govv1beta1types.NewQueryClient(conn),
|
||||
Mint: minttypes.NewQueryClient(conn),
|
||||
Params: paramstypes.NewQueryClient(conn),
|
||||
Slashing: slashingtypes.NewQueryClient(conn),
|
||||
Staking: stakingtypes.NewQueryClient(conn),
|
||||
Upgrade: upgradetypes.NewQueryClient(conn),
|
||||
Consensus: consensustypes.NewQueryClient(conn),
|
||||
|
||||
Evm: evmtypes.NewQueryClient(conn),
|
||||
Feemarket: feemarkettypes.NewQueryClient(conn),
|
||||
IbcClient: ibcclienttypes.NewQueryClient(conn),
|
||||
IbcTransfer: ibctransfertypes.NewQueryClient(conn),
|
||||
|
||||
Bep3: bep3types.NewQueryClient(conn),
|
||||
Committee: committeetypes.NewQueryClient(conn),
|
||||
Evmutil: evmutiltypes.NewQueryClient(conn),
|
||||
Issuance: issuancetypes.NewQueryClient(conn),
|
||||
Pricefeed: pricefeedtypes.NewQueryClient(conn),
|
||||
Precisebank: precisebanktypes.NewQueryClient(conn),
|
||||
}
|
||||
return client, nil
|
||||
}
|
64
client/grpc/query/query_test.go
Normal file
64
client/grpc/query/query_test.go
Normal file
@ -0,0 +1,64 @@
|
||||
package query_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/0glabs/0g-chain/client/grpc/query"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewQueryClient_InvalidGprc(t *testing.T) {
|
||||
t.Run("valid connection", func(t *testing.T) {
|
||||
conn, err := query.NewQueryClient("http://localhost:1234")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, conn)
|
||||
})
|
||||
|
||||
t.Run("non-empty url", func(t *testing.T) {
|
||||
_, err := query.NewQueryClient("")
|
||||
require.ErrorContains(t, err, "unknown grpc url scheme")
|
||||
})
|
||||
|
||||
t.Run("invalid url scheme", func(t *testing.T) {
|
||||
_, err := query.NewQueryClient("ftp://localhost:1234")
|
||||
require.ErrorContains(t, err, "unknown grpc url scheme")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewQueryClient_ValidClient(t *testing.T) {
|
||||
t.Run("all clients are created", func(t *testing.T) {
|
||||
client, err := query.NewQueryClient("http://localhost:1234")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, client)
|
||||
|
||||
// validate cosmos clients
|
||||
require.NotNil(t, client.Tm)
|
||||
require.NotNil(t, client.Tx)
|
||||
require.NotNil(t, client.Auth)
|
||||
require.NotNil(t, client.Authz)
|
||||
require.NotNil(t, client.Bank)
|
||||
require.NotNil(t, client.Distribution)
|
||||
require.NotNil(t, client.Evidence)
|
||||
require.NotNil(t, client.Gov)
|
||||
require.NotNil(t, client.GovBeta)
|
||||
require.NotNil(t, client.Mint)
|
||||
require.NotNil(t, client.Params)
|
||||
require.NotNil(t, client.Slashing)
|
||||
require.NotNil(t, client.Staking)
|
||||
require.NotNil(t, client.Upgrade)
|
||||
require.NotNil(t, client.Consensus)
|
||||
|
||||
// validate 3rd party clients
|
||||
require.NotNil(t, client.Evm)
|
||||
require.NotNil(t, client.Feemarket)
|
||||
require.NotNil(t, client.IbcClient)
|
||||
require.NotNil(t, client.IbcTransfer)
|
||||
|
||||
// validate kava clients
|
||||
require.NotNil(t, client.Bep3)
|
||||
require.NotNil(t, client.Committee)
|
||||
require.NotNil(t, client.Evmutil)
|
||||
require.NotNil(t, client.Issuance)
|
||||
require.NotNil(t, client.Pricefeed)
|
||||
})
|
||||
}
|
41
client/grpc/util/account.go
Normal file
41
client/grpc/util/account.go
Normal file
@ -0,0 +1,41 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
)
|
||||
|
||||
// Account fetches an account via an address and returns the unpacked account
|
||||
func (u *Util) Account(addr string) (authtypes.AccountI, error) {
|
||||
res, err := u.query.Auth.Account(context.Background(), &authtypes.QueryAccountRequest{
|
||||
Address: addr,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch account: %w", err)
|
||||
}
|
||||
|
||||
var acc authtypes.AccountI
|
||||
err = u.encodingConfig.Marshaler.UnpackAny(res.Account, &acc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unpack account: %w", err)
|
||||
}
|
||||
return acc, nil
|
||||
}
|
||||
|
||||
// BaseAccount fetches a base account via an address or returns an error if
|
||||
// the account is not a base account
|
||||
func (u *Util) BaseAccount(addr string) (authtypes.BaseAccount, error) {
|
||||
acc, err := u.Account(addr)
|
||||
if err != nil {
|
||||
return authtypes.BaseAccount{}, err
|
||||
}
|
||||
|
||||
bAcc, ok := acc.(*authtypes.BaseAccount)
|
||||
if !ok {
|
||||
return authtypes.BaseAccount{}, fmt.Errorf("%s is not a base account", addr)
|
||||
}
|
||||
|
||||
return *bAcc, nil
|
||||
}
|
8
client/grpc/util/doc.go
Normal file
8
client/grpc/util/doc.go
Normal file
@ -0,0 +1,8 @@
|
||||
/*
|
||||
The util package contains utility functions for the Kava gRPC client.
|
||||
|
||||
For example, `account.go` includes account-related query helpers.
|
||||
In this file, utilities such as `client.Util.BaseAccount(addr)` is exposed to
|
||||
query an account and return an unpacked `BaseAccount` instance.
|
||||
*/
|
||||
package util
|
32
client/grpc/util/util.go
Normal file
32
client/grpc/util/util.go
Normal file
@ -0,0 +1,32 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
grpctypes "github.com/cosmos/cosmos-sdk/types/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/params"
|
||||
query "github.com/0glabs/0g-chain/client/grpc/query"
|
||||
)
|
||||
|
||||
// Util contains utility functions for the Kava gRPC client
|
||||
type Util struct {
|
||||
query *query.QueryClient
|
||||
encodingConfig params.EncodingConfig
|
||||
}
|
||||
|
||||
// NewUtil creates a new Util instance
|
||||
func NewUtil(query *query.QueryClient) *Util {
|
||||
return &Util{
|
||||
query: query,
|
||||
encodingConfig: app.MakeEncodingConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Util) CtxAtHeight(height int64) context.Context {
|
||||
heightStr := strconv.FormatInt(height, 10)
|
||||
return metadata.AppendToOutgoingContext(context.Background(), grpctypes.GRPCBlockHeightHeader, heightStr)
|
||||
}
|
@ -13,7 +13,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/cometbft/cometbft/types"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
@ -132,7 +132,7 @@ func (br BaseReq) ValidateBasic(w http.ResponseWriter) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadRESTReq reads and unmarshals a Request's body to the the BaseReq struct.
|
||||
// ReadRESTReq reads and unmarshals a Request's body to the BaseReq struct.
|
||||
// Writes an error response to ResponseWriter and returns false if errors occurred.
|
||||
func ReadRESTReq(w http.ResponseWriter, r *http.Request, cdc *codec.LegacyAmino, req interface{}) bool {
|
||||
body, err := io.ReadAll(r.Body)
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
simappparams "cosmossdk.io/simapp/params"
|
||||
"github.com/0glabs/0g-chain/client/rest"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
@ -19,7 +20,6 @@ import (
|
||||
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
|
||||
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
|
||||
simappparams "github.com/cosmos/cosmos-sdk/simapp/params"
|
||||
"github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
|
@ -7,6 +7,10 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Kava-Labs/opendb"
|
||||
cometbftdb "github.com/cometbft/cometbft-db"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
@ -19,8 +23,6 @@ import (
|
||||
ethermintflags "github.com/evmos/ethermint/server/flags"
|
||||
"github.com/spf13/cast"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
db "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/params"
|
||||
@ -29,6 +31,7 @@ import (
|
||||
const (
|
||||
flagMempoolEnableAuth = "mempool.enable-authentication"
|
||||
flagMempoolAuthAddresses = "mempool.authorized-addresses"
|
||||
flagSkipLoadLatest = "skip-load-latest"
|
||||
)
|
||||
|
||||
// appCreator holds functions used by the sdk server to control the 0g-chain app.
|
||||
@ -40,7 +43,7 @@ type appCreator struct {
|
||||
// newApp loads config from AppOptions and returns a new app.
|
||||
func (ac appCreator) newApp(
|
||||
logger log.Logger,
|
||||
db db.DB,
|
||||
db cometbftdb.DB,
|
||||
traceStore io.Writer,
|
||||
appOpts servertypes.AppOptions,
|
||||
) servertypes.Application {
|
||||
@ -61,7 +64,7 @@ func (ac appCreator) newApp(
|
||||
|
||||
homeDir := cast.ToString(appOpts.Get(flags.FlagHome))
|
||||
snapshotDir := filepath.Join(homeDir, "data", "snapshots") // TODO can these directory names be imported from somewhere?
|
||||
snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir)
|
||||
snapshotDB, err := opendb.OpenDB(appOpts, snapshotDir, "metadata", server.GetAppDBBackend(appOpts))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -88,10 +91,26 @@ func (ac appCreator) newApp(
|
||||
cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent)),
|
||||
)
|
||||
|
||||
// Setup chainId
|
||||
chainID := cast.ToString(appOpts.Get(flags.FlagChainID))
|
||||
if len(chainID) == 0 {
|
||||
// fallback to genesis chain-id
|
||||
appGenesis, err := tmtypes.GenesisDocFromFile(filepath.Join(homeDir, "config", "genesis.json"))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
chainID = appGenesis.ChainID
|
||||
}
|
||||
|
||||
skipLoadLatest := false
|
||||
if appOpts.Get(flagSkipLoadLatest) != nil {
|
||||
skipLoadLatest = cast.ToBool(appOpts.Get(flagSkipLoadLatest))
|
||||
}
|
||||
|
||||
return app.NewApp(
|
||||
logger, db, homeDir, traceStore, ac.encodingConfig,
|
||||
app.Options{
|
||||
SkipLoadLatest: false,
|
||||
SkipLoadLatest: skipLoadLatest,
|
||||
SkipUpgradeHeights: skipUpgradeHeights,
|
||||
SkipGenesisInvariants: cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)),
|
||||
InvariantCheckPeriod: cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
|
||||
@ -112,18 +131,20 @@ func (ac appCreator) newApp(
|
||||
baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))),
|
||||
baseapp.SetIAVLDisableFastNode(cast.ToBool(iavlDisableFastNode)),
|
||||
baseapp.SetIAVLLazyLoading(cast.ToBool(appOpts.Get(server.FlagIAVLLazyLoading))),
|
||||
baseapp.SetChainID(chainID),
|
||||
)
|
||||
}
|
||||
|
||||
// appExport writes out an app's state to json.
|
||||
func (ac appCreator) appExport(
|
||||
logger log.Logger,
|
||||
db db.DB,
|
||||
db cometbftdb.DB,
|
||||
traceStore io.Writer,
|
||||
height int64,
|
||||
forZeroHeight bool,
|
||||
jailAllowedAddrs []string,
|
||||
appOpts servertypes.AppOptions,
|
||||
modulesToExport []string,
|
||||
) (servertypes.ExportedApp, error) {
|
||||
homePath, ok := appOpts.Get(flags.FlagHome).(string)
|
||||
if !ok || homePath == "" {
|
||||
@ -144,7 +165,7 @@ func (ac appCreator) appExport(
|
||||
} else {
|
||||
tempApp = app.NewApp(logger, db, homePath, traceStore, ac.encodingConfig, options)
|
||||
}
|
||||
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs)
|
||||
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport)
|
||||
}
|
||||
|
||||
// addStartCmdFlags adds flags to the server start command.
|
||||
|
@ -4,10 +4,10 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/cosmos/cosmos-sdk/version"
|
||||
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
|
||||
"github.com/spf13/cobra"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/params"
|
||||
|
53
cmd/0gchaind/iavlviewer/data.go
Normal file
53
cmd/0gchaind/iavlviewer/data.go
Normal file
@ -0,0 +1,53 @@
|
||||
package iavlviewer
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/iavl"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newDataCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "data <prefix> [version number]",
|
||||
Short: "View all keys, hash, & size of tree.",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
prefix := args[0]
|
||||
version := 0
|
||||
if len(args) == 2 {
|
||||
var err error
|
||||
version, err = parseVersion(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
tree, err := openPrefixTree(opts, cmd, prefix, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printKeys(tree)
|
||||
hash := tree.Hash()
|
||||
fmt.Printf("Hash: %X\n", hash)
|
||||
fmt.Printf("Size: %X\n", tree.Size())
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func printKeys(tree *iavl.MutableTree) {
|
||||
fmt.Println("Printing all keys with hashed values (to detect diff)")
|
||||
tree.Iterate(func(key []byte, value []byte) bool { //nolint:errcheck
|
||||
printKey := parseWeaveKey(key)
|
||||
digest := sha256.Sum256(value)
|
||||
fmt.Printf(" %s\n %X\n", printKey, digest)
|
||||
return false
|
||||
})
|
||||
}
|
38
cmd/0gchaind/iavlviewer/hash.go
Normal file
38
cmd/0gchaind/iavlviewer/hash.go
Normal file
@ -0,0 +1,38 @@
|
||||
package iavlviewer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newHashCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "hash <prefix> [version number]",
|
||||
Short: "Print the root hash of the iavl tree.",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
prefix := args[0]
|
||||
version := 0
|
||||
if len(args) == 2 {
|
||||
var err error
|
||||
version, err = parseVersion(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
tree, err := openPrefixTree(opts, cmd, prefix, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Hash: %X\n", tree.Hash())
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
86
cmd/0gchaind/iavlviewer/root.go
Normal file
86
cmd/0gchaind/iavlviewer/root.go
Normal file
@ -0,0 +1,86 @@
|
||||
package iavlviewer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"cosmossdk.io/log"
|
||||
dbm "github.com/cosmos/cosmos-db"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
"github.com/cosmos/cosmos-sdk/store/wrapper"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/cosmos/iavl"
|
||||
iavldb "github.com/cosmos/iavl/db"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultCacheSize int = 10000
|
||||
)
|
||||
|
||||
func NewCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "iavlviewer <data|hash|shape|versions> <prefix> [version number]",
|
||||
Short: "Output various data, hashes, and calculations for an iavl tree",
|
||||
}
|
||||
|
||||
cmd.AddCommand(newDataCmd(opts))
|
||||
cmd.AddCommand(newHashCmd(opts))
|
||||
cmd.AddCommand(newShapeCmd(opts))
|
||||
cmd.AddCommand(newVersionsCmd(opts))
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func parseVersion(arg string) (int, error) {
|
||||
version, err := strconv.Atoi(arg)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid version number: '%s'", arg)
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
func openPrefixTree(opts ethermintserver.StartOptions, cmd *cobra.Command, prefix string, version int) (*iavl.MutableTree, error) {
|
||||
clientCtx := client.GetClientContextFromCmd(cmd)
|
||||
ctx := server.GetServerContextFromCmd(cmd)
|
||||
ctx.Config.SetRoot(clientCtx.HomeDir)
|
||||
|
||||
db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open database at %s: %s", clientCtx.HomeDir, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := db.Close(); err != nil {
|
||||
ctx.Logger.Error("error closing db", "error", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
cosmosdb := wrapper.NewCosmosDB(db)
|
||||
|
||||
tree, err := readTree(cosmosdb, version, []byte(prefix))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read tree with prefix %s: %s", prefix, err)
|
||||
}
|
||||
return tree, nil
|
||||
}
|
||||
|
||||
// ReadTree loads an iavl tree from the directory
|
||||
// If version is 0, load latest, otherwise, load named version
|
||||
// The prefix represents which iavl tree you want to read. The iaviwer will always set a prefix.
|
||||
func readTree(db dbm.DB, version int, prefix []byte) (*iavl.MutableTree, error) {
|
||||
if len(prefix) != 0 {
|
||||
db = dbm.NewPrefixDB(db, prefix)
|
||||
}
|
||||
|
||||
tree := iavl.NewMutableTree(iavldb.NewWrapper(db), DefaultCacheSize, false, log.NewLogger(os.Stdout))
|
||||
ver, err := tree.LoadVersion(int64(version))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fmt.Printf("Latest version: %d\n", ver)
|
||||
fmt.Printf("Got version: %d\n", version)
|
||||
return tree, err
|
||||
}
|
47
cmd/0gchaind/iavlviewer/shape.go
Normal file
47
cmd/0gchaind/iavlviewer/shape.go
Normal file
@ -0,0 +1,47 @@
|
||||
package iavlviewer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cosmos/iavl"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newShapeCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "shape <prefix> [version number]",
|
||||
Short: "View shape of iavl tree.",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
prefix := args[0]
|
||||
version := 0
|
||||
if len(args) == 2 {
|
||||
var err error
|
||||
version, err = parseVersion(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
tree, err := openPrefixTree(opts, cmd, prefix, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printShape(tree)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func printShape(tree *iavl.MutableTree) {
|
||||
// shape := tree.RenderShape(" ", nil)
|
||||
// TODO: handle this error
|
||||
shape, _ := tree.RenderShape(" ", nodeEncoder)
|
||||
fmt.Println(strings.Join(shape, "\n"))
|
||||
}
|
74
cmd/0gchaind/iavlviewer/versions.go
Normal file
74
cmd/0gchaind/iavlviewer/versions.go
Normal file
@ -0,0 +1,74 @@
|
||||
package iavlviewer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cosmos/iavl"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func newVersionsCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "versions <prefix>",
|
||||
Short: "Print all versions of iavl tree",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
prefix := args[0]
|
||||
tree, err := openPrefixTree(opts, cmd, prefix, 15)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printVersions(tree)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func printVersions(tree *iavl.MutableTree) {
|
||||
versions := tree.AvailableVersions()
|
||||
fmt.Println("Available versions:")
|
||||
for _, v := range versions {
|
||||
fmt.Printf(" %d\n", v)
|
||||
}
|
||||
}
|
||||
|
||||
// parseWeaveKey assumes a separating : where all in front should be ascii,
|
||||
// and all afterwards may be ascii or binary
|
||||
func parseWeaveKey(key []byte) string {
|
||||
cut := bytes.IndexRune(key, ':')
|
||||
if cut == -1 {
|
||||
return encodeID(key)
|
||||
}
|
||||
prefix := key[:cut]
|
||||
id := key[cut+1:]
|
||||
return fmt.Sprintf("%s:%s", encodeID(prefix), encodeID(id))
|
||||
}
|
||||
|
||||
// casts to a string if it is printable ascii, hex-encodes otherwise
|
||||
func encodeID(id []byte) string {
|
||||
for _, b := range id {
|
||||
if b < 0x20 || b >= 0x80 {
|
||||
return strings.ToUpper(hex.EncodeToString(id))
|
||||
}
|
||||
}
|
||||
return string(id)
|
||||
}
|
||||
|
||||
func nodeEncoder(id []byte, depth int, isLeaf bool) string {
|
||||
prefix := fmt.Sprintf("-%d ", depth)
|
||||
if isLeaf {
|
||||
prefix = fmt.Sprintf("*%d ", depth)
|
||||
}
|
||||
if len(id) == 0 {
|
||||
return fmt.Sprintf("%s<nil>", prefix)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", prefix, parseWeaveKey(id))
|
||||
}
|
@ -1,14 +1,14 @@
|
||||
package client
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
|
||||
"github.com/cometbft/cometbft/libs/cli"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/client/keys"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tendermint/tendermint/libs/cli"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keyring"
|
||||
ethclient "github.com/evmos/ethermint/client"
|
||||
@ -18,9 +18,9 @@ import (
|
||||
|
||||
var ethFlag = "eth"
|
||||
|
||||
// KeyCommands registers a sub-tree of commands to interact with
|
||||
// keyCommands registers a sub-tree of commands to interact with
|
||||
// local private key storage.
|
||||
func KeyCommands(defaultNodeHome string) *cobra.Command {
|
||||
func keyCommands(defaultNodeHome string) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "keys",
|
||||
Short: "Manage your application's keys",
|
||||
@ -52,7 +52,7 @@ The pass backend requires GnuPG: https://gnupg.org/
|
||||
addCmd := keys.AddKeyCommand()
|
||||
addCmd.Flags().Bool(ethFlag, false, "use default evm coin-type (60) and key signing algorithm (\"eth_secp256k1\")")
|
||||
|
||||
algoFlag := addCmd.Flag(flags.FlagKeyAlgorithm)
|
||||
algoFlag := addCmd.Flag(flags.FlagKeyType)
|
||||
algoFlag.DefValue = string(hd.EthSecp256k1Type)
|
||||
err := algoFlag.Value.Set(string(hd.EthSecp256k1Type))
|
||||
if err != nil {
|
||||
@ -107,7 +107,7 @@ func runAddCmd(cmd *cobra.Command, args []string) error {
|
||||
eth, _ := cmd.Flags().GetBool(ethFlag)
|
||||
if eth {
|
||||
cmd.Print("eth flag specified: using coin-type 60 and signing algorithm eth_secp256k1\n")
|
||||
cmd.Flags().Set(flags.FlagKeyAlgorithm, string(hd.EthSecp256k1Type))
|
||||
cmd.Flags().Set(flags.FlagKeyType, string(hd.EthSecp256k1Type))
|
||||
cmd.Flags().Set("coin-type", "60")
|
||||
}
|
||||
|
@ -11,9 +11,7 @@ import (
|
||||
|
||||
func main() {
|
||||
chaincfg.SetSDKConfig().Seal()
|
||||
|
||||
rootCmd := NewRootCmd()
|
||||
|
||||
if err := svrcmd.Execute(rootCmd, chaincfg.EnvPrefix, chaincfg.DefaultNodeHome); err != nil {
|
||||
switch e := err.(type) {
|
||||
case server.ErrorCode:
|
||||
|
216
cmd/0gchaind/rocksdb/compact.go
Normal file
216
cmd/0gchaind/rocksdb/compact.go
Normal file
@ -0,0 +1,216 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package rocksdb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/Kava-Labs/opendb"
|
||||
)
|
||||
|
||||
const (
|
||||
flagPrintStatsInterval = "print-stats-interval"
|
||||
)
|
||||
|
||||
var allowedDBs = []string{"application", "blockstore", "state"}
|
||||
|
||||
func CompactRocksDBCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: fmt.Sprintf(
|
||||
"compact <%s>",
|
||||
strings.Join(allowedDBs, "|"),
|
||||
),
|
||||
Short: "force compacts RocksDB",
|
||||
Long: `This is a utility command that performs a force compaction on the state or
|
||||
blockstore. This should only be run once the node has stopped.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
statsIntervalStr, err := cmd.Flags().GetString(flagPrintStatsInterval)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
statsInterval, err := time.ParseDuration(statsIntervalStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse duration for --%s: %w", flagPrintStatsInterval, err)
|
||||
}
|
||||
|
||||
clientCtx := client.GetClientContextFromCmd(cmd)
|
||||
ctx := server.GetServerContextFromCmd(cmd)
|
||||
|
||||
if server.GetAppDBBackend(ctx.Viper) != "rocksdb" {
|
||||
return errors.New("compaction is currently only supported with rocksdb")
|
||||
}
|
||||
|
||||
if !slices.Contains(allowedDBs, args[0]) {
|
||||
return fmt.Errorf(
|
||||
"invalid db name, must be one of the following: %s",
|
||||
strings.Join(allowedDBs, ", "),
|
||||
)
|
||||
}
|
||||
|
||||
return compactRocksDBs(clientCtx.HomeDir, logger, args[0], statsInterval)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().String(flagPrintStatsInterval, "1m", "duration string for how often to print compaction stats")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// compactRocksDBs performs a manual compaction on the given db.
|
||||
func compactRocksDBs(
|
||||
rootDir string,
|
||||
logger log.Logger,
|
||||
dbName string,
|
||||
statsInterval time.Duration,
|
||||
) error {
|
||||
dbPath := filepath.Join(rootDir, "data", dbName+".db")
|
||||
|
||||
dbOpts, cfOpts, err := opendb.LoadLatestOptions(dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("opening db", "path", dbPath)
|
||||
db, _, err := grocksdb.OpenDbColumnFamilies(
|
||||
dbOpts,
|
||||
dbPath,
|
||||
[]string{opendb.DefaultColumnFamilyName},
|
||||
[]*grocksdb.Options{cfOpts},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.Error("failed to initialize cometbft db", "path", dbPath, "err", err)
|
||||
return fmt.Errorf("failed to open db %s %w", dbPath, err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
logColumnFamilyMetadata(db, logger)
|
||||
|
||||
logger.Info("starting compaction...", "db", dbPath)
|
||||
|
||||
done := make(chan bool)
|
||||
registerSignalHandler(db, logger, done)
|
||||
startCompactionStatsOutput(db, logger, done, statsInterval)
|
||||
|
||||
// Actually run the compaction
|
||||
db.CompactRange(grocksdb.Range{Start: nil, Limit: nil})
|
||||
logger.Info("done compaction", "db", dbPath)
|
||||
|
||||
done <- true
|
||||
return nil
|
||||
}
|
||||
|
||||
// bytesToMB converts bytes to megabytes.
|
||||
func bytesToMB(bytes uint64) float64 {
|
||||
return float64(bytes) / 1024 / 1024
|
||||
}
|
||||
|
||||
// logColumnFamilyMetadata outputs the column family and level metadata.
|
||||
func logColumnFamilyMetadata(
|
||||
db *grocksdb.DB,
|
||||
logger log.Logger,
|
||||
) {
|
||||
metadata := db.GetColumnFamilyMetadata()
|
||||
|
||||
logger.Info(
|
||||
"column family metadata",
|
||||
"name", metadata.Name(),
|
||||
"sizeMB", bytesToMB(metadata.Size()),
|
||||
"fileCount", metadata.FileCount(),
|
||||
"levels", len(metadata.LevelMetas()),
|
||||
)
|
||||
|
||||
for _, level := range metadata.LevelMetas() {
|
||||
logger.Info(
|
||||
fmt.Sprintf("level %d metadata", level.Level()),
|
||||
"sstMetas", strconv.Itoa(len(level.SstMetas())),
|
||||
"sizeMB", strconv.FormatFloat(bytesToMB(level.Size()), 'f', 2, 64),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// startCompactionStatsOutput starts a goroutine that outputs compaction stats
|
||||
// every minute.
|
||||
func startCompactionStatsOutput(
|
||||
db *grocksdb.DB,
|
||||
logger log.Logger,
|
||||
done chan bool,
|
||||
statsInterval time.Duration,
|
||||
) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(statsInterval)
|
||||
isClosed := false
|
||||
|
||||
for {
|
||||
select {
|
||||
// Make sure we don't try reading from the closed db.
|
||||
// We continue the loop so that we can make sure the done channel
|
||||
// does not stall indefinitely from repeated writes and no reader.
|
||||
case <-done:
|
||||
logger.Debug("stopping compaction stats output")
|
||||
isClosed = true
|
||||
case <-ticker.C:
|
||||
if !isClosed {
|
||||
compactionStats := db.GetProperty("rocksdb.stats")
|
||||
fmt.Printf("%s\n", compactionStats)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// registerSignalHandler registers a signal handler that will cancel any running
|
||||
// compaction when the user presses Ctrl+C.
|
||||
func registerSignalHandler(
|
||||
db *grocksdb.DB,
|
||||
logger log.Logger,
|
||||
done chan bool,
|
||||
) {
|
||||
// https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ
|
||||
// Q: Can I close the DB when a manual compaction is in progress?
|
||||
//
|
||||
// A: No, it's not safe to do that. However, you call
|
||||
// CancelAllBackgroundWork(db, true) in another thread to abort the
|
||||
// running compactions, so that you can close the DB sooner. Since
|
||||
// 6.5, you can also speed it up using
|
||||
// DB::DisableManualCompaction().
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
go func() {
|
||||
for sig := range c {
|
||||
logger.Info(fmt.Sprintf(
|
||||
"received %s signal, aborting running compaction... Do NOT kill me before compaction is cancelled. I will exit when compaction is cancelled.",
|
||||
sig,
|
||||
))
|
||||
db.DisableManualCompaction()
|
||||
logger.Info("manual compaction disabled")
|
||||
|
||||
// Stop the logging
|
||||
done <- true
|
||||
}
|
||||
}()
|
||||
}
|
19
cmd/0gchaind/rocksdb/rocksdb.go
Normal file
19
cmd/0gchaind/rocksdb/rocksdb.go
Normal file
@ -0,0 +1,19 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package rocksdb
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// RocksDBCmd defines the root command containing subcommands that assist in
|
||||
// rocksdb related tasks such as manual compaction.
|
||||
var RocksDBCmd = &cobra.Command{
|
||||
Use: "rocksdb",
|
||||
Short: "RocksDB util commands",
|
||||
}
|
||||
|
||||
func init() {
|
||||
RocksDBCmd.AddCommand(CompactRocksDBCmd())
|
||||
}
|
14
cmd/0gchaind/rocksdb/rocksdb_dummy.go
Normal file
14
cmd/0gchaind/rocksdb/rocksdb_dummy.go
Normal file
@ -0,0 +1,14 @@
|
||||
//go:build !rocksdb
|
||||
// +build !rocksdb
|
||||
|
||||
package rocksdb
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// RocksDBCmd defines the root command when the rocksdb build tag is not set.
|
||||
var RocksDBCmd = &cobra.Command{
|
||||
Use: "rocksdb",
|
||||
Short: "RocksDB util commands, disabled because rocksdb build tag not set",
|
||||
}
|
@ -1,32 +1,38 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
tmcfg "github.com/cometbft/cometbft/config"
|
||||
tmcli "github.com/cometbft/cometbft/libs/cli"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/config"
|
||||
"github.com/cosmos/cosmos-sdk/client/debug"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keyring"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
|
||||
servertypes "github.com/cosmos/cosmos-sdk/server/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/genutil"
|
||||
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
|
||||
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
|
||||
ethermintclient "github.com/evmos/ethermint/client"
|
||||
"github.com/evmos/ethermint/crypto/hd"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
servercfg "github.com/evmos/ethermint/server/config"
|
||||
"github.com/spf13/cobra"
|
||||
tmcfg "github.com/tendermint/tendermint/config"
|
||||
tmcli "github.com/tendermint/tendermint/libs/cli"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/params"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
kavaclient "github.com/0glabs/0g-chain/client"
|
||||
"github.com/0glabs/0g-chain/cmd/opendb"
|
||||
"github.com/0glabs/0g-chain/cmd/0gchaind/iavlviewer"
|
||||
"github.com/0glabs/0g-chain/cmd/0gchaind/rocksdb"
|
||||
"github.com/0glabs/0g-chain/crypto/vrf"
|
||||
"github.com/Kava-Labs/opendb"
|
||||
)
|
||||
|
||||
func customKeyringOptions() keyring.Option {
|
||||
@ -46,11 +52,10 @@ func NewRootCmd() *cobra.Command {
|
||||
WithLegacyAmino(encodingConfig.Amino).
|
||||
WithInput(os.Stdin).
|
||||
WithAccountRetriever(types.AccountRetriever{}).
|
||||
WithBroadcastMode(flags.BroadcastBlock).
|
||||
WithBroadcastMode(flags.FlagBroadcastMode).
|
||||
WithHomeDir(chaincfg.DefaultNodeHome).
|
||||
WithKeyringOptions(customKeyringOptions()).
|
||||
WithKeyringOptions(hd.EthSecp256k1Option()).
|
||||
WithViper(chaincfg.EnvPrefix)
|
||||
|
||||
rootCmd := &cobra.Command{
|
||||
Use: chaincfg.AppName,
|
||||
Short: "Daemon and CLI for the 0g-chain blockchain.",
|
||||
@ -84,18 +89,29 @@ func NewRootCmd() *cobra.Command {
|
||||
}
|
||||
|
||||
addSubCmds(rootCmd, encodingConfig, chaincfg.DefaultNodeHome)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
// addSubCmds registers all the sub commands used by 0g-chain.
|
||||
// dbOpener is a function to open `application.db`, potentially with customized options.
|
||||
// dbOpener sets dataDir to "data", dbName to "application" and calls generic OpenDB function.
|
||||
func dbOpener(opts servertypes.AppOptions, rootDir string, backend dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(rootDir, "data")
|
||||
return opendb.OpenDB(opts, dataDir, "application", backend)
|
||||
}
|
||||
|
||||
// addSubCmds registers all the sub commands used by kava.
|
||||
func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, defaultNodeHome string) {
|
||||
gentxModule, ok := app.ModuleBasics[genutiltypes.ModuleName].(genutil.AppModuleBasic)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("expected %s module to be an instance of type %T", genutiltypes.ModuleName, genutil.AppModuleBasic{}))
|
||||
}
|
||||
|
||||
rootCmd.AddCommand(
|
||||
StatusCommand(),
|
||||
ethermintclient.ValidateChainID(
|
||||
genutilcli.InitCmd(app.ModuleBasics, defaultNodeHome),
|
||||
),
|
||||
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome),
|
||||
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome, gentxModule.GenTxValidator),
|
||||
AssertInvariantsCmd(encodingConfig),
|
||||
genutilcli.GenTxCmd(app.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, defaultNodeHome),
|
||||
genutilcli.ValidateGenesisCmd(app.ModuleBasics),
|
||||
@ -113,7 +129,7 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
|
||||
opts := ethermintserver.StartOptions{
|
||||
AppCreator: ac.newApp,
|
||||
DefaultNodeHome: chaincfg.DefaultNodeHome,
|
||||
DBOpener: opendb.OpenDB,
|
||||
DBOpener: dbOpener,
|
||||
}
|
||||
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
|
||||
ethermintserver.AddCommands(
|
||||
@ -123,10 +139,13 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
|
||||
ac.addStartCmdFlags,
|
||||
)
|
||||
|
||||
// add keybase, gas RPC, query, and tx child commands
|
||||
// add keybase, auxiliary RPC, query, and tx child commands
|
||||
rootCmd.AddCommand(
|
||||
newQueryCmd(),
|
||||
newTxCmd(),
|
||||
kavaclient.KeyCommands(chaincfg.DefaultNodeHome),
|
||||
keyCommands(chaincfg.DefaultNodeHome),
|
||||
rocksdb.RocksDBCmd,
|
||||
newShardCmd(opts),
|
||||
iavlviewer.NewCmd(opts),
|
||||
)
|
||||
}
|
||||
|
322
cmd/0gchaind/shard.go
Normal file
322
cmd/0gchaind/shard.go
Normal file
@ -0,0 +1,322 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types"
|
||||
"github.com/cosmos/cosmos-sdk/store/rootmulti"
|
||||
|
||||
tmconfig "github.com/cometbft/cometbft/config"
|
||||
"github.com/cometbft/cometbft/node"
|
||||
tmstate "github.com/cometbft/cometbft/state"
|
||||
"github.com/cometbft/cometbft/store"
|
||||
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
)
|
||||
|
||||
const (
|
||||
flagShardStartBlock = "start"
|
||||
flagShardEndBlock = "end"
|
||||
flagShardOnlyAppState = "only-app-state"
|
||||
flagShardForceAppVersion = "force-app-version"
|
||||
flagShardOnlyCometbftState = "only-cometbft-state"
|
||||
// TODO: --preserve flag for creating & operating on a copy?
|
||||
|
||||
// allow using -1 to mean "latest" (perform no rollbacks)
|
||||
shardEndBlockLatest = -1
|
||||
)
|
||||
|
||||
func newShardCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "shard --home <path-to-home-dir> --start <start-block> --end <end-block> [--only-app-state] [--only-cometbft-state] [--force-app-version <app-version>]",
|
||||
Short: "Strip all blocks from the database outside of a given range",
|
||||
Long: `shard opens a local kava home directory's databases and removes all blocks outside a range defined by --start and --end. The range is inclusive of the end block.
|
||||
|
||||
It works by first rolling back the latest state to the block before the end block, and then by pruning all state before the start block.
|
||||
|
||||
Setting the end block to -1 signals to keep the latest block (no rollbacks).
|
||||
|
||||
The application.db can be loaded at a particular height via the --force-app-version option. This is useful if the sharding process is prematurely terminated while the application.db is being sharded.
|
||||
|
||||
The --only-app-state flag can be used to skip the pruning of the blockstore and cometbft state. This matches the functionality of the cosmos-sdk's "prune" command. Note that rolled back blocks will still affect all stores.
|
||||
|
||||
Similarly, the --only-cometbft-state flag skips pruning app state. This can be useful if the shard command is prematurely terminated during the shard process.
|
||||
|
||||
The shard command only flags the iavl tree nodes for deletion. Actual removal from the databases will be performed when each database is compacted.
|
||||
|
||||
WARNING: this is a destructive action.`,
|
||||
Example: `Create a 1M block data shard (keeps blocks kava 1,000,000 to 2,000,000)
|
||||
$ kava shard --home path/to/.kava --start 1000000 --end 2000000
|
||||
|
||||
Prune all blocks up to 5,000,000:
|
||||
$ kava shard --home path/to/.kava --start 5000000 --end -1
|
||||
|
||||
Prune first 1M blocks _without_ affecting blockstore or cometBFT state:
|
||||
$ kava shard --home path/to/.kava --start 1000000 --end -1 --only-app-state`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
//////////////////////////
|
||||
// parse & validate flags
|
||||
//////////////////////////
|
||||
startBlock, err := cmd.Flags().GetInt64(flagShardStartBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endBlock, err := cmd.Flags().GetInt64(flagShardEndBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (endBlock == 0 || endBlock < startBlock) && endBlock != shardEndBlockLatest {
|
||||
return fmt.Errorf("end block (%d) must be greater than start block (%d)", endBlock, startBlock)
|
||||
}
|
||||
onlyAppState, err := cmd.Flags().GetBool(flagShardOnlyAppState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
forceAppVersion, err := cmd.Flags().GetInt64(flagShardForceAppVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
onlyCometbftState, err := cmd.Flags().GetBool(flagShardOnlyCometbftState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientCtx := client.GetClientContextFromCmd(cmd)
|
||||
|
||||
ctx := server.GetServerContextFromCmd(cmd)
|
||||
ctx.Config.SetRoot(clientCtx.HomeDir)
|
||||
|
||||
////////////////////////
|
||||
// manage db connection
|
||||
////////////////////////
|
||||
// connect to database
|
||||
db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// close db connection when done
|
||||
defer func() {
|
||||
if err := db.Close(); err != nil {
|
||||
ctx.Logger.Error("error closing db", "error", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
///////////////////
|
||||
// load multistore
|
||||
///////////////////
|
||||
// create app in order to load the multistore
|
||||
// skip loading the latest version so the desired height can be manually loaded
|
||||
ctx.Viper.Set("skip-load-latest", true)
|
||||
|
||||
app := opts.AppCreator(ctx.Logger, db, nil, ctx.Viper).(*app.App)
|
||||
if forceAppVersion == shardEndBlockLatest {
|
||||
if err := app.LoadLatestVersion(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := app.LoadVersion(forceAppVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// get the multistore
|
||||
cms := app.CommitMultiStore()
|
||||
multistore, ok := cms.(*rootmulti.Store)
|
||||
if !ok {
|
||||
return fmt.Errorf("only sharding of rootmulti.Store type is supported")
|
||||
}
|
||||
|
||||
////////////////////////
|
||||
// shard application.db
|
||||
////////////////////////
|
||||
if !onlyCometbftState {
|
||||
if err := shardApplicationDb(multistore, startBlock, endBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[%s] skipping sharding of application.db\n", flagShardOnlyCometbftState)
|
||||
}
|
||||
|
||||
//////////////////////////////////
|
||||
// shard blockstore.db & state.db
|
||||
//////////////////////////////////
|
||||
// open block store & cometbft state
|
||||
blockStore, stateStore, err := openCometBftDbs(ctx.Config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cometbft dbs: %s", err)
|
||||
}
|
||||
|
||||
if !onlyAppState {
|
||||
if err := shardCometBftDbs(blockStore, stateStore, startBlock, endBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("[%s] skipping sharding of blockstore.db and state.db\n", flagShardOnlyAppState)
|
||||
fmt.Printf("blockstore contains blocks %d - %d\n", blockStore.Base(), blockStore.Height())
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().String(flags.FlagHome, opts.DefaultNodeHome, "The application home directory")
|
||||
cmd.Flags().Int64(flagShardStartBlock, 1, "Start block of data shard (inclusive)")
|
||||
cmd.Flags().Int64(flagShardEndBlock, 0, "End block of data shard (inclusive)")
|
||||
cmd.Flags().Bool(flagShardOnlyAppState, false, "Skip pruning of blockstore & cometbft state")
|
||||
cmd.Flags().Bool(flagShardOnlyCometbftState, false, "Skip pruning of application state")
|
||||
cmd.Flags().Int64(flagShardForceAppVersion, shardEndBlockLatest, "Instead of loading latest, force set the version of the multistore that is loaded")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// shardApplicationDb prunes the multistore up to startBlock and rolls it back to endBlock
|
||||
func shardApplicationDb(multistore *rootmulti.Store, startBlock, endBlock int64) error {
|
||||
//////////////////////////////
|
||||
// Rollback state to endBlock
|
||||
//////////////////////////////
|
||||
// handle desired endblock being latest
|
||||
latest := multistore.LastCommitID().Version
|
||||
if latest == 0 {
|
||||
return fmt.Errorf("failed to find latest height >0")
|
||||
}
|
||||
fmt.Printf("latest height: %d\n", latest)
|
||||
if endBlock == shardEndBlockLatest {
|
||||
endBlock = latest
|
||||
}
|
||||
shardSize := endBlock - startBlock + 1
|
||||
|
||||
// error if requesting block range the database does not have
|
||||
if endBlock > latest {
|
||||
return fmt.Errorf("data does not contain end block (%d): latest version is %d", endBlock, latest)
|
||||
}
|
||||
|
||||
fmt.Printf("pruning data down to heights %d - %d (%d blocks)\n", startBlock, endBlock, shardSize)
|
||||
|
||||
// set pruning options to prevent no-ops from `PruneStores`
|
||||
multistore.SetPruning(pruningtypes.PruningOptions{KeepRecent: uint64(shardSize), Interval: 0})
|
||||
|
||||
// rollback application state
|
||||
if err := multistore.RollbackToVersion(endBlock); err != nil {
|
||||
return fmt.Errorf("failed to rollback application state: %s", err)
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Prune blocks to startBlock
|
||||
//////////////////////////////
|
||||
// enumerate all heights to prune
|
||||
pruneHeights := make([]int64, 0, latest-shardSize)
|
||||
for i := int64(1); i < startBlock; i++ {
|
||||
pruneHeights = append(pruneHeights, i)
|
||||
}
|
||||
|
||||
if len(pruneHeights) > 0 {
|
||||
// prune application state
|
||||
fmt.Printf("pruning application state to height %d\n", startBlock)
|
||||
for _, pruneHeight := range pruneHeights {
|
||||
if err := multistore.PruneStores(pruneHeight); err != nil {
|
||||
return fmt.Errorf("failed to prune application state: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// shardCometBftDbs shrinks blockstore.db & state.db down to the desired block range
|
||||
func shardCometBftDbs(blockStore *store.BlockStore, stateStore tmstate.Store, startBlock, endBlock int64) error {
|
||||
var err error
|
||||
latest := blockStore.Height()
|
||||
if endBlock == shardEndBlockLatest {
|
||||
endBlock = latest
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Rollback state to endBlock
|
||||
//////////////////////////////
|
||||
// prep for outputting progress repeatedly to same line
|
||||
needsRollback := endBlock < latest
|
||||
progress := "rolling back blockstore & cometbft state to height %d"
|
||||
numChars := len(fmt.Sprintf(progress, latest))
|
||||
clearLine := fmt.Sprintf("\r%s\r", strings.Repeat(" ", numChars))
|
||||
printRollbackProgress := func(h int64) {
|
||||
fmt.Print(clearLine)
|
||||
fmt.Printf(progress, h)
|
||||
}
|
||||
|
||||
// rollback tendermint db
|
||||
height := latest
|
||||
for height > endBlock {
|
||||
beforeRollbackHeight := height
|
||||
printRollbackProgress(height - 1)
|
||||
height, _, err = tmstate.Rollback(blockStore, stateStore, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to rollback cometbft state: %w", err)
|
||||
}
|
||||
if beforeRollbackHeight == height {
|
||||
return fmt.Errorf("attempting to rollback cometbft state height %d failed (no rollback performed)", height)
|
||||
}
|
||||
}
|
||||
|
||||
if needsRollback {
|
||||
fmt.Println()
|
||||
} else {
|
||||
fmt.Printf("latest store height is already %d\n", latest)
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
// Prune blocks to startBlock
|
||||
//////////////////////////////
|
||||
// get starting block of block store
|
||||
baseBlock := blockStore.Base()
|
||||
|
||||
// only prune if data exists, otherwise blockStore.PruneBlocks will panic
|
||||
if baseBlock < startBlock {
|
||||
// prune block store
|
||||
fmt.Printf("pruning block store from %d - %d\n", baseBlock, startBlock)
|
||||
if _, err := blockStore.PruneBlocks(startBlock); err != nil {
|
||||
return fmt.Errorf("failed to prune block store (retainHeight=%d): %s", startBlock, err)
|
||||
}
|
||||
|
||||
// prune cometbft state
|
||||
fmt.Printf("pruning cometbft state from %d - %d\n", baseBlock, startBlock)
|
||||
if err := stateStore.PruneStates(baseBlock, startBlock); err != nil {
|
||||
return fmt.Errorf("failed to prune cometbft state store (%d - %d): %s", baseBlock, startBlock, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("blockstore and cometbft state begins at block %d\n", baseBlock)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// inspired by https://github.com/Kava-Labs/cometbft/blob/277b0853db3f67865a55aa1c54f59790b5f591be/node/node.go#L234
|
||||
func openCometBftDbs(config *tmconfig.Config) (blockStore *store.BlockStore, stateStore tmstate.Store, err error) {
|
||||
dbProvider := node.DefaultDBProvider
|
||||
|
||||
var blockStoreDB dbm.DB
|
||||
blockStoreDB, err = dbProvider(&node.DBContext{ID: "blockstore", Config: config})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
blockStore = store.NewBlockStore(blockStoreDB)
|
||||
|
||||
stateDB, err := dbProvider(&node.DBContext{ID: "state", Config: config})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stateStore = tmstate.NewStore(stateDB, tmstate.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
})
|
||||
|
||||
return
|
||||
}
|
@ -5,9 +5,9 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/bytes"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
coretypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
"github.com/cometbft/cometbft/libs/bytes"
|
||||
"github.com/cometbft/cometbft/p2p"
|
||||
coretypes "github.com/cometbft/cometbft/rpc/core/types"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
|
@ -1,499 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
"github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
|
||||
var rocksdbMetrics *Metrics
|
||||
|
||||
// Metrics contains all rocksdb metrics which will be reported to prometheus
|
||||
type Metrics struct {
|
||||
// Keys
|
||||
NumberKeysWritten metrics.Gauge
|
||||
NumberKeysRead metrics.Gauge
|
||||
NumberKeysUpdated metrics.Gauge
|
||||
EstimateNumKeys metrics.Gauge
|
||||
|
||||
// Files
|
||||
NumberFileOpens metrics.Gauge
|
||||
NumberFileErrors metrics.Gauge
|
||||
|
||||
// Memory
|
||||
BlockCacheUsage metrics.Gauge
|
||||
EstimateTableReadersMem metrics.Gauge
|
||||
CurSizeAllMemTables metrics.Gauge
|
||||
BlockCachePinnedUsage metrics.Gauge
|
||||
|
||||
// Cache
|
||||
BlockCacheMiss metrics.Gauge
|
||||
BlockCacheHit metrics.Gauge
|
||||
BlockCacheAdd metrics.Gauge
|
||||
BlockCacheAddFailures metrics.Gauge
|
||||
|
||||
// Detailed Cache
|
||||
BlockCacheIndexMiss metrics.Gauge
|
||||
BlockCacheIndexHit metrics.Gauge
|
||||
BlockCacheIndexBytesInsert metrics.Gauge
|
||||
|
||||
BlockCacheFilterMiss metrics.Gauge
|
||||
BlockCacheFilterHit metrics.Gauge
|
||||
BlockCacheFilterBytesInsert metrics.Gauge
|
||||
|
||||
BlockCacheDataMiss metrics.Gauge
|
||||
BlockCacheDataHit metrics.Gauge
|
||||
BlockCacheDataBytesInsert metrics.Gauge
|
||||
|
||||
// Latency
|
||||
DBGetMicrosP50 metrics.Gauge
|
||||
DBGetMicrosP95 metrics.Gauge
|
||||
DBGetMicrosP99 metrics.Gauge
|
||||
DBGetMicrosP100 metrics.Gauge
|
||||
DBGetMicrosCount metrics.Gauge
|
||||
|
||||
DBWriteMicrosP50 metrics.Gauge
|
||||
DBWriteMicrosP95 metrics.Gauge
|
||||
DBWriteMicrosP99 metrics.Gauge
|
||||
DBWriteMicrosP100 metrics.Gauge
|
||||
DBWriteMicrosCount metrics.Gauge
|
||||
|
||||
// Write Stall
|
||||
StallMicros metrics.Gauge
|
||||
|
||||
DBWriteStallP50 metrics.Gauge
|
||||
DBWriteStallP95 metrics.Gauge
|
||||
DBWriteStallP99 metrics.Gauge
|
||||
DBWriteStallP100 metrics.Gauge
|
||||
DBWriteStallCount metrics.Gauge
|
||||
DBWriteStallSum metrics.Gauge
|
||||
|
||||
// Bloom Filter
|
||||
BloomFilterUseful metrics.Gauge
|
||||
BloomFilterFullPositive metrics.Gauge
|
||||
BloomFilterFullTruePositive metrics.Gauge
|
||||
|
||||
// LSM Tree Stats
|
||||
LastLevelReadBytes metrics.Gauge
|
||||
LastLevelReadCount metrics.Gauge
|
||||
NonLastLevelReadBytes metrics.Gauge
|
||||
NonLastLevelReadCount metrics.Gauge
|
||||
|
||||
GetHitL0 metrics.Gauge
|
||||
GetHitL1 metrics.Gauge
|
||||
GetHitL2AndUp metrics.Gauge
|
||||
}
|
||||
|
||||
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
|
||||
func registerMetrics() {
|
||||
if rocksdbMetrics != nil {
|
||||
// metrics already registered
|
||||
return
|
||||
}
|
||||
|
||||
labels := make([]string, 0)
|
||||
rocksdbMetrics = &Metrics{
|
||||
// Keys
|
||||
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_written",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_read",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_updated",
|
||||
Help: "",
|
||||
}, labels),
|
||||
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "estimate_num_keys",
|
||||
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
|
||||
}, labels),
|
||||
|
||||
// Files
|
||||
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "file",
|
||||
Name: "number_file_opens",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "file",
|
||||
Name: "number_file_errors",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Memory
|
||||
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "block_cache_usage",
|
||||
Help: "memory size for the entries residing in block cache",
|
||||
}, labels),
|
||||
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "estimate_table_readers_mem",
|
||||
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
|
||||
}, labels),
|
||||
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "cur_size_all_mem_tables",
|
||||
Help: "approximate size of active and unflushed immutable memtables (bytes)",
|
||||
}, labels),
|
||||
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "block_cache_pinned_usage",
|
||||
Help: "returns the memory size for the entries being pinned",
|
||||
}, labels),
|
||||
|
||||
// Cache
|
||||
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_miss",
|
||||
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
|
||||
}, labels),
|
||||
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_hit",
|
||||
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
|
||||
}, labels),
|
||||
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_add",
|
||||
Help: "number of blocks added to block cache",
|
||||
}, labels),
|
||||
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_add_failures",
|
||||
Help: "number of failures when adding blocks to block cache",
|
||||
}, labels),
|
||||
|
||||
// Detailed Cache
|
||||
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Latency
|
||||
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Write Stall
|
||||
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "stall_micros",
|
||||
Help: "Writer has to wait for compaction or flush to finish.",
|
||||
}, labels),
|
||||
|
||||
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_sum",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Bloom Filter
|
||||
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_useful",
|
||||
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
|
||||
}, labels),
|
||||
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_full_positive",
|
||||
Help: "number of times bloom FullFilter has not avoided the reads.",
|
||||
}, labels),
|
||||
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_full_true_positive",
|
||||
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
|
||||
}, labels),
|
||||
|
||||
// LSM Tree Stats
|
||||
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "last_level_read_bytes",
|
||||
Help: "",
|
||||
}, labels),
|
||||
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "last_level_read_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "non_last_level_read_bytes",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "non_last_level_read_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l0",
|
||||
Help: "number of Get() queries served by L0",
|
||||
}, labels),
|
||||
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l1",
|
||||
Help: "number of Get() queries served by L1",
|
||||
}, labels),
|
||||
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l2_and_up",
|
||||
Help: "number of Get() queries served by L2 and up",
|
||||
}, labels),
|
||||
}
|
||||
}
|
||||
|
||||
// report reports metrics to prometheus based on rocksdb props and stats
|
||||
func (m *Metrics) report(props *properties, stats *stats) {
|
||||
// Keys
|
||||
m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
|
||||
m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
|
||||
m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
|
||||
m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
|
||||
|
||||
// Files
|
||||
m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
|
||||
m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
|
||||
|
||||
// Memory
|
||||
m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
|
||||
m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
|
||||
m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
|
||||
m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
|
||||
|
||||
// Cache
|
||||
m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
|
||||
m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
|
||||
m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
|
||||
m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
|
||||
|
||||
// Detailed Cache
|
||||
m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
|
||||
m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
|
||||
m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
|
||||
|
||||
m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
|
||||
m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
|
||||
m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
|
||||
|
||||
m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
|
||||
m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
|
||||
m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
|
||||
|
||||
// Latency
|
||||
m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
|
||||
m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
|
||||
m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
|
||||
m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
|
||||
m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
|
||||
|
||||
m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
|
||||
m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
|
||||
m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
|
||||
m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
|
||||
m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
|
||||
|
||||
// Write Stall
|
||||
m.StallMicros.Set(float64(stats.StallMicros))
|
||||
|
||||
m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
|
||||
m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
|
||||
m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
|
||||
m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
|
||||
m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
|
||||
m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
|
||||
|
||||
// Bloom Filter
|
||||
m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
|
||||
m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
|
||||
m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
|
||||
|
||||
// LSM Tree Stats
|
||||
m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
|
||||
m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
|
||||
m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
|
||||
m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
|
||||
|
||||
m.GetHitL0.Set(float64(stats.GetHitL0))
|
||||
m.GetHitL1.Set(float64(stats.GetHitL1))
|
||||
m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
|
||||
}
|
@ -1,18 +0,0 @@
|
||||
//go:build !rocksdb
|
||||
// +build !rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/server/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
// OpenDB is a copy of default DBOpener function used by ethermint, see for details:
|
||||
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
|
||||
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(home, "data")
|
||||
return dbm.NewDB("application", backendType, dataDir)
|
||||
}
|
@ -1,398 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
// Copyright 2023 Kava Labs, Inc.
|
||||
// Copyright 2023 Cronos Labs, Inc.
|
||||
//
|
||||
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/server/types"
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/spf13/cast"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
// ErrUnexpectedConfiguration is returned when an existing database has more
// than one column family, or one that is not named "default".
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")

const (
	// defaultBlockCacheSize is the default tm-db block cache size for RocksDB (1 GiB).
	defaultBlockCacheSize = 1 << 30

	// defaultColumnFamilyName is the single column family every database is expected to have.
	defaultColumnFamilyName = "default"

	// Metrics reporting options.
	enableMetricsOptName             = "rocksdb.enable-metrics"
	reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
	defaultReportMetricsIntervalSecs = 15

	// Database-level option names.
	// NOTE: key spelling (dashes vs underscores) is part of the public config
	// surface and must not be normalized.
	maxOpenFilesDBOptName           = "rocksdb.max-open-files"
	maxFileOpeningThreadsDBOptName  = "rocksdb.max-file-opening-threads"
	tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
	allowMMAPWritesDBOptName        = "rocksdb.allow_mmap_writes"
	allowMMAPReadsDBOptName         = "rocksdb.allow_mmap_reads"
	useFsyncDBOptName               = "rocksdb.use_fsync"
	useAdaptiveMutexDBOptName       = "rocksdb.use_adaptive_mutex"
	bytesPerSyncDBOptName           = "rocksdb.bytes_per_sync"
	maxBackgroundJobsDBOptName      = "rocksdb.max-background-jobs"

	// Column-family option names.
	writeBufferSizeCFOptName                = "rocksdb.write-buffer-size"
	numLevelsCFOptName                      = "rocksdb.num-levels"
	maxWriteBufferNumberCFOptName           = "rocksdb.max_write_buffer_number"
	minWriteBufferNumberToMergeCFOptName    = "rocksdb.min_write_buffer_number_to_merge"
	maxBytesForLevelBaseCFOptName           = "rocksdb.max_bytes_for_level_base"
	maxBytesForLevelMultiplierCFOptName     = "rocksdb.max_bytes_for_level_multiplier"
	targetFileSizeBaseCFOptName             = "rocksdb.target_file_size_base"
	targetFileSizeMultiplierCFOptName       = "rocksdb.target_file_size_multiplier"
	level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
	level0SlowdownWritesTriggerCFOptName    = "rocksdb.level0_slowdown_writes_trigger"

	// Block-based-table option names.
	blockCacheSizeBBTOOptName                   = "rocksdb.block_cache_size"
	bitsPerKeyBBTOOptName                       = "rocksdb.bits_per_key"
	blockSizeBBTOOptName                        = "rocksdb.block_size"
	cacheIndexAndFilterBlocksBBTOOptName        = "rocksdb.cache_index_and_filter_blocks"
	pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
	formatVersionBBTOOptName                    = "rocksdb.format_version"

	// Read option names.
	asyncIOReadOptName = "rocksdb.read-async-io"
)
|
||||
|
||||
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(home, "data")
|
||||
if backendType == dbm.RocksDBBackend {
|
||||
return openRocksdb(dataDir, appOpts)
|
||||
}
|
||||
|
||||
return dbm.NewDB("application", backendType, dataDir)
|
||||
}
|
||||
|
||||
// openRocksdb loads existing options, overrides some of them with appOpts and opens database
|
||||
// option will be overridden only in case if it explicitly specified in appOpts
|
||||
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
|
||||
optionsPath := filepath.Join(dir, "application.db")
|
||||
dbOpts, cfOpts, err := loadLatestOptions(optionsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// customize rocksdb options
|
||||
bbtoOpts := bbtoFromAppOpts(appOpts)
|
||||
dbOpts.SetBlockBasedTableFactory(bbtoOpts)
|
||||
cfOpts.SetBlockBasedTableFactory(bbtoOpts)
|
||||
dbOpts = overrideDBOpts(dbOpts, appOpts)
|
||||
cfOpts = overrideCFOpts(cfOpts, appOpts)
|
||||
readOpts := readOptsFromAppOpts(appOpts)
|
||||
|
||||
enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
|
||||
reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
|
||||
if reportMetricsIntervalSecs == 0 {
|
||||
reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
|
||||
}
|
||||
|
||||
return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, reportMetricsIntervalSecs)
|
||||
}
|
||||
|
||||
// loadLatestOptions loads and returns database and column family options
|
||||
// if options file not found, it means database isn't created yet, in such case default tm-db options will be returned
|
||||
// if database exists it should have only one column family named default
|
||||
func loadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
|
||||
latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
|
||||
if err != nil && strings.HasPrefix(err.Error(), "NotFound: ") {
|
||||
return newDefaultOptions(), newDefaultOptions(), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cfNames := latestOpts.ColumnFamilyNames()
|
||||
cfOpts := latestOpts.ColumnFamilyOpts()
|
||||
// db should have only one column family named default
|
||||
ok := len(cfNames) == 1 && cfNames[0] == defaultColumnFamilyName
|
||||
if !ok {
|
||||
return nil, nil, ErrUnexpectedConfiguration
|
||||
}
|
||||
|
||||
// return db and cf opts
|
||||
return latestOpts.Options(), &cfOpts[0], nil
|
||||
}
|
||||
|
||||
// overrideDBOpts merges dbOpts and appOpts, appOpts takes precedence
|
||||
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
|
||||
maxOpenFiles := appOpts.Get(maxOpenFilesDBOptName)
|
||||
if maxOpenFiles != nil {
|
||||
dbOpts.SetMaxOpenFiles(cast.ToInt(maxOpenFiles))
|
||||
}
|
||||
|
||||
maxFileOpeningThreads := appOpts.Get(maxFileOpeningThreadsDBOptName)
|
||||
if maxFileOpeningThreads != nil {
|
||||
dbOpts.SetMaxFileOpeningThreads(cast.ToInt(maxFileOpeningThreads))
|
||||
}
|
||||
|
||||
tableCacheNumshardbits := appOpts.Get(tableCacheNumshardbitsDBOptName)
|
||||
if tableCacheNumshardbits != nil {
|
||||
dbOpts.SetTableCacheNumshardbits(cast.ToInt(tableCacheNumshardbits))
|
||||
}
|
||||
|
||||
allowMMAPWrites := appOpts.Get(allowMMAPWritesDBOptName)
|
||||
if allowMMAPWrites != nil {
|
||||
dbOpts.SetAllowMmapWrites(cast.ToBool(allowMMAPWrites))
|
||||
}
|
||||
|
||||
allowMMAPReads := appOpts.Get(allowMMAPReadsDBOptName)
|
||||
if allowMMAPReads != nil {
|
||||
dbOpts.SetAllowMmapReads(cast.ToBool(allowMMAPReads))
|
||||
}
|
||||
|
||||
useFsync := appOpts.Get(useFsyncDBOptName)
|
||||
if useFsync != nil {
|
||||
dbOpts.SetUseFsync(cast.ToBool(useFsync))
|
||||
}
|
||||
|
||||
useAdaptiveMutex := appOpts.Get(useAdaptiveMutexDBOptName)
|
||||
if useAdaptiveMutex != nil {
|
||||
dbOpts.SetUseAdaptiveMutex(cast.ToBool(useAdaptiveMutex))
|
||||
}
|
||||
|
||||
bytesPerSync := appOpts.Get(bytesPerSyncDBOptName)
|
||||
if bytesPerSync != nil {
|
||||
dbOpts.SetBytesPerSync(cast.ToUint64(bytesPerSync))
|
||||
}
|
||||
|
||||
maxBackgroundJobs := appOpts.Get(maxBackgroundJobsDBOptName)
|
||||
if maxBackgroundJobs != nil {
|
||||
dbOpts.SetMaxBackgroundJobs(cast.ToInt(maxBackgroundJobs))
|
||||
}
|
||||
|
||||
return dbOpts
|
||||
}
|
||||
|
||||
// overrideCFOpts merges cfOpts and appOpts, appOpts takes precedence
|
||||
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
|
||||
writeBufferSize := appOpts.Get(writeBufferSizeCFOptName)
|
||||
if writeBufferSize != nil {
|
||||
cfOpts.SetWriteBufferSize(cast.ToUint64(writeBufferSize))
|
||||
}
|
||||
|
||||
numLevels := appOpts.Get(numLevelsCFOptName)
|
||||
if numLevels != nil {
|
||||
cfOpts.SetNumLevels(cast.ToInt(numLevels))
|
||||
}
|
||||
|
||||
maxWriteBufferNumber := appOpts.Get(maxWriteBufferNumberCFOptName)
|
||||
if maxWriteBufferNumber != nil {
|
||||
cfOpts.SetMaxWriteBufferNumber(cast.ToInt(maxWriteBufferNumber))
|
||||
}
|
||||
|
||||
minWriteBufferNumberToMerge := appOpts.Get(minWriteBufferNumberToMergeCFOptName)
|
||||
if minWriteBufferNumberToMerge != nil {
|
||||
cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(minWriteBufferNumberToMerge))
|
||||
}
|
||||
|
||||
maxBytesForLevelBase := appOpts.Get(maxBytesForLevelBaseCFOptName)
|
||||
if maxBytesForLevelBase != nil {
|
||||
cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(maxBytesForLevelBase))
|
||||
}
|
||||
|
||||
maxBytesForLevelMultiplier := appOpts.Get(maxBytesForLevelMultiplierCFOptName)
|
||||
if maxBytesForLevelMultiplier != nil {
|
||||
cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(maxBytesForLevelMultiplier))
|
||||
}
|
||||
|
||||
targetFileSizeBase := appOpts.Get(targetFileSizeBaseCFOptName)
|
||||
if targetFileSizeBase != nil {
|
||||
cfOpts.SetTargetFileSizeBase(cast.ToUint64(targetFileSizeBase))
|
||||
}
|
||||
|
||||
targetFileSizeMultiplier := appOpts.Get(targetFileSizeMultiplierCFOptName)
|
||||
if targetFileSizeMultiplier != nil {
|
||||
cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(targetFileSizeMultiplier))
|
||||
}
|
||||
|
||||
level0FileNumCompactionTrigger := appOpts.Get(level0FileNumCompactionTriggerCFOptName)
|
||||
if level0FileNumCompactionTrigger != nil {
|
||||
cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(level0FileNumCompactionTrigger))
|
||||
}
|
||||
|
||||
level0SlowdownWritesTrigger := appOpts.Get(level0SlowdownWritesTriggerCFOptName)
|
||||
if level0SlowdownWritesTrigger != nil {
|
||||
cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(level0SlowdownWritesTrigger))
|
||||
}
|
||||
|
||||
return cfOpts
|
||||
}
|
||||
|
||||
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
|
||||
ro := grocksdb.NewDefaultReadOptions()
|
||||
asyncIO := appOpts.Get(asyncIOReadOptName)
|
||||
if asyncIO != nil {
|
||||
ro.SetAsyncIO(cast.ToBool(asyncIO))
|
||||
}
|
||||
|
||||
return ro
|
||||
}
|
||||
|
||||
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
|
||||
bbto := defaultBBTO()
|
||||
|
||||
blockCacheSize := appOpts.Get(blockCacheSizeBBTOOptName)
|
||||
if blockCacheSize != nil {
|
||||
cache := grocksdb.NewLRUCache(cast.ToUint64(blockCacheSize))
|
||||
bbto.SetBlockCache(cache)
|
||||
}
|
||||
|
||||
bitsPerKey := appOpts.Get(bitsPerKeyBBTOOptName)
|
||||
if bitsPerKey != nil {
|
||||
filter := grocksdb.NewBloomFilter(cast.ToFloat64(bitsPerKey))
|
||||
bbto.SetFilterPolicy(filter)
|
||||
}
|
||||
|
||||
blockSize := appOpts.Get(blockSizeBBTOOptName)
|
||||
if blockSize != nil {
|
||||
bbto.SetBlockSize(cast.ToInt(blockSize))
|
||||
}
|
||||
|
||||
cacheIndexAndFilterBlocks := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName)
|
||||
if cacheIndexAndFilterBlocks != nil {
|
||||
bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(cacheIndexAndFilterBlocks))
|
||||
}
|
||||
|
||||
pinL0FilterAndIndexBlocksInCache := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName)
|
||||
if pinL0FilterAndIndexBlocksInCache != nil {
|
||||
bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(pinL0FilterAndIndexBlocksInCache))
|
||||
}
|
||||
|
||||
formatVersion := appOpts.Get(formatVersionBBTOOptName)
|
||||
if formatVersion != nil {
|
||||
bbto.SetFormatVersion(cast.ToInt(formatVersion))
|
||||
}
|
||||
|
||||
return bbto
|
||||
}
|
||||
|
||||
// newRocksDBWithOptions opens rocksdb with provided database and column family options
|
||||
// newRocksDBWithOptions expects that db has only one column family named default
|
||||
func newRocksDBWithOptions(
|
||||
name string,
|
||||
dir string,
|
||||
dbOpts *grocksdb.Options,
|
||||
cfOpts *grocksdb.Options,
|
||||
readOpts *grocksdb.ReadOptions,
|
||||
enableMetrics bool,
|
||||
reportMetricsIntervalSecs int64,
|
||||
) (*dbm.RocksDB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
|
||||
// Ensure path exists
|
||||
if err := os.MkdirAll(dbPath, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create db path: %w", err)
|
||||
}
|
||||
|
||||
// EnableStatistics adds overhead so shouldn't be enabled in production
|
||||
if enableMetrics {
|
||||
dbOpts.EnableStatistics()
|
||||
}
|
||||
|
||||
db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if enableMetrics {
|
||||
registerMetrics()
|
||||
go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
|
||||
}
|
||||
|
||||
wo := grocksdb.NewDefaultWriteOptions()
|
||||
woSync := grocksdb.NewDefaultWriteOptions()
|
||||
woSync.SetSync(true)
|
||||
return dbm.NewRocksDBWithRawDB(db, readOpts, wo, woSync), nil
|
||||
}
|
||||
|
||||
// newDefaultOptions returns default tm-db options for RocksDB, see for details:
|
||||
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
|
||||
func newDefaultOptions() *grocksdb.Options {
|
||||
// default rocksdb option, good enough for most cases, including heavy workloads.
|
||||
// 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads).
|
||||
// compression: snappy as default, need to -lsnappy to enable.
|
||||
bbto := defaultBBTO()
|
||||
|
||||
opts := grocksdb.NewDefaultOptions()
|
||||
opts.SetBlockBasedTableFactory(bbto)
|
||||
// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
|
||||
opts.SetMaxOpenFiles(4096)
|
||||
opts.SetCreateIfMissing(true)
|
||||
opts.IncreaseParallelism(runtime.NumCPU())
|
||||
// 1.5GB maximum memory use for writebuffer.
|
||||
opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// defaultBBTO returns default tm-db bbto options for RocksDB, see for details:
|
||||
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
|
||||
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
|
||||
bbto := grocksdb.NewDefaultBlockBasedTableOptions()
|
||||
bbto.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
|
||||
bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10))
|
||||
|
||||
return bbto
|
||||
}
|
||||
|
||||
// reportMetrics periodically requests stats from rocksdb and reports to prometheus
|
||||
// NOTE: should be launched as a goroutine
|
||||
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
props, stats, err := getPropsAndStats(db)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rocksdbMetrics.report(props, stats)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getPropsAndStats gets statistics from rocksdb
|
||||
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
|
||||
propsLoader := newPropsLoader(db)
|
||||
props, err := propsLoader.load()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
statMap, err := parseSerializedStats(props.OptionsStatistics)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
statLoader := newStatLoader(statMap)
|
||||
stats, err := statLoader.load()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return props, stats, nil
|
||||
}
|
@ -1,384 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// mockAppOptions is a test double for types.AppOptions backed by a plain map.
type mockAppOptions struct {
	opts map[string]interface{}
}

// newMockAppOptions wraps opts in a mockAppOptions.
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
	return &mockAppOptions{opts: opts}
}

// Get returns the value stored under key, or nil when the key is absent.
func (m *mockAppOptions) Get(key string) interface{} {
	return m.opts[key]
}
|
||||
|
||||
func TestOpenRocksdb(t *testing.T) {
|
||||
t.Run("db already exists", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 2 options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 4 options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
db, err := openRocksdb(dir, tc.mockAppOptions)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("db doesn't exist yet", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
mockAppOpts := newMockAppOptions(map[string]interface{}{})
|
||||
db, err := openRocksdb(dir, mockAppOpts)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadLatestOptions(t *testing.T) {
|
||||
t.Run("db already exists", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
const testCasesNum = 3
|
||||
dbOptsList := make([]*grocksdb.Options, testCasesNum)
|
||||
cfOptsList := make([]*grocksdb.Options, testCasesNum)
|
||||
|
||||
dbOptsList[0] = newDefaultOptions()
|
||||
cfOptsList[0] = newDefaultOptions()
|
||||
|
||||
dbOptsList[1] = newDefaultOptions()
|
||||
dbOptsList[1].SetMaxOpenFiles(999)
|
||||
cfOptsList[1] = newDefaultOptions()
|
||||
cfOptsList[1].SetWriteBufferSize(999_999)
|
||||
|
||||
dbOptsList[2] = newDefaultOptions()
|
||||
dbOptsList[2].SetMaxOpenFiles(999)
|
||||
dbOptsList[2].SetMaxFileOpeningThreads(9)
|
||||
cfOptsList[2] = newDefaultOptions()
|
||||
cfOptsList[2].SetWriteBufferSize(999_999)
|
||||
cfOptsList[2].SetNumLevels(9)
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
dbOpts *grocksdb.Options
|
||||
cfOpts *grocksdb.Options
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
dbOpts: dbOptsList[0],
|
||||
cfOpts: cfOptsList[0],
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 2 options",
|
||||
dbOpts: dbOptsList[1],
|
||||
cfOpts: cfOptsList[1],
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 4 options",
|
||||
dbOpts: dbOptsList[2],
|
||||
cfOpts: cfOptsList[2],
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
name := "application"
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("db doesn't exist yet", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOverrideDBOpts(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
}{
|
||||
{
|
||||
desc: "override nothing",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
},
|
||||
{
|
||||
desc: "override max-open-files",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
},
|
||||
{
|
||||
desc: "override max-file-opening-threads",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: 9,
|
||||
},
|
||||
{
|
||||
desc: "override max-open-files and max-file-opening-threads",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
dbOpts := newDefaultOptions()
|
||||
dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOverrideCFOpts(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "override nothing",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "override write-buffer-size",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
}),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "override num-levels",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: 9,
|
||||
},
|
||||
{
|
||||
desc: "override write-buffer-size and num-levels",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
cfOpts := newDefaultOptions()
|
||||
cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadOptsFromAppOpts(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
asyncIO bool
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
asyncIO: false,
|
||||
},
|
||||
{
|
||||
desc: "set asyncIO option to true",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
asyncIOReadOptName: true,
|
||||
}),
|
||||
asyncIO: true,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
readOpts := readOptsFromAppOpts(tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.asyncIO, readOpts.IsAsyncIO())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRocksDBWithOptions(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
name := "application"
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
dbOpts := newDefaultOptions()
|
||||
dbOpts.SetMaxOpenFiles(999)
|
||||
cfOpts := newDefaultOptions()
|
||||
cfOpts.SetWriteBufferSize(999_999)
|
||||
|
||||
db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err = loadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), dbOpts.GetNumLevels())
|
||||
}
|
||||
|
||||
func TestNewDefaultOptions(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
maxOpenFiles := defaultOpts.GetMaxOpenFiles()
|
||||
require.Equal(t, 4096, maxOpenFiles)
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
)
|
||||
|
||||
// propsGetter is the subset of *grocksdb.DB used to read database properties.
type propsGetter interface {
	GetProperty(propName string) (value string)
	GetIntProperty(propName string) (value uint64, success bool)
}

// propsLoader reads a fixed set of rocksdb properties, accumulating lookup
// failures so they can all be reported in a single error.
type propsLoader struct {
	db        propsGetter
	errorMsgs []string
}

// newPropsLoader creates a propsLoader around db.
func newPropsLoader(db propsGetter) *propsLoader {
	return &propsLoader{
		db:        db,
		errorMsgs: make([]string, 0),
	}
}

// load reads every tracked property. If any lookup failed, it returns an
// error listing all failures joined with ";".
func (l *propsLoader) load() (*properties, error) {
	props := &properties{
		BaseLevel:               l.getIntProperty("rocksdb.base-level"),
		BlockCacheCapacity:      l.getIntProperty("rocksdb.block-cache-capacity"),
		BlockCachePinnedUsage:   l.getIntProperty("rocksdb.block-cache-pinned-usage"),
		BlockCacheUsage:         l.getIntProperty("rocksdb.block-cache-usage"),
		CurSizeActiveMemTable:   l.getIntProperty("rocksdb.cur-size-active-mem-table"),
		CurSizeAllMemTables:     l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
		EstimateLiveDataSize:    l.getIntProperty("rocksdb.estimate-live-data-size"),
		EstimateNumKeys:         l.getIntProperty("rocksdb.estimate-num-keys"),
		EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
		LiveSSTFilesSize:        l.getIntProperty("rocksdb.live-sst-files-size"),
		SizeAllMemTables:        l.getIntProperty("rocksdb.size-all-mem-tables"),
		OptionsStatistics:       l.getProperty("rocksdb.options-statistics"),
	}

	if len(l.errorMsgs) != 0 {
		return nil, errors.New(strings.Join(l.errorMsgs, ";"))
	}

	return props, nil
}

// getProperty returns a string property; an empty value is recorded as an error.
func (l *propsLoader) getProperty(propName string) string {
	value := l.db.GetProperty(propName)
	if value == "" {
		l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
		return ""
	}

	return value
}

// getIntProperty returns an integer property; a failed lookup is recorded as
// an error and yields zero.
func (l *propsLoader) getIntProperty(propName string) uint64 {
	value, ok := l.db.GetIntProperty(propName)
	if !ok {
		l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
		return 0
	}

	return value
}

// properties holds a snapshot of rocksdb database properties.
type properties struct {
	BaseLevel               uint64
	BlockCacheCapacity      uint64
	BlockCachePinnedUsage   uint64
	BlockCacheUsage         uint64
	CurSizeActiveMemTable   uint64
	CurSizeAllMemTables     uint64
	EstimateLiveDataSize    uint64
	EstimateNumKeys         uint64
	EstimateTableReadersMem uint64
	LiveSSTFilesSize        uint64
	SizeAllMemTables        uint64
	OptionsStatistics       string
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user